├── .circleci └── config.yml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── custom.md │ └── feature_request.md ├── OWNERS └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── .osdk-scorecard.yaml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.adoc ├── build ├── Dockerfile └── bin │ ├── entrypoint │ └── user_setup ├── cmd └── manager │ └── main.go ├── deploy ├── crds │ ├── postgresql.dev4devs.com_backups_crd.yaml │ ├── postgresql.dev4devs.com_databases_crd.yaml │ ├── postgresql.dev4devs.com_v1alpha1_backup_cr.yaml │ └── postgresql.dev4devs.com_v1alpha1_database_cr.yaml ├── olm-catalog │ ├── olm-test │ │ ├── postgresql-operator.operator-group.yaml │ │ ├── postgresql-operator.operator-source.yaml │ │ └── postgresql-operator.operator-subscription.yaml │ └── postgresql-operator │ │ ├── 0.1.1 │ │ ├── postgresql-operator.v0.1.1.clusterserviceversion.yaml │ │ ├── postgresql.dev4devs.com_backups_crd.yaml │ │ └── postgresql.dev4devs.com_databases_crd.yaml │ │ ├── 0.1.2 │ │ ├── postgresql-operator.v0.1.2.clusterserviceversion.yaml │ │ ├── postgresql.dev4devs.com_backups_crd.yaml │ │ └── postgresql.dev4devs.com_databases_crd.yaml │ │ ├── manifests │ │ ├── postgresql-operator.clusterserviceversion.yaml │ │ ├── postgresql.dev4devs.com_backups_crd.yaml │ │ └── postgresql.dev4devs.com_databases_crd.yaml │ │ └── postgresql-operator.package.yaml ├── operator.yaml ├── role.yaml ├── role_binding.yaml └── service_account.yaml ├── go.mod ├── go.sum ├── pkg ├── apis │ ├── addtoscheme_postgresql_v1alpha1.go │ ├── apis.go │ └── postgresql │ │ ├── group.go │ │ └── v1alpha1 │ │ ├── backup_types.go │ │ ├── database_types.go │ │ ├── doc.go │ │ ├── register.go │ │ ├── zz_generated.deepcopy.go │ │ └── zz_generated.openapi.go ├── config │ ├── backup.go │ └── database.go ├── controller │ ├── add_backup.go │ ├── add_database.go │ ├── backup │ │ ├── controller.go │ │ ├── controller_test.go │ │ ├── create_resources.go │ │ ├── fakeclient_test.go │ 
│ ├── helpers.go │ │ ├── mocks_test.go │ │ ├── status.go │ │ └── status_test.go │ ├── controller.go │ └── database │ │ ├── controller.go │ │ ├── controller_test.go │ │ ├── create_resources.go │ │ ├── fakeclient_test.go │ │ ├── manage_resources.go │ │ ├── mocks_test.go │ │ ├── status.go │ │ └── status_test.go ├── resource │ ├── cronjobs.go │ ├── deployments.go │ ├── pvs.go │ ├── secrets.go │ └── services.go ├── service │ ├── crs_fetches.go │ ├── generic_fetches.go │ ├── postgresql_fetches.go │ └── watches.go └── utils │ ├── backup_mandatory_specs.go │ ├── constants.go │ ├── database_envvars.go │ ├── database_mandatory_specs.go │ └── utils.go ├── scripts └── export_local_envvars.sh ├── test └── e2e │ ├── main_test.go │ └── oper_test.go ├── tools.go └── version └── version.go /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | # Golang CircleCI 2.0 configuration file 2 | # 3 | # Check https://circleci.com/docs/2.0/language-go/ for more details 4 | version: 2 5 | jobs: 6 | build: 7 | working_directory: /go/src/github.com/dev4devs-com/postgresql-operator 8 | 9 | docker: 10 | - image: circleci/golang:1.13 11 | environment: 12 | GO111MODULE=on 13 | steps: 14 | - checkout 15 | - run: 16 | name: Install operator-sdk to run make setup 17 | command: curl -Lo operator-sdk https://github.com/operator-framework/operator-sdk/releases/download/v0.18.1/operator-sdk-v0.18.1-x86_64-linux-gnu && chmod +x operator-sdk && sudo mv operator-sdk /usr/local/bin 18 | - run: export GOPROXY=https://proxy.golang.org/ 19 | - run: make setup 20 | - run: make code-build-linux 21 | - run: GO111MODULE=off go get github.com/mattn/goveralls 22 | - run: make test 23 | - run: /go/bin/goveralls -coverprofile=coverage.out -service=circle-ci -repotoken=jFmmyeYiX3zI1sj2jDCsR1QYwIrL33Xij 24 | 25 | image_push_master: 26 | working_directory: /go/src/github.com/dev4devs-com/postgresql-operator 27 | docker: 28 | - image: circleci/golang:1.13 29 | 
environment: 30 | GO111MODULE=on 31 | steps: 32 | - checkout 33 | - run: 34 | name: Install operator-sdk to run make setup 35 | command: curl -Lo operator-sdk https://github.com/operator-framework/operator-sdk/releases/download/v0.18.1/operator-sdk-v0.18.1-x86_64-linux-gnu && chmod +x operator-sdk && sudo mv operator-sdk /usr/local/bin 36 | - run: export GOPROXY=https://proxy.golang.org/ 37 | - run: make setup 38 | - run: make code-build-linux 39 | # circle ci key required for docker builds 40 | - setup_remote_docker 41 | - run: make image-build-master 42 | - run: make image-push-master 43 | 44 | image_release: 45 | working_directory: /go/src/github.com/dev4devs-com/postgresql-operator 46 | docker: 47 | - image: circleci/golang:1.13 48 | environment: 49 | GO111MODULE=on 50 | steps: 51 | - checkout 52 | - run: 53 | name: Install operator-sdk to run make setup 54 | command: curl -Lo operator-sdk https://github.com/operator-framework/operator-sdk/releases/download/v0.18.1/operator-sdk-v0.18.1-x86_64-linux-gnu && chmod +x operator-sdk && sudo mv operator-sdk /usr/local/bin 55 | - run: export GOPROXY=https://proxy.golang.org/ 56 | - run: make setup 57 | - run: make code-build-linux 58 | # circle ci key required for docker builds 59 | - setup_remote_docker 60 | - run: make image-build-release 61 | - run: make image-push-release 62 | 63 | workflows: 64 | version: 2 65 | build: 66 | jobs: 67 | - build: 68 | filters: 69 | tags: 70 | only: /.*/ 71 | - image_push_master: 72 | requires: 73 | - build 74 | filters: 75 | branches: 76 | only: 77 | - master 78 | - image_release: 79 | requires: 80 | - build 81 | filters: 82 | tags: 83 | only: /\d{1,2}\.\d{1,2}\.\d{1,2}/ 84 | branches: 85 | ignore: /.*/ -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | 
title: '' 5 | labels: '' 6 | assignees: '' 7 | --- 8 | 9 | ## Bug Report 10 | 11 | ### What did you do? 12 | 13 | 14 | ### What did you expect to see? 15 | 16 | 17 | ### What did you see instead? Under which circumstances? 18 | 19 | 20 | ### Environment 21 | * postgresql-operator version: 22 | 23 | 24 | 25 | * go version: 26 | 27 | 28 | 29 | * Kubernetes version information: 30 | 31 | 32 | 33 | 34 | ### Possible Solution 35 | 36 | 37 | ### Additional context 38 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: Raise questions for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Question 11 | 12 | ### What did you do? 13 | 14 | 15 | ### What did you expect to see? 16 | 17 | 18 | ### What did you see instead? Under which circumstances? 19 | 20 | 21 | 22 | ### Environment 23 | * postgresql-operator version: 24 | 25 | 26 | 27 | * Kubernetes version information: 28 | 29 | 30 | 31 | * Kubernetes cluster kind: 32 | 33 | ### Additional context 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Feature Request 11 | 12 | ### Is your feature request related to a problem? Please describe. 
13 | 14 | 15 | ### Describe the solution you'd like 16 | -------------------------------------------------------------------------------- /.github/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - camilamacedo86 3 | - allandequeiroz 4 | reviewers: 5 | - camilamacedo86 6 | - allandequeiroz 7 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **Description** 2 | 3 | **Motivation** 4 | 5 | Closes: #Issue-number -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IDEA Golang files 2 | .idea/* 3 | # Temporary Build Files 4 | build/_output 5 | build/_test 6 | # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 7 | ### Emacs ### 8 | # -*- mode: gitignore; -*- 9 | *~ 10 | \#*\# 11 | /.emacs.desktop 12 | /.emacs.desktop.lock 13 | *.elc 14 | auto-save-list 15 | tramp 16 | .\#* 17 | # Org-mode 18 | .org-id-locations 19 | *_archive 20 | # flymake-mode 21 | *_flymake.* 22 | # eshell files 23 | /eshell/history 24 | /eshell/lastdir 25 | # elpa packages 26 | /elpa/ 27 | # reftex files 28 | *.rel 29 | # AUCTeX auto folder 30 | /auto/ 31 | # cask packages 32 | .cask/ 33 | dist/ 34 | # Flycheck 35 | flycheck_*.el 36 | # server auth directory 37 | /server/ 38 | # projectiles files 39 | .projectile 40 | projectile-bookmarks.eld 41 | # directory configuration 42 | .dir-locals.el 43 | # saveplace 44 | places 45 | # url cache 46 | url/cache/ 47 | # cedet 48 | ede-projects.el 49 | # smex 50 | smex-items 51 | # company-statistics 52 | company-statistics-cache.el 53 | # anaconda-mode 54 | anaconda-mode/ 55 | ### Go ### 56 | # Binaries for programs and plugins 57 | *.exe 58 | *.exe~ 59 | *.dll 60 | *.so 61 | *.dylib 62 | # Test binary, 
build with 'go test -c' 63 | *.test 64 | # Output of the go coverage tool, specifically when used with LiteIDE 65 | *.out 66 | ### Vim ### 67 | # swap 68 | .sw[a-p] 69 | .*.sw[a-p] 70 | # session 71 | Session.vim 72 | # temporary 73 | .netrwhist 74 | # auto-generated tag files 75 | tags 76 | ### VisualStudioCode ### 77 | .vscode/* 78 | .history 79 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 80 | -------------------------------------------------------------------------------- /.osdk-scorecard.yaml: -------------------------------------------------------------------------------- 1 | ## 2 | # It is used to call the scorecard test/feature of the operator-sdk tool 3 | # Follow these steps to use it: 4 | # 1. Create the role_binding namespace ` kubectl create namespace postgresql-operator` 5 | # 2. Run `operator-sdk scorecard` 6 | ## 7 | scorecard: 8 | output: json 9 | plugins: 10 | - basic: 11 | cr-manifest: 12 | - "deploy/crds/postgresql.dev4devs.com_v1alpha1_database_cr.yaml" 13 | - "deploy/crds/postgresql.dev4devs.com_v1alpha1_backup_cr.yaml" 14 | csv-path: "deploy/olm-catalog/postgresql-operator/0.1.1/postgresql-operator.v0.1.1.clusterserviceversion.yaml" 15 | - olm: 16 | cr-manifest: 17 | - "deploy/crds/postgresql.dev4devs.com_v1alpha1_database_cr.yaml" 18 | - "deploy/crds/postgresql.dev4devs.com_v1alpha1_backup_cr.yaml" 19 | csv-path: "deploy/olm-catalog/postgresql-operator/0.1.1/postgresql-operator.v0.1.1.clusterserviceversion.yaml" 20 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) 5 | and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
6 | 7 | ## Unreleased 8 | 9 | ## [0.2.0] - 2020-07-06 10 | 11 | - Create new dir `deploy/olm-catalog/postgresql-operator/manifests` which the latest version which is point out for the next release version 0.2.0 12 | - Allow define the Storage Class of the PVC used for the database 13 | - Start to use CRDs version v1 instead of v1beta1 14 | - Remove the support for k8s clusters < 1.16.0 15 | - Upgrade project to use version 0.18.1 of SDK 16 | - Update the title of the project in the CVS file 17 | 18 | ## [0.1.1] - 2019-09-09 19 | - Add status in CSV OLM integration files 20 | 21 | ## [0.1.0] - 2019-09-08 22 | - Initial development release which allows work with as standalone and has OLM files. 23 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at remy@rymai.me. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [http://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: http://contributor-covenant.org 74 | [version]: http://contributor-covenant.org/version/1/4/ -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## How to contribute 2 | 3 | Thank you for your interest in contributing to the Dev4Devs project. We want 4 | keep this process as easy as possible so we've outlined a few guidelines below. 5 | 6 | ## Getting Started 7 | 8 | * Make sure you have a [GitHub account](https://github.com/signup/free) 9 | * Submit a ticket for your issue in the repository of this project, assuming one does 10 | not already exist. 11 | * Clearly describe the issue including steps to reproduce when it is a bug. 12 | * Make sure you fill in the earliest version that you know has the issue. 13 | * Fork the repository on GitHub. 
14 | 15 | ## Making changes 16 | 17 | * Create a topic branch from where you want to base your work. 18 | * This is usually the master branch. 19 | * To quickly create a topic branch based on master; `git checkout -b 20 | <branch name> master`. 21 | * Please avoid working directly on the `master` branch. 22 | * Make commits of logical units. 23 | * Prepend your commit messages with an Issue ticket number, e.g. "fix(ISSUE-1234): 24 | typo mistake in README." 25 | * Follow the coding style in use. 26 | * Check for unnecessary whitespace with `git diff --check` before committing. 27 | * Make sure you have added the necessary tests for your changes. 28 | * Run _all_ the tests to assure nothing else was accidentally broken. 29 | 30 | ## Submitting changes 31 | 32 | * Push your changes to a topic branch in your fork of the repository. 33 | * Submit a pull request to the repository of this project and choose the branch you want to patch 34 | (usually master). 35 | * Advanced users may want to install the [GitHub CLI]( https://hub.github.com/) 36 | and use the `hub pull-request` command. 37 | * Update your ISSUE ticket to mark that you have submitted code and are ready 38 | for it to be reviewed (Link the PR on it). 39 | * Include a link to the pull request in the ticket. 40 | * Add detail about the change to the pull request including screenshots if the change affects the UI. 41 | 42 | ## Reviewing changes 43 | 44 | * After submitting a pull request, one of Dev4Devs team members will review it. 45 | * Changes may be requested to conform to our style guide and internal requirements. 46 | * When the changes are approved and all tests are passing, a Dev4Devs team member will merge them. 47 | 48 | NOTE: If you have write access to the repository, do not directly merge pull requests. Let another team member review your pull request and approve it. 
49 | 50 | ## Additional Resources 51 | 52 | * [General GitHub documentation](http://help.github.com/) 53 | * [GitHub pull request documentation](https://help.github.com/articles/about-pull-requests/) 54 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | APP_NAME = postgresql-operator 2 | ORG_NAME = dev4devs-com 3 | PKG = github.com/$(ORG_NAME)/$(APP_NAME) 4 | TOP_SRC_DIRS = pkg 5 | PACKAGES ?= $(shell sh -c "find $(TOP_SRC_DIRS) -name \\*_test.go \ 6 | -exec dirname {} \\; | sort | uniq") 7 | TEST_PKGS = $(addprefix $(PKG)/,$(PACKAGES)) 8 | APP_FILE=./cmd/manager/main.go 9 | BIN_DIR := $(GOPATH)/bin 10 | BINARY ?= postgresql-operator 11 | IMAGE_REGISTRY=quay.io 12 | IMAGE_LATEST_TAG=$(IMAGE_REGISTRY)/$(ORG_NAME)/$(APP_NAME):latest 13 | IMAGE_MASTER_TAG=$(IMAGE_REGISTRY)/$(ORG_NAME)/$(APP_NAME):master 14 | IMAGE_RELEASE_TAG=$(IMAGE_REGISTRY)/$(ORG_NAME)/$(APP_NAME):$(CIRCLE_TAG) 15 | NAMESPACE=postgresql-operator 16 | TEST_COMPILE_OUTPUT ?= build/_output/bin/$(APP_NAME)-test 17 | 18 | # This follows the output format for goreleaser 19 | BINARY_LINUX_64 = ./dist/linux_amd64/$(BINARY) 20 | 21 | LDFLAGS=-ldflags "-w -s -X main.Version=${TAG}" 22 | .DEFAULT_GOAL:=help 23 | 24 | export 
GOPROXY?=https://proxy.golang.org/ 25 | 26 | ############################## 27 | # INSTALL-UNINSTALL # 28 | ############################## 29 | 30 | ##@ Application 31 | 32 | .PHONY: install 33 | install: ## Install all resources (CR-CRD's, RBCA and Operator) 34 | @echo ....... Creating namespace ....... 35 | - kubectl create namespace ${NAMESPACE} 36 | @echo ....... Applying CRDS and Operator ....... 37 | - kubectl apply -f deploy/crds/postgresql.dev4devs.com_databases_crd.yaml -n ${NAMESPACE} 38 | - kubectl apply -f deploy/crds/postgresql.dev4devs.com_backups_crd.yaml -n ${NAMESPACE} 39 | @echo ....... Applying Rules and Service Account ....... 40 | - kubectl apply -f deploy/role.yaml -n ${NAMESPACE} 41 | - kubectl apply -f deploy/role_binding.yaml -n ${NAMESPACE} 42 | - kubectl apply -f deploy/service_account.yaml -n ${NAMESPACE} 43 | @echo ....... Applying Database Operator ....... 44 | - kubectl apply -f deploy/operator.yaml -n ${NAMESPACE} 45 | @echo ....... Creating the Database ....... 46 | - kubectl apply -f deploy/crds/postgresql.dev4devs.com_v1alpha1_database_cr.yaml -n ${NAMESPACE} 47 | 48 | .PHONY: uninstall 49 | uninstall: ## Uninstall all that all performed in the $ make install 50 | @echo ....... Uninstalling ....... 51 | @echo ....... Deleting CRDs....... 52 | - kubectl delete -f deploy/crds/postgresql.dev4devs.com_backups_crd.yaml -n ${NAMESPACE} 53 | - kubectl delete -f deploy/crds/postgresql.dev4devs.com_databases_crd.yaml -n ${NAMESPACE} 54 | @echo ....... Deleting Rules and Service Account ....... 55 | - kubectl delete -f deploy/role.yaml -n ${NAMESPACE} 56 | - kubectl delete -f deploy/role_binding.yaml -n ${NAMESPACE} 57 | - kubectl delete -f deploy/service_account.yaml -n ${NAMESPACE} 58 | @echo ....... Deleting Operator ....... 59 | - kubectl delete -f deploy/operator.yaml -n ${NAMESPACE} 60 | @echo ....... Deleting namespace ${NAMESPACE}....... 
61 | - kubectl delete namespace ${NAMESPACE} 62 | 63 | .PHONY: install-backup 64 | install-backup: ## Install backup feature ( Backup CR ) 65 | @echo Installing backup service in ${NAMESPACE} : 66 | - kubectl apply -f deploy/crds/postgresql.dev4devs.com_v1alpha1_backup_cr.yaml -n ${NAMESPACE} 67 | 68 | .PHONY: uninstall-backup 69 | uninstall-backup: ## Uninstall backup feature ( Backup CR ) 70 | @echo Uninstalling backup service from ${NAMESPACE} : 71 | - kubectl delete -f deploy/crds/postgresql.dev4devs.com_v1alpha1_backup_cr.yaml -n ${NAMESPACE} 72 | 73 | ############################## 74 | # CI # 75 | ############################## 76 | 77 | ##@ CI 78 | 79 | .PHONY: code-build-linux 80 | code-build-linux: ## Build binary for Linux SO (amd64) 81 | env GOOS=linux GOARCH=amd64 go build $(APP_FILE) 82 | 83 | .PHONY: image-build-master 84 | image-build-master: ## Build master branch image 85 | @echo Building operator with the tag: $(IMAGE_MASTER_TAG) 86 | operator-sdk build $(IMAGE_MASTER_TAG) 87 | 88 | .PHONY: image-build-release 89 | image-build-release: ## Build release and latest tag image 90 | @echo Building operator with the tag: $(IMAGE_RELEASE_TAG) 91 | operator-sdk build $(IMAGE_RELEASE_TAG) 92 | operator-sdk build $(IMAGE_LATEST_TAG) 93 | 94 | .PHONY: image-push-master 95 | image-push-master: ## Push master branch image 96 | @echo Pushing operator with tag $(IMAGE_MASTER_TAG) to $(IMAGE_REGISTRY) 97 | @docker login --username $(QUAY_USERNAME) --password $(QUAY_PASSWORD) quay.io 98 | docker push $(IMAGE_MASTER_TAG) 99 | 100 | .PHONY: image-push-release 101 | image-push-release: ## Push release and latest tag image 102 | @echo Pushing operator with tag $(IMAGE_RELEASE_TAG) to $(IMAGE_REGISTRY) 103 | @docker login --username $(QUAY_USERNAME) --password $(QUAY_PASSWORD) quay.io 104 | docker push $(IMAGE_RELEASE_TAG) 105 | @echo Pushing operator with tag $(IMAGE_LATEST_TAG) to $(IMAGE_REGISTRY) 106 | docker push $(IMAGE_LATEST_TAG) 107 | 108 | 109 | 
############################## 110 | # Local Development # 111 | ############################## 112 | 113 | ##@ Development 114 | 115 | .PHONY: setup-debug 116 | setup-debug: ## Setup local env to debug. It will export env vars and install the project in the cluster 117 | @echo Exporting env vars to run operator locally: 118 | - . ./scripts/export_local_envvars.sh 119 | @echo Installing ... 120 | - make install 121 | 122 | .PHONY: setup 123 | setup: 124 | go mod tidy 125 | 126 | .PHONY: run-local 127 | run-local: ## Run project locally for debug purposes. 128 | @echo Exporting env vars to run operator locally: 129 | - . ./scripts/export_local_envvars.sh 130 | @echo ....... Installing ... 131 | - make install 132 | @echo Starting ... 133 | - operator-sdk up local 134 | 135 | .PHONY: vet 136 | vet: ## Run go vet for the project 137 | @echo go vet 138 | go vet $$(go list ./... ) 139 | 140 | .PHONY: fmt 141 | fmt: ## Run go fmt for the project 142 | @echo go fmt 143 | go fmt $$(go list ./... ) 144 | 145 | .PHONY: dev 146 | dev: ## Run all required dev commands. (It should always be used before sending a PR) 147 | - make fmt 148 | - make vet 149 | - make gen 150 | 151 | .PHONY: gen 152 | gen: ## Run SDK commands to generate/update the project 153 | operator-sdk generate k8s 154 | 155 | ############################## 156 | # Tests # 157 | ############################## 158 | 159 | ##@ Tests 160 | .PHONY: test 161 | test: ## Run unit test 162 | @echo Running tests: 163 | go test -coverprofile=coverage.out -covermode=count -count=1 -short ./cmd/... ./pkg/...
164 | 165 | .PHONY: integration-cover 166 | integration-cover: ## Run coveralls 167 | echo "mode: count" > coverage-all.out 168 | $(foreach pkg,$(PACKAGES),\ 169 | go test -failfast -tags=integration -coverprofile=coverage.out -covermode=count $(addprefix $(PKG)/,$(pkg)) || exit 1;\ 170 | tail -n +2 coverage.out >> coverage-all.out;) 171 | 172 | .PHONY: compile-e2e 173 | compile-e2e: ## Compile binary to run integration tests 174 | @GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go test -c -o=$(TEST_COMPILE_OUTPUT) ./test/e2e/... 175 | 176 | .PHONY: test-e2e 177 | test-e2e: ## Run integration tests locally 178 | - kubectl create namespace ${NAMESPACE} 179 | operator-sdk test local ./test/e2e --up-local --namespace=${NAMESPACE} 180 | 181 | ##@ General 182 | 183 | .PHONY: help 184 | help: ## Display this help 185 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) -------------------------------------------------------------------------------- /build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi8/ubi-minimal:latest 2 | 3 | ENV OPERATOR=/usr/local/bin/postgresql-operator \ 4 | USER_UID=1001 \ 5 | USER_NAME=postgresql-operator 6 | 7 | # install operator binary 8 | COPY build/_output/bin/postgresql-operator ${OPERATOR} 9 | 10 | COPY build/bin /usr/local/bin 11 | RUN /usr/local/bin/user_setup 12 | 13 | ENTRYPOINT ["/usr/local/bin/entrypoint"] 14 | 15 | USER ${USER_UID} 16 | -------------------------------------------------------------------------------- /build/bin/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | # Quote "$@" so operator arguments containing spaces are forwarded intact. 4 | exec ${OPERATOR} "$@" 5 | -------------------------------------------------------------------------------- /build/bin/user_setup:
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | # ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) 5 | echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd 6 | mkdir -p "${HOME}" 7 | chown "${USER_UID}:0" "${HOME}" 8 | chmod ug+rwx "${HOME}" 9 | 10 | # no need for this script to remain in the image after running 11 | rm "$0" 12 | -------------------------------------------------------------------------------- /cmd/manager/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "flag" 7 | "fmt" 8 | "os" 9 | "runtime" 10 | "strings" 11 | 12 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 13 | _ "k8s.io/client-go/plugin/pkg/client/auth" 14 | "k8s.io/client-go/rest" 15 | 16 | "github.com/dev4devs-com/postgresql-operator/pkg/apis" 17 | "github.com/dev4devs-com/postgresql-operator/pkg/controller" 18 | "github.com/dev4devs-com/postgresql-operator/version" 19 | 20 | "github.com/operator-framework/operator-sdk/pkg/k8sutil" 21 | kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics" 22 | "github.com/operator-framework/operator-sdk/pkg/leader" 23 | "github.com/operator-framework/operator-sdk/pkg/log/zap" 24 | "github.com/operator-framework/operator-sdk/pkg/metrics" 25 | sdkVersion "github.com/operator-framework/operator-sdk/version" 26 | "github.com/spf13/pflag" 27 | v1 "k8s.io/api/core/v1" 28 | "k8s.io/apimachinery/pkg/util/intstr" 29 | "sigs.k8s.io/controller-runtime/pkg/cache" 30 | "sigs.k8s.io/controller-runtime/pkg/client/config" 31 | logf "sigs.k8s.io/controller-runtime/pkg/log" 32 | "sigs.k8s.io/controller-runtime/pkg/manager" 33 | "sigs.k8s.io/controller-runtime/pkg/manager/signals" 34 | ) 35 | 36 | // Change below variables to serve metrics on different host or port. 
37 | var ( 38 | metricsHost = "0.0.0.0" 39 | metricsPort int32 = 8383 40 | operatorMetricsPort int32 = 8686 41 | ) 42 | var log = logf.Log.WithName("cmd") 43 | 44 | func printVersion() { 45 | log.Info(fmt.Sprintf("Operator Version: %s", version.Version)) 46 | log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) 47 | log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) 48 | log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) 49 | } 50 | 51 | func main() { 52 | // Add the zap logger flag set to the CLI. The flag set must 53 | // be added before calling pflag.Parse(). 54 | pflag.CommandLine.AddFlagSet(zap.FlagSet()) 55 | 56 | // Add flags registered by imported packages (e.g. glog and 57 | // controller-runtime) 58 | pflag.CommandLine.AddGoFlagSet(flag.CommandLine) 59 | 60 | pflag.Parse() 61 | 62 | // Use a zap logr.Logger implementation. If none of the zap 63 | // flags are configured (or if the zap flag set is not being 64 | // used), this defaults to a production zap logger. 65 | // 66 | // The logger instantiated here can be changed to any logger 67 | // implementing the logr.Logger interface. This logger will 68 | // be propagated through the whole operator, generating 69 | // uniform and structured logs. 
70 | logf.SetLogger(zap.Logger()) 71 | 72 | printVersion() 73 | 74 | namespace, err := k8sutil.GetWatchNamespace() 75 | if err != nil { 76 | log.Error(err, "Failed to get watch namespace") 77 | os.Exit(1) 78 | } 79 | 80 | // Get a config to talk to the apiserver 81 | cfg, err := config.GetConfig() 82 | if err != nil { 83 | log.Error(err, "") 84 | os.Exit(1) 85 | } 86 | 87 | ctx := context.TODO() 88 | // Become the leader before proceeding 89 | err = leader.Become(ctx, "postgresql-operator-lock") 90 | if err != nil { 91 | log.Error(err, "") 92 | os.Exit(1) 93 | } 94 | 95 | // Set default manager options 96 | options := manager.Options{ 97 | Namespace: namespace, 98 | MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), 99 | } 100 | 101 | // Add support for MultiNamespace set in WATCH_NAMESPACE (e.g ns1,ns2) 102 | // Note that this is not intended to be used for excluding namespaces, this is better done via a Predicate 103 | // Also note that you may face performance issues when using this with a high number of namespaces. 
104 | // More Info: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder 105 | if strings.Contains(namespace, ",") { 106 | options.Namespace = "" 107 | options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ",")) 108 | } 109 | 110 | // Create a new manager to provide shared dependencies and start components 111 | mgr, err := manager.New(cfg, options) 112 | if err != nil { 113 | log.Error(err, "") 114 | os.Exit(1) 115 | } 116 | 117 | log.Info("Registering Components.") 118 | 119 | // Setup Scheme for all resources 120 | if err := apis.AddToScheme(mgr.GetScheme()); err != nil { 121 | log.Error(err, "") 122 | os.Exit(1) 123 | } 124 | 125 | // Setup all Controllers 126 | if err := controller.AddToManager(mgr); err != nil { 127 | log.Error(err, "") 128 | os.Exit(1) 129 | } 130 | 131 | // Add the Metrics Service 132 | addMetrics(ctx, cfg) 133 | 134 | log.Info("Starting the Cmd.") 135 | 136 | // Start the Cmd 137 | if err := mgr.Start(signals.SetupSignalHandler()); err != nil { 138 | log.Error(err, "Manager exited non-zero") 139 | os.Exit(1) 140 | } 141 | } 142 | 143 | // addMetrics will create the Services and Service Monitors to allow the operator export the metrics by using 144 | // the Prometheus operator 145 | func addMetrics(ctx context.Context, cfg *rest.Config) { 146 | // Get the namespace the operator is currently deployed in. 147 | operatorNs, err := k8sutil.GetOperatorNamespace() 148 | if err != nil { 149 | if errors.Is(err, k8sutil.ErrRunLocal) { 150 | log.Info("Skipping CR metrics server creation; not running in a cluster.") 151 | return 152 | } 153 | } 154 | 155 | if err := serveCRMetrics(cfg, operatorNs); err != nil { 156 | log.Info("Could not generate and serve custom resource metrics", "error", err.Error()) 157 | } 158 | 159 | // Add to the below struct any other metrics ports you want to expose. 
160 | servicePorts := []v1.ServicePort{ 161 | {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, 162 | {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, 163 | } 164 | 165 | // Create Service object to expose the metrics port(s). 166 | service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts) 167 | if err != nil { 168 | log.Info("Could not create metrics Service", "error", err.Error()) 169 | } 170 | 171 | // CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources 172 | // necessary to configure Prometheus to scrape metrics from this operator. 173 | services := []*v1.Service{service} 174 | 175 | // The ServiceMonitor is created in the same namespace where the operator is deployed 176 | _, err = metrics.CreateServiceMonitors(cfg, operatorNs, services) 177 | if err != nil { 178 | log.Info("Could not create ServiceMonitor object", "error", err.Error()) 179 | // If this operator is deployed to a cluster without the prometheus-operator running, it will return 180 | // ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation. 181 | if err == metrics.ErrServiceMonitorNotPresent { 182 | log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error()) 183 | } 184 | } 185 | } 186 | 187 | // serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types. 188 | // It serves those metrics on "http://metricsHost:operatorMetricsPort". 189 | func serveCRMetrics(cfg *rest.Config, operatorNs string) error { 190 | // The function below returns a list of filtered operator/CR specific GVKs. For more control, override the GVK list below 191 | // with your own custom logic. 
Note that if you are adding third party API schemas, probably you will need to 192 | // customize this implementation to avoid permissions issues. 193 | filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme) 194 | if err != nil { 195 | return err 196 | } 197 | 198 | // The metrics will be generated from the namespaces which are returned here. 199 | // NOTE that passing nil or an empty list of namespaces in GenerateAndServeCRMetrics will result in an error. 200 | ns, err := kubemetrics.GetNamespacesForMetrics(operatorNs) 201 | if err != nil { 202 | return err 203 | } 204 | 205 | // Generate and serve custom resource specific metrics. 206 | err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort) 207 | if err != nil { 208 | return err 209 | } 210 | return nil 211 | } 212 | -------------------------------------------------------------------------------- /deploy/crds/postgresql.dev4devs.com_v1alpha1_backup_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: postgresql.dev4devs.com/v1alpha1 2 | kind: Backup 3 | metadata: 4 | name: backup 5 | spec: 6 | # --------------------------------- 7 | # ## Backup Container 8 | # ---------------------------- 9 | # See https://github.com/integr8ly/backup-container-image 10 | # --------------------------------- 11 | 12 | # --------------------------------- 13 | # IMPORTANT: In this CR you will find an example of all options and possible configurations. 14 | # However, by default values are applied by the operator if values are not specified below. 
15 | # --------------------------------- 16 | 17 | # --------------------------------- 18 | # ## Default Setup 19 | # --------------------------------- 20 | 21 | # This spec allow you setup the backup frequency 22 | schedule: "0 0 * * *" # daily at 00:00 23 | 24 | # The following specs are required to send the data to your AWS S3 storage 25 | awsS3BucketName: "example-awsS3BucketName" 26 | awsAccessKeyId: "example-awsAccessKeyId" 27 | awsSecretAccessKey: "example-awsSecretAccessKey" 28 | 29 | # --------------------------------- 30 | # ## Customizations Options 31 | # --------------------------------- 32 | 33 | # Change the following spec if you change the name of the Database CR 34 | # databaseCRName: "database" 35 | 36 | # This spec allow you change the : used to perform the backup 37 | # --------------------------------- 38 | # image: "quay.io/integreatly/backup-container:1.0.8" 39 | 40 | # This spec allow you change the "version" of the database which will be installed 41 | # used to perform the backup(It needs be the same used by the Database CR) 42 | # --------------------------------- 43 | # databaseVersion: "9.6" 44 | 45 | # --------------------------------- 46 | # TIP: Following the steps to get the databaseVersion 47 | # --------------------------------- 48 | # 1. Go to the database pod terminal 49 | # 2. Run the command psql as follows. 50 | # //sh-4.2$ psql 51 | # //psql (9.6.10) 52 | # IMPORTANT: Just the first 2 digits should be used. 53 | # --------------------------------- 54 | 55 | # The following information is optional and will be used to tag the data in the AWS bucket 56 | # --------------------------------- 57 | # productName: "postgresql" 58 | 59 | # The following attribute allows you tell for the operator that it should use an pre-existing secret 60 | # with the AWS data info in the format required. 
61 | # See here the template: https://github.com/integr8ly/backup-container-image/blob/master/templates/openshift/sample-config/s3-secret.yaml 62 | # --------------------------------- 63 | # NOTE: If the namespace be not informed then the operator will try to find it in the same namespace where it is applied 64 | # --------------------------------- 65 | # awsSecretName: "example-awsCredentialsSecretName" 66 | # awsSecretNamespace: "example-awsSecretNamespace" 67 | 68 | # --------------------------------- 69 | # EncryptKey (Optional Setup) 70 | # ---------------------------- 71 | 72 | # The following attribute allows you tell for the operator that it should use an pre-existing secret 73 | # with the encrypt data info in the format required. 74 | # See here the template: https://github.com/integr8ly/backup-container-image/blob/master/templates/openshift/sample-config/gpg-secret.yaml 75 | # --------------------------------- 76 | # NOTE: If the namespace be not informed then the operator will try to find it in the same namespace where it is applied 77 | # --------------------------------- 78 | # encryptKeySecretName: "example-encryptKeySecretName" 79 | # encryptKeySecretNamespace: "example-encryptKeySecretNamespace" 80 | 81 | # OR 82 | 83 | # Add the encryption details in order to allow the operator create the secret for you 84 | # See here how to create this key : https://help.github.com/en/articles/generating-a-new-gpg-key 85 | 86 | # base64 encoded public opengpg cert 87 | # --------------------------------- 88 | # gpgPublicKey: "example-gpgPublicKey" 89 | # gpgEmail: "email@example.com" 90 | # gpgTrustModel: "always" 91 | -------------------------------------------------------------------------------- /deploy/crds/postgresql.dev4devs.com_v1alpha1_database_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: postgresql.dev4devs.com/v1alpha1 2 | kind: Database 3 | metadata: 4 | name: database 5 | spec: 6 | # 
--------------------------------- 7 | # IMPORTANT: In this CR you will find an example of all options and possible configurations. 8 | # However, by default values are applied by the operator if values are not specified below. 9 | # --------------------------------- 10 | 11 | # --------------------------------- 12 | # ## Default Setup 13 | # --------------------------------- 14 | 15 | size: 1 16 | databaseMemoryLimit: "512Mi" 17 | databaseMemoryRequest: "128Mi" 18 | databaseCpuLimit: "60m" 19 | databaseCpu: "30m" 20 | databaseStorageRequest: "1Gi" 21 | 22 | # The imaged used in this project is from Red Hat. See more in https://docs.okd.io/latest/using_images/db_images/postgresql.html 23 | image: "centos/postgresql-96-centos7" 24 | 25 | # Environment Variables 26 | # --------------------------------- 27 | # Following are the values which will be used as the key label for the environment variable of the database image. 28 | # NOTES: 29 | # - They need to be as the values expected by the image used 30 | # - If you are using the optional setup to allow the operator looking for this value in a ConfigMap pre-existent then it will look for these keys by default 31 | # --------------------------------- 32 | databaseNameKeyEnvVar: "POSTGRESQL_DATABASE" 33 | databasePasswordKeyEnvVar: "POSTGRESQL_PASSWORD" 34 | databaseUserKeyEnvVar: "POSTGRESQL_USER" 35 | 36 | # The following values will be used fill the env variables. 
(Optional) 37 | # They will be not required if you are using the configMap setup 38 | # --------------------------------- 39 | databaseName: "example" 40 | databasePassword: "postgres" 41 | databaseUser: "postgres" 42 | 43 | # --------------------------------- 44 | # ## Customizations Options 45 | # --------------------------------- 46 | 47 | # Database Container 48 | # --------------------------------- 49 | # Name which will be used to create the container where the database will be running 50 | 51 | # containerName: "database" 52 | # databasePort: 5432 53 | 54 | # Use the following spec if you would like to define the image pull policy 55 | # containerImagePullPolicy: "IfNotPresent" 56 | 57 | # Get Values from ConfigMap 58 | # --------------------------------- 59 | # NOTE: It is very useful if you would like to centralize and share this information with your solutions 60 | 61 | # The following attribute allows you tell for the operator that it should look for the data to create the env vars is some 62 | # ConfigMap which pre-exist in the cluster 63 | # configMapName: "cfgmap-name" 64 | 65 | # The following specs allow you customize the name of the keys which the operator should look for the env vars in the configMap. 
66 | # configMapDatabaseNameKey: "POSTGRESQL_DATABASE" 67 | # configMapDatabasePasswordKey: "POSTGRESQL_PASSWORD" 68 | # configMapDatabaseUserKey: "POSTGRESQL_USER" 69 | 70 | # The following allow you customize the name of the Storage Class that should be used 71 | # databaseStorageClassName: "standard" -------------------------------------------------------------------------------- /deploy/olm-catalog/olm-test/postgresql-operator.operator-group.yaml: -------------------------------------------------------------------------------- 1 | # --------------- 2 | # It is just used to test it in OLM before it be published 3 | # --------------- 4 | apiVersion: operators.coreos.com/v1alpha2 5 | kind: OperatorGroup 6 | metadata: 7 | name: postgresql-operatorgroup 8 | namespace: marketplace 9 | spec: 10 | targetNamespaces: 11 | - marketplace -------------------------------------------------------------------------------- /deploy/olm-catalog/olm-test/postgresql-operator.operator-source.yaml: -------------------------------------------------------------------------------- 1 | # --------------- 2 | # It is just used to test it in OLM before it be published 3 | # --------------- 4 | apiVersion: operators.coreos.com/v1 5 | kind: OperatorSource 6 | metadata: 7 | name: postgresql-operator 8 | namespace: marketplace 9 | spec: 10 | type: appregistry 11 | endpoint: https://quay.io/cnr 12 | # The registryNamespace corresponds to the Quay user created under this URL : https://quay.io/organization/ 13 | # This org or registry will contain different applications published 14 | # Example : https://quay.io/application/dev4devs-com/postgresql-operator-operatorhub 15 | registryNamespace: dev4devs-com 16 | displayName: "Postgresql Operator" 17 | publisher: "Dev4Devs-com" -------------------------------------------------------------------------------- /deploy/olm-catalog/olm-test/postgresql-operator.operator-subscription.yaml: 
-------------------------------------------------------------------------------- 1 | # --------------- 2 | # It is just used to test it in OLM before it be published 3 | # --------------- 4 | apiVersion: operators.coreos.com/v1alpha1 5 | kind: Subscription 6 | metadata: 7 | name: postgresql-operatorsubsription 8 | namespace: marketplace 9 | spec: 10 | channel: alpha 11 | name: postgresql-operator 12 | source: postgresql-operator 13 | sourceNamespace: marketplace -------------------------------------------------------------------------------- /deploy/olm-catalog/postgresql-operator/0.1.1/postgresql.dev4devs.com_backups_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: backups.postgresql.dev4devs.com 5 | spec: 6 | group: postgresql.dev4devs.com 7 | names: 8 | kind: Backup 9 | listKind: BackupList 10 | plural: backups 11 | singular: backup 12 | scope: Namespaced 13 | subresources: 14 | status: {} 15 | validation: 16 | openAPIV3Schema: 17 | properties: 18 | apiVersion: 19 | description: 'APIVersion defines the versioned schema of this representation 20 | of an object. Servers should convert recognized schemas to the latest 21 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 22 | type: string 23 | kind: 24 | description: 'Kind is a string value representing the REST resource this 25 | object represents. Servers may infer this from the endpoint the client 26 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 27 | type: string 28 | metadata: 29 | type: object 30 | spec: 31 | description: BackupSpec defines the desired state of Backup 32 | properties: 33 | awsAccessKeyId: 34 | description: 'Key ID of AWS S3 storage. Default Value: nil Required 35 | to create the Secret with the data to allow send the backup files 36 | to AWS S3 storage.' 37 | type: string 38 | awsS3BucketName: 39 | description: 'Name of AWS S3 storage. Default Value: nil Required to 40 | create the Secret with the AWS data to allow send the backup files 41 | to AWS S3 storage.' 42 | type: string 43 | awsSecretAccessKey: 44 | description: 'Secret/Token of AWS S3 storage. Default Value: nil Required 45 | to create the Secret with the data to allow send the backup files 46 | to AWS S3 storage.' 47 | type: string 48 | awsSecretName: 49 | description: 'Name of the secret with the AWS data credentials pre-existing 50 | in the cluster Default Value: nil See here the template: https://github.com/integr8ly/backup-container-image/blob/master/templates/openshift/sample-config/s3-secret.yaml' 51 | type: string 52 | awsSecretNamespace: 53 | description: 'Namespace of the secret with the AWS data credentials 54 | pre-existing in the cluster Default Value: nil NOTE: If the namespace 55 | be not informed then the operator will try to find it in the same 56 | namespace where it is applied' 57 | type: string 58 | databaseCRName: 59 | description: 'Name of the Database CR applied which this backup will 60 | work with Default Value: "database"' 61 | type: string 62 | databaseVersion: 63 | description: 'Database version. (E.g 9.6). Default Value: <9.6> IMPORTANT: 64 | Just the first 2 digits should be used.' 
65 | type: string 66 | encryptKeySecretName: 67 | description: 'Name of the secret with the Encrypt data pre-existing 68 | in the cluster Default Value: nil See here the template: https://github.com/integr8ly/backup-container-image/blob/master/templates/openshift/sample-config/gpg-secret.yaml' 69 | type: string 70 | encryptKeySecretNamespace: 71 | description: 'Namespace of the secret with the Encrypt data pre-existing 72 | in the cluster Default Value: nil NOTE: If the namespace be not informed 73 | then the operator will try to find it in the same namespace where 74 | it is applied' 75 | type: string 76 | gpgEmail: 77 | description: 'GPG email to create the EncryptionKeySecret with this 78 | data Default Value: nil See here how to create this key : https://help.github.com/en/articles/generating-a-new-gpg-key' 79 | type: string 80 | gpgPublicKey: 81 | description: 'GPG public key to create the EncryptionKeySecret with 82 | this data Default Value: nil See here how to create this key : https://help.github.com/en/articles/generating-a-new-gpg-key' 83 | type: string 84 | gpgTrustModel: 85 | description: 'GPG trust model to create the EncryptionKeySecret with 86 | this data. the default value is true when it is empty. Default Value: 87 | nil See here how to create this key : https://help.github.com/en/articles/generating-a-new-gpg-key' 88 | type: string 89 | image: 90 | description: 'Image:tag used to do the backup. Default Value: 91 | More Info: https://github.com/integr8ly/backup-container-image' 92 | type: string 93 | productName: 94 | description: 'Used to create the directory where the files will be stored 95 | Default Value: ' 96 | type: string 97 | schedule: 98 | description: 'Schedule period for the CronJob. 
Default Value: <0 0 * 99 | * *> daily at 00:00' 100 | type: string 101 | type: object 102 | status: 103 | description: BackupStatus defines the observed state of Backup 104 | properties: 105 | awsCredentialsSecretNamespace: 106 | description: Namespace of the secret object with the Aws data to allow 107 | send the backup files to the AWS storage 108 | type: string 109 | awsSecretName: 110 | description: Name of the secret object with the Aws data to allow send 111 | the backup files to the AWS storage 112 | type: string 113 | backupStatus: 114 | description: Will be as "OK when all objects are created successfully 115 | type: string 116 | cronJobName: 117 | description: Name of the CronJob object created and managed by it to 118 | schedule the backup job 119 | type: string 120 | cronJobStatus: 121 | description: Status of the CronJob object 122 | properties: 123 | active: 124 | description: A list of pointers to currently running jobs. 125 | items: 126 | description: ObjectReference contains enough information to let 127 | you inspect or modify the referred object. 128 | properties: 129 | apiVersion: 130 | description: API version of the referent. 131 | type: string 132 | fieldPath: 133 | description: 'If referring to a piece of an object instead 134 | of an entire object, this string should contain a valid 135 | JSON/Go field access statement, such as desiredState.manifest.containers[2]. 136 | For example, if the object reference is to a container within 137 | a pod, this would take on a value like: "spec.containers{name}" 138 | (where "name" refers to the name of the container that triggered 139 | the event) or if no container name is specified "spec.containers[2]" 140 | (container with index 2 in this pod). This syntax is chosen 141 | only to have some well-defined way of referencing a part 142 | of an object. TODO: this design is not final and this field 143 | is subject to change in the future.' 
144 | type: string 145 | kind: 146 | description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 147 | type: string 148 | name: 149 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' 150 | type: string 151 | namespace: 152 | description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' 153 | type: string 154 | resourceVersion: 155 | description: 'Specific resourceVersion to which this reference 156 | is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' 157 | type: string 158 | uid: 159 | description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' 160 | type: string 161 | type: object 162 | type: array 163 | lastScheduleTime: 164 | description: Information when was the last time the job was successfully 165 | scheduled. 166 | format: date-time 167 | type: string 168 | type: object 169 | dbSecretName: 170 | description: Name of the secret object created with the database data 171 | to allow the backup image connect to the database 172 | type: string 173 | encryptKeySecretName: 174 | description: Name of the secret object with the Encryption GPG Key 175 | type: string 176 | encryptKeySecretNamespace: 177 | description: Namespace of the secret object with the Encryption GPG 178 | Key 179 | type: string 180 | hasEncryptKey: 181 | description: Boolean value which has true when it has an EncryptionKey 182 | to be used to send the backup files 183 | type: boolean 184 | isDatabasePodFound: 185 | description: Boolean value which has true when the Database Pod was 186 | found in order to create the secret with the database data to allow 187 | the backup image connect into it. 
188 | type: boolean 189 | isDatabaseServiceFound: 190 | description: Boolean value which has true when the Service Database 191 | Pod was found in order to create the secret with the database data 192 | to allow the backup image connect into it. 193 | type: boolean 194 | required: 195 | - awsCredentialsSecretNamespace 196 | - awsSecretName 197 | - backupStatus 198 | - cronJobName 199 | - cronJobStatus 200 | - dbSecretName 201 | - encryptKeySecretName 202 | - encryptKeySecretNamespace 203 | - hasEncryptKey 204 | - isDatabasePodFound 205 | - isDatabaseServiceFound 206 | type: object 207 | type: object 208 | version: v1alpha1 209 | versions: 210 | - name: v1alpha1 211 | served: true 212 | storage: true 213 | -------------------------------------------------------------------------------- /deploy/olm-catalog/postgresql-operator/0.1.2/postgresql.dev4devs.com_backups_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: backups.postgresql.dev4devs.com 5 | spec: 6 | group: postgresql.dev4devs.com 7 | names: 8 | kind: Backup 9 | listKind: BackupList 10 | plural: backups 11 | singular: backup 12 | scope: Namespaced 13 | subresources: 14 | status: {} 15 | validation: 16 | openAPIV3Schema: 17 | properties: 18 | apiVersion: 19 | description: 'APIVersion defines the versioned schema of this representation 20 | of an object. Servers should convert recognized schemas to the latest 21 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 22 | type: string 23 | kind: 24 | description: 'Kind is a string value representing the REST resource this 25 | object represents. Servers may infer this from the endpoint the client 26 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 27 | type: string 28 | metadata: 29 | type: object 30 | spec: 31 | description: BackupSpec defines the desired state of Backup 32 | properties: 33 | awsAccessKeyId: 34 | description: 'Key ID of AWS S3 storage. Default Value: nil Required 35 | to create the Secret with the data to allow send the backup files 36 | to AWS S3 storage.' 37 | type: string 38 | awsS3BucketName: 39 | description: 'Name of AWS S3 storage. Default Value: nil Required to 40 | create the Secret with the AWS data to allow send the backup files 41 | to AWS S3 storage.' 42 | type: string 43 | awsSecretAccessKey: 44 | description: 'Secret/Token of AWS S3 storage. Default Value: nil Required 45 | to create the Secret with the data to allow send the backup files 46 | to AWS S3 storage.' 47 | type: string 48 | awsSecretName: 49 | description: 'Name of the secret with the AWS data credentials pre-existing 50 | in the cluster Default Value: nil See here the template: https://github.com/integr8ly/backup-container-image/blob/master/templates/openshift/sample-config/s3-secret.yaml' 51 | type: string 52 | awsSecretNamespace: 53 | description: 'Namespace of the secret with the AWS data credentials 54 | pre-existing in the cluster Default Value: nil NOTE: If the namespace 55 | be not informed then the operator will try to find it in the same 56 | namespace where it is applied' 57 | type: string 58 | databaseCRName: 59 | description: 'Name of the Database CR applied which this backup will 60 | work with Default Value: "database"' 61 | type: string 62 | databaseVersion: 63 | description: 'Database version. (E.g 9.6). Default Value: <9.6> IMPORTANT: 64 | Just the first 2 digits should be used.' 
65 | type: string 66 | encryptKeySecretName: 67 | description: 'Name of the secret with the Encrypt data pre-existing 68 | in the cluster Default Value: nil See here the template: https://github.com/integr8ly/backup-container-image/blob/master/templates/openshift/sample-config/gpg-secret.yaml' 69 | type: string 70 | encryptKeySecretNamespace: 71 | description: 'Namespace of the secret with the Encrypt data pre-existing 72 | in the cluster Default Value: nil NOTE: If the namespace be not informed 73 | then the operator will try to find it in the same namespace where 74 | it is applied' 75 | type: string 76 | gpgEmail: 77 | description: 'GPG email to create the EncryptionKeySecret with this 78 | data Default Value: nil See here how to create this key : https://help.github.com/en/articles/generating-a-new-gpg-key' 79 | type: string 80 | gpgPublicKey: 81 | description: 'GPG public key to create the EncryptionKeySecret with 82 | this data Default Value: nil See here how to create this key : https://help.github.com/en/articles/generating-a-new-gpg-key' 83 | type: string 84 | gpgTrustModel: 85 | description: 'GPG trust model to create the EncryptionKeySecret with 86 | this data. the default value is true when it is empty. Default Value: 87 | nil See here how to create this key : https://help.github.com/en/articles/generating-a-new-gpg-key' 88 | type: string 89 | image: 90 | description: 'Image:tag used to do the backup. Default Value: 91 | More Info: https://github.com/integr8ly/backup-container-image' 92 | type: string 93 | productName: 94 | description: 'Used to create the directory where the files will be stored 95 | Default Value: ' 96 | type: string 97 | schedule: 98 | description: 'Schedule period for the CronJob. 
Default Value: <0 0 * 99 | * *> daily at 00:00' 100 | type: string 101 | type: object 102 | status: 103 | description: BackupStatus defines the observed state of Backup 104 | properties: 105 | awsCredentialsSecretNamespace: 106 | description: Namespace of the secret object with the Aws data to allow 107 | send the backup files to the AWS storage 108 | type: string 109 | awsSecretName: 110 | description: Name of the secret object with the Aws data to allow send 111 | the backup files to the AWS storage 112 | type: string 113 | backupStatus: 114 | description: Will be as "OK when all objects are created successfully 115 | type: string 116 | cronJobName: 117 | description: Name of the CronJob object created and managed by it to 118 | schedule the backup job 119 | type: string 120 | cronJobStatus: 121 | description: Status of the CronJob object 122 | properties: 123 | active: 124 | description: A list of pointers to currently running jobs. 125 | items: 126 | description: ObjectReference contains enough information to let 127 | you inspect or modify the referred object. 128 | properties: 129 | apiVersion: 130 | description: API version of the referent. 131 | type: string 132 | fieldPath: 133 | description: 'If referring to a piece of an object instead 134 | of an entire object, this string should contain a valid 135 | JSON/Go field access statement, such as desiredState.manifest.containers[2]. 136 | For example, if the object reference is to a container within 137 | a pod, this would take on a value like: "spec.containers{name}" 138 | (where "name" refers to the name of the container that triggered 139 | the event) or if no container name is specified "spec.containers[2]" 140 | (container with index 2 in this pod). This syntax is chosen 141 | only to have some well-defined way of referencing a part 142 | of an object. TODO: this design is not final and this field 143 | is subject to change in the future.' 
144 | type: string 145 | kind: 146 | description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 147 | type: string 148 | name: 149 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' 150 | type: string 151 | namespace: 152 | description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' 153 | type: string 154 | resourceVersion: 155 | description: 'Specific resourceVersion to which this reference 156 | is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' 157 | type: string 158 | uid: 159 | description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' 160 | type: string 161 | type: object 162 | type: array 163 | lastScheduleTime: 164 | description: Information when was the last time the job was successfully 165 | scheduled. 166 | format: date-time 167 | type: string 168 | type: object 169 | dbSecretName: 170 | description: Name of the secret object created with the database data 171 | to allow the backup image connect to the database 172 | type: string 173 | encryptKeySecretName: 174 | description: Name of the secret object with the Encryption GPG Key 175 | type: string 176 | encryptKeySecretNamespace: 177 | description: Namespace of the secret object with the Encryption GPG 178 | Key 179 | type: string 180 | hasEncryptKey: 181 | description: Boolean value which has true when it has an EncryptionKey 182 | to be used to send the backup files 183 | type: boolean 184 | isDatabasePodFound: 185 | description: Boolean value which has true when the Database Pod was 186 | found in order to create the secret with the database data to allow 187 | the backup image connect into it. 
188 | type: boolean 189 | isDatabaseServiceFound: 190 | description: Boolean value which has true when the Service Database 191 | Pod was found in order to create the secret with the database data 192 | to allow the backup image connect into it. 193 | type: boolean 194 | required: 195 | - awsCredentialsSecretNamespace 196 | - awsSecretName 197 | - backupStatus 198 | - cronJobName 199 | - cronJobStatus 200 | - dbSecretName 201 | - encryptKeySecretName 202 | - encryptKeySecretNamespace 203 | - hasEncryptKey 204 | - isDatabasePodFound 205 | - isDatabaseServiceFound 206 | type: object 207 | type: object 208 | version: v1alpha1 209 | versions: 210 | - name: v1alpha1 211 | served: true 212 | storage: true 213 | -------------------------------------------------------------------------------- /deploy/olm-catalog/postgresql-operator/postgresql-operator.package.yaml: -------------------------------------------------------------------------------- 1 | channels: 2 | - currentCSV: postgresql-operator.v0.1.1 3 | name: alpha 4 | defaultChannel: alpha 5 | packageName: postgresql-operator-dev4devs-com 6 | -------------------------------------------------------------------------------- /deploy/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: postgresql-operator 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | name: postgresql-operator 10 | template: 11 | metadata: 12 | labels: 13 | name: postgresql-operator 14 | spec: 15 | serviceAccountName: postgresql-operator 16 | containers: 17 | - name: postgresql-operator 18 | # Replace this with the built image name 19 | image: quay.io/dev4devs-com/postgresql-operator:master 20 | command: 21 | - postgresql-operator 22 | imagePullPolicy: Always 23 | resources: 24 | limits: 25 | cpu: 60m 26 | memory: 128Mi 27 | requests: 28 | cpu: 30m 29 | memory: 64Mi 30 | env: 31 | - name: WATCH_NAMESPACE 32 | valueFrom: 33 | 
fieldRef: 34 | fieldPath: metadata.namespace 35 | - name: POD_NAME 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: metadata.name 39 | - name: OPERATOR_NAME 40 | value: "postgresql-operator" 41 | -------------------------------------------------------------------------------- /deploy/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: postgresql-operator 5 | rules: 6 | - apiGroups: 7 | - '*' 8 | resources: 9 | - '*' 10 | verbs: 11 | - get 12 | - list 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - pods/exec 17 | verbs: 18 | - create 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - deployments 23 | - daemonsets 24 | - replicasets 25 | - statefulsets 26 | - serviceaccounts 27 | - rolebindings 28 | - pods 29 | - services 30 | - services/finalizers 31 | - endpoints 32 | - persistentvolumeclaims 33 | - events 34 | - configmaps 35 | - secrets 36 | verbs: 37 | - '*' 38 | - apiGroups: 39 | - batch 40 | resources: 41 | - cronjobs 42 | verbs: 43 | - get 44 | - list 45 | - patch 46 | - update 47 | - watch 48 | - create 49 | - update 50 | - patch 51 | - delete 52 | - apiGroups: 53 | - apps 54 | resources: 55 | - deployments 56 | - daemonsets 57 | - replicasets 58 | - statefulsets 59 | verbs: 60 | - create 61 | - delete 62 | - get 63 | - list 64 | - patch 65 | - update 66 | - watch 67 | - apiGroups: 68 | - monitoring.coreos.com 69 | resources: 70 | - servicemonitors 71 | verbs: 72 | - get 73 | - create 74 | - apiGroups: 75 | - apps 76 | resourceNames: 77 | - postgresql-operator 78 | - postgresql 79 | - backup 80 | resources: 81 | - deployments/finalizers 82 | verbs: 83 | - update 84 | - apiGroups: 85 | - "" 86 | resources: 87 | - pods 88 | verbs: 89 | - get 90 | - apiGroups: 91 | - apps 92 | resources: 93 | - replicasets 94 | - deployments 95 | verbs: 96 | - get 97 | - apiGroups: 98 | - postgresql-operator.dev4devs.com 99 | resources: 100 | - '*' 
101 | - backups 102 | verbs: 103 | - '*' 104 | - apiGroups: 105 | - postgresql.dev4devs.com 106 | resources: 107 | - '*' 108 | - backups 109 | - databases 110 | verbs: 111 | - '*' 112 | -------------------------------------------------------------------------------- /deploy/role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: postgresql-operator 5 | subjects: 6 | - kind: ServiceAccount 7 | name: postgresql-operator 8 | # Replace this with the namespace where the operator will be deployed. 9 | namespace: postgresql-operator 10 | roleRef: 11 | kind: ClusterRole 12 | name: postgresql-operator 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /deploy/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: postgresql-operator 5 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/dev4devs-com/postgresql-operator 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/go-logr/logr v0.1.0 7 | github.com/go-openapi/spec v0.19.3 8 | github.com/operator-framework/operator-sdk v0.18.1 9 | github.com/spf13/pflag v1.0.5 10 | k8s.io/api v0.18.2 11 | k8s.io/apimachinery v0.18.2 12 | k8s.io/client-go v12.0.0+incompatible 13 | k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c 14 | sigs.k8s.io/controller-runtime v0.6.0 15 | ) 16 | 17 | replace ( 18 | github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible // Required by OLM 19 | k8s.io/client-go => k8s.io/client-go v0.18.2 // Required by prometheus-operator 20 | ) 21 | 
-------------------------------------------------------------------------------- /pkg/apis/addtoscheme_postgresql_v1alpha1.go: -------------------------------------------------------------------------------- 1 | package apis 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | ) 6 | 7 | func init() { 8 | // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back 9 | AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/apis/apis.go: -------------------------------------------------------------------------------- 1 | package apis 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/runtime" 5 | ) 6 | 7 | // AddToSchemes may be used to add all resources defined in the project to a Scheme 8 | var AddToSchemes runtime.SchemeBuilder 9 | 10 | // AddToScheme adds all Resources to the Scheme 11 | func AddToScheme(s *runtime.Scheme) error { 12 | return AddToSchemes.AddToScheme(s) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/apis/postgresql/group.go: -------------------------------------------------------------------------------- 1 | // Package postgresql contains postgresql API versions. 2 | // 3 | // This file ensures Go source parsers acknowledge the postgresql package 4 | // and any child packages. It can be removed if any other Go source files are 5 | // added to this package. 6 | package postgresql 7 | -------------------------------------------------------------------------------- /pkg/apis/postgresql/v1alpha1/database_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | appsv1 "k8s.io/api/apps/v1" 5 | v1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
10 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 11 | 12 | // DatabaseSpec defines the desired state of Database 13 | // +k8s:openapi-gen=true 14 | type DatabaseSpec struct { 15 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 16 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 17 | // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html 18 | 19 | // Value for the Database Environment Variable (spec.databaseNameKeyEnvVar). 20 | // Default value: example 21 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 22 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Database Name" 23 | DatabaseName string `json:"databaseName,omitempty"` 24 | 25 | // Value for the Database Environment Variable (spec.databasePasswordKeyEnvVar). 26 | // Default value: postgres 27 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 28 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Database Password" 29 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:password" 30 | DatabasePassword string `json:"databasePassword,omitempty"` 31 | 32 | // Value for the Database Environment Variable (spec.databaseUserKeyEnvVar). 33 | // Default value: postgres 34 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 35 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Database User" 36 | DatabaseUser string `json:"databaseUser,omitempty"` 37 | 38 | // Key Value for the Database Environment Variable in order to inform the database name 39 | // Note that each database version/image can expect a different value for it. 
40 | // Default value: nil 41 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 42 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="EnvVar Key (Database Name)" 43 | DatabaseNameKeyEnvVar string `json:"databaseNameKeyEnvVar,omitempty"` 44 | 45 | // Key Value for the Database Environment Variable in order to inform the database password 46 | // Note that each database version/image can expect a different value for it. 47 | // Default value: nil 48 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 49 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="EnvVar Key (Database Password)" 50 | DatabasePasswordKeyEnvVar string `json:"databasePasswordKeyEnvVar,omitempty"` 51 | 52 | // Key Value for the Database Environment Variable in order to inform the database user 53 | // Note that each database version/image can expect a different value for it. 54 | // Default value: nil 55 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 56 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="EnvVar Key (Database User)" 57 | DatabaseUserKeyEnvVar string `json:"databaseUserKeyEnvVar,omitempty"` 58 | 59 | // Value for the Database Environment Variable in order to define the port which it should use. 
It will be used in its container as well 60 | DatabasePort int32 `json:"databasePort,omitempty"` 61 | 62 | // Quantity of instances 63 | // Default value: 1 64 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 65 | Size int32 `json:"size,omitempty"` 66 | 67 | // Database image:tag 68 | // Default value: centos/postgresql-96-centos7 69 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 70 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Image:tag" 71 | Image string `json:"image,omitempty"` 72 | 73 | // Name to create the Database container 74 | ContainerName string `json:"containerName,omitempty"` 75 | 76 | // Limit of Memory which will be available for the database container 77 | // Default value: 512Mi 78 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 79 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Database Memory Limit" 80 | DatabaseMemoryLimit string `json:"databaseMemoryLimit,omitempty"` 81 | 82 | // Limit of Memory Request which will be available for the database container 83 | // Default value: 128Mi 84 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 85 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Database Memory Request" 86 | DatabaseMemoryRequest string `json:"databaseMemoryRequest,omitempty"` 87 | 88 | // Limit of Storage Request which will be available for the database container 89 | // Default value: 1Gi 90 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 91 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Database Storage Request" 92 | DatabaseStorageRequest string `json:"databaseStorageRequest,omitempty"` 93 | 94 | // CPU resource request which will be available for the database container 95 | // Default value: 10Mi 96 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 
97 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Database CPU" 98 | DatabaseCpu string `json:"databaseCpu,omitempty"` 99 | 100 | // Limit of CPU request which will be available for the database container 101 | // Default value: 20Mi 102 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 103 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Database CPU Limit" 104 | DatabaseCpuLimit string `json:"databaseCpuLimit,omitempty"` 105 | 106 | // Policy definition to pull the Database Image 107 | // More info: https://kubernetes.io/docs/concepts/containers/images/ 108 | // Default value: 109 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 110 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Container Image Pull Policy" 111 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:imagePullPolicy" 112 | ContainerImagePullPolicy v1.PullPolicy `json:"containerImagePullPolicy,omitempty"` 113 | 114 | // Name of the ConfigMap where the operator should looking for the EnvVars keys and/or values only 115 | // Default value: nil 116 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 117 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="ConfigMap name" 118 | ConfigMapName string `json:"configMapName,omitempty"` 119 | 120 | // Name the Storage Class name of the PVC which will be created for the Database 121 | // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims 122 | // Default value: standard 123 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 124 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="ConfigMap name" 125 | DatabaseStorageClassName string `json:"databaseStorageClassName,omitempty"` 126 | 127 | // Name of 
the configMap key where the operator should look for the value for the database name for its env var 128 | // Default value: nil 129 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 130 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="ConfigMap Database Key" 131 | ConfigMapDatabaseNameKey string `json:"configMapDatabaseNameKey,omitempty"` 132 | 133 | // Name of the configMap key where the operator should look for the value for the database password for its env var 134 | // Default value: nil 135 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 136 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="ConfigMap Password Key" 137 | ConfigMapDatabasePasswordKey string `json:"configMapDatabasePasswordKey,omitempty"` 138 | 139 | // Name of the configMap key where the operator should look for the value for the database user for its env var 140 | // Default value: nil 141 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true 142 | // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="ConfigMap User Key" 143 | ConfigMapDatabaseUserKey string `json:"configMapDatabaseUserKey,omitempty"` 144 | } 145 | 146 | // DatabaseStatus defines the observed state of Database 147 | // +k8s:openapi-gen=true 148 | type DatabaseStatus struct { 149 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 150 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 151 | // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html 152 | 153 | // Name of the PersistentVolumeClaim created and managed by it 154 | // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true 155 | // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="v1.PersistentVolumeClaimStatus" 156 | PVCStatus 
v1.PersistentVolumeClaimStatus `json:"pvcStatus"` 157 | 158 | // Status of the Database Deployment created and managed by it 159 | // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true 160 | // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="appsv1.DeploymentStatus" 161 | DeploymentStatus appsv1.DeploymentStatus `json:"deploymentStatus"` 162 | 163 | // Status of the Database Service created and managed by it 164 | // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true 165 | // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="v1.ServiceStatus" 166 | ServiceStatus v1.ServiceStatus `json:"serviceStatus"` 167 | 168 | // It will be as "OK" when all objects are created successfully 169 | // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true 170 | // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Database Status" 171 | DatabaseStatus string `json:"databaseStatus"` 172 | } 173 | 174 | // Database is the Schema for the Database API 175 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 176 | 177 | // +k8s:openapi-gen=true 178 | // +kubebuilder:subresource:status 179 | // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Database Database" 180 | // +operator-sdk:gen-csv:customresourcedefinitions.resources="Deployment,v1,\"A Kubernetes Deployment\"" 181 | // +operator-sdk:gen-csv:customresourcedefinitions.resources="Service,v1,\"A Kubernetes Service\"" 182 | // +operator-sdk:gen-csv:customresourcedefinitions.resources="PersistentVolumeClaim,v1,\"A Kubernetes PersistentVolumeClaim\"" 183 | type Database struct { 184 | metav1.TypeMeta `json:",inline"` 185 | metav1.ObjectMeta `json:"metadata,omitempty"` 186 | 187 | Spec DatabaseSpec `json:"spec,omitempty"` 188 | Status DatabaseStatus `json:"status,omitempty"` 189 | } 190 | 191 | //
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 192 | 193 | // DatabaseList contains a list of Database 194 | type DatabaseList struct { 195 | metav1.TypeMeta `json:",inline"` 196 | metav1.ListMeta `json:"metadata,omitempty"` 197 | Items []Database `json:"items"` 198 | } 199 | 200 | func init() { 201 | SchemeBuilder.Register(&Database{}, &DatabaseList{}) 202 | } 203 | -------------------------------------------------------------------------------- /pkg/apis/postgresql/v1alpha1/doc.go: -------------------------------------------------------------------------------- 1 | // Package v1alpha1 contains API Schema definitions for the postgresql v1alpha1 API group 2 | // +k8s:deepcopy-gen=package,register 3 | // +groupName=postgresql.dev4devs.com 4 | package v1alpha1 5 | -------------------------------------------------------------------------------- /pkg/apis/postgresql/v1alpha1/register.go: -------------------------------------------------------------------------------- 1 | // NOTE: Boilerplate only. Ignore this file. 
2 | 3 | // Package v1alpha1 contains API Schema definitions for the postgresql v1alpha1 API group 4 | // +k8s:deepcopy-gen=package,register 5 | // +groupName=postgresql.dev4devs.com 6 | package v1alpha1 7 | 8 | import ( 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "sigs.k8s.io/controller-runtime/pkg/scheme" 11 | ) 12 | 13 | var ( 14 | // SchemeGroupVersion is group version used to register these objects 15 | SchemeGroupVersion = schema.GroupVersion{Group: "postgresql.dev4devs.com", Version: "v1alpha1"} 16 | 17 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 18 | SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} 19 | ) 20 | -------------------------------------------------------------------------------- /pkg/apis/postgresql/v1alpha1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | // +build !ignore_autogenerated 2 | 3 | // Code generated by operator-sdk. DO NOT EDIT. 4 | 5 | package v1alpha1 6 | 7 | import ( 8 | runtime "k8s.io/apimachinery/pkg/runtime" 9 | ) 10 | 11 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 12 | func (in *Backup) DeepCopyInto(out *Backup) { 13 | *out = *in 14 | out.TypeMeta = in.TypeMeta 15 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 16 | out.Spec = in.Spec 17 | in.Status.DeepCopyInto(&out.Status) 18 | return 19 | } 20 | 21 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. 22 | func (in *Backup) DeepCopy() *Backup { 23 | if in == nil { 24 | return nil 25 | } 26 | out := new(Backup) 27 | in.DeepCopyInto(out) 28 | return out 29 | } 30 | 31 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
func (in *Backup) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupList) DeepCopyInto(out *BackupList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Backup, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList.
func (in *BackupList) DeepCopy() *BackupList {
	if in == nil {
		return nil
	}
	out := new(BackupList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BackupList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec.
func (in *BackupSpec) DeepCopy() *BackupSpec {
	if in == nil {
		return nil
	}
	out := new(BackupSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupStatus) DeepCopyInto(out *BackupStatus) {
	*out = *in
	in.CronJobStatus.DeepCopyInto(&out.CronJobStatus)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus.
func (in *BackupStatus) DeepCopy() *BackupStatus {
	if in == nil {
		return nil
	}
	out := new(BackupStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Database) DeepCopyInto(out *Database) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.
func (in *Database) DeepCopy() *Database {
	if in == nil {
		return nil
	}
	out := new(Database)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Database) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DatabaseList) DeepCopyInto(out *DatabaseList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Database, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseList.
func (in *DatabaseList) DeepCopy() *DatabaseList {
	if in == nil {
		return nil
	}
	out := new(DatabaseList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DatabaseList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DatabaseSpec) DeepCopyInto(out *DatabaseSpec) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseSpec.
func (in *DatabaseSpec) DeepCopy() *DatabaseSpec {
	if in == nil {
		return nil
	}
	out := new(DatabaseSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DatabaseStatus) DeepCopyInto(out *DatabaseStatus) {
	*out = *in
	in.PVCStatus.DeepCopyInto(&out.PVCStatus)
	in.DeploymentStatus.DeepCopyInto(&out.DeploymentStatus)
	in.ServiceStatus.DeepCopyInto(&out.ServiceStatus)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseStatus.
192 | func (in *DatabaseStatus) DeepCopy() *DatabaseStatus { 193 | if in == nil { 194 | return nil 195 | } 196 | out := new(DatabaseStatus) 197 | in.DeepCopyInto(out) 198 | return out 199 | } 200 | -------------------------------------------------------------------------------- /pkg/config/backup.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | const ( 4 | schedule = "0 0 * * *" 5 | bakupImage = "quay.io/integreatly/backup-container:1.0.8" 6 | databaseVersion = "9.6" 7 | databaseCRName = "database" 8 | ) 9 | 10 | type DefaultBackupConfig struct { 11 | Schedule string `json:"schedule"` 12 | Image string `json:"image"` 13 | DatabaseVersion string `json:"databaseVersion"` 14 | DatabaseCRName string `json:"databaseCRName"` 15 | } 16 | 17 | func NewDefaultBackupConfig() *DefaultBackupConfig { 18 | return &DefaultBackupConfig{ 19 | Schedule: schedule, 20 | Image: bakupImage, 21 | DatabaseVersion: databaseVersion, 22 | DatabaseCRName: databaseCRName, 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /pkg/config/database.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | const ( 4 | size = 1 5 | databaseName = "solution" 6 | databasePassword = "postgres" 7 | databaseUser = "postgres" 8 | databaseNameKeyEnvVar = "POSTGRESQL_DATABASE" 9 | databasePasswordKeyEnvVar = "POSTGRESQL_PASSWORD" 10 | databaseUserKeyEnvVar = "POSTGRESQL_USER" 11 | image = "centos/postgresql-96-centos7" 12 | containerName = "database" 13 | databasePort = 5432 14 | databaseMemoryLimit = "512Mi" 15 | databaseMemoryRequest = "128Mi" 16 | databaseStorageRequest = "1Gi" 17 | databaseStorageClassName = "standard" 18 | databaseCpuLimit = "60m" 19 | databaseCpu = "30m" 20 | ) 21 | 22 | type DefaultDatabaseConfig struct { 23 | Size int32 `json:"size"` 24 | Image string `json:"image"` 25 | DatabaseName string `json:"databaseName"` 26 | 
DatabasePassword string `json:"databasePassword"` 27 | DatabaseUser string `json:"databaseUser"` 28 | DatabaseNameKeyEnvVar string `json:"databaseNameKeyEnvVar"` 29 | DatabasePasswordKeyEnvVar string `json:"databasePasswordKeyEnvVar"` 30 | DatabaseUserKeyEnvVar string `json:"databaseUserKeyEnvVar"` 31 | ContainerName string `json:"containerName"` 32 | DatabasePort int32 `json:"databasePort"` 33 | DatabaseMemoryLimit string `json:"databaseMemoryLimit"` 34 | DatabaseMemoryRequest string `json:"databaseMemoryRequest"` 35 | DatabaseCpuLimit string `json:"databaseCpuLimit"` 36 | DatabaseCpu string `json:"databaseCpu"` 37 | DatabaseStorageRequest string `json:"databaseStorageRequest"` 38 | DatabaseStorageClassName string `json:"databaseStorageClassName"` 39 | } 40 | 41 | func NewDatabaseConfig() *DefaultDatabaseConfig { 42 | return &DefaultDatabaseConfig{ 43 | Size: size, 44 | Image: image, 45 | DatabaseName: databaseName, 46 | DatabasePassword: databasePassword, 47 | DatabaseUser: databaseUser, 48 | DatabaseNameKeyEnvVar: databaseNameKeyEnvVar, 49 | DatabasePasswordKeyEnvVar: databasePasswordKeyEnvVar, 50 | DatabaseUserKeyEnvVar: databaseUserKeyEnvVar, 51 | ContainerName: containerName, 52 | DatabasePort: databasePort, 53 | DatabaseMemoryLimit: databaseMemoryLimit, 54 | DatabaseMemoryRequest: databaseMemoryRequest, 55 | DatabaseCpu: databaseCpu, 56 | DatabaseCpuLimit: databaseCpuLimit, 57 | DatabaseStorageRequest: databaseStorageRequest, 58 | DatabaseStorageClassName: databaseStorageClassName, 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /pkg/controller/add_backup.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/controller/backup" 5 | ) 6 | 7 | func init() { 8 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
9 | AddToManagerFuncs = append(AddToManagerFuncs, backup.Add) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/controller/add_database.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/controller/database" 5 | ) 6 | 7 | func init() { 8 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 9 | AddToManagerFuncs = append(AddToManagerFuncs, database.Add) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/controller/backup/controller.go: -------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/service" 6 | "github.com/dev4devs-com/postgresql-operator/pkg/utils" 7 | "k8s.io/api/batch/v1beta1" 8 | "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "k8s.io/client-go/rest" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | "sigs.k8s.io/controller-runtime/pkg/controller" 14 | "sigs.k8s.io/controller-runtime/pkg/handler" 15 | "sigs.k8s.io/controller-runtime/pkg/manager" 16 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 17 | "sigs.k8s.io/controller-runtime/pkg/source" 18 | ) 19 | 20 | /** 21 | * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller 22 | * business logic. Delete these comments after modifying this file.* 23 | */ 24 | 25 | // Add creates a new Backup Controller and adds it to the Manager. The Manager will set fields on the Controller 26 | // and Start it when the Manager is Started. 
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileBackup{client: mgr.GetClient(), scheme: mgr.GetScheme(), config: mgr.GetConfig()}
}

// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New(utils.BackupControllerName, mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// Watch for changes to primary resource Backup
	err = c.Watch(&source.Kind{Type: &v1alpha1.Backup{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}

	// Watch CronJob resource controlled and created by it
	if err := service.Watch(c, &v1beta1.CronJob{}, true, &v1alpha1.Backup{}); err != nil {
		return err
	}

	// Watch Secret resource controlled and created by it
	if err := service.Watch(c, &v1.Secret{}, true, &v1alpha1.Backup{}); err != nil {
		return err
	}

	// Watch Service resource managed by the Database
	// NOTE: isController=false here — the Service is owned by the Database CR, not the Backup.
	if err := service.Watch(c, &v1.Service{}, false, &v1alpha1.Database{}); err != nil {
		return err
	}

	return nil
}

// blank assignment to verify that ReconcileBackup implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileBackup{}

// ReconcileBackup reconciles a Backup object
type ReconcileBackup struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	config *rest.Config
	scheme *runtime.Scheme
	// dbPod and dbService cache the Database Pod/Service looked up during
	// createResources; they are read when building the database secret.
	dbPod     *v1.Pod
	dbService *v1.Service
}

// Reconcile reads that state of the cluster for a Backup object and makes changes based on the state read
// and what is in the Backup.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileBackup) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	reqLogger := utils.GetLoggerByRequestAndController(request, utils.BackupControllerName)
	reqLogger.Info("Reconciling Backup ...")

	bkp, err := service.FetchBackupCR(request.Name, request.Namespace, r.client)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			reqLogger.Info("Backup resource not found. Ignoring since object must be deleted.")
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		reqLogger.Error(err, "Failed to get Backup.")
		return reconcile.Result{}, err
	}

	// Add const values for mandatory specs
	reqLogger.Info("Adding backup mandatory specs")
	utils.AddBackupMandatorySpecs(bkp)

	// Create mandatory objects for the Backup
	if err := r.createResources(bkp, request); err != nil {
		reqLogger.Error(err, "Failed to create and update the secondary resource required for the Backup CR")
		return reconcile.Result{}, err
	}

	// Update the CR status for the primary resource
	if err := r.createUpdateCRStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create and update the status in the Backup CR")
		return reconcile.Result{}, err
	}

	reqLogger.Info("Stop Reconciling Backup ...")
	return reconcile.Result{}, nil
}

//createResources will create and update the secondary resource which are required in order to make works successfully the primary resource(CR)
func (r *ReconcileBackup) createResources(bkp *v1alpha1.Backup, request reconcile.Request) error {
	reqLogger := utils.GetLoggerByRequestAndController(request, utils.BackupControllerName)
	reqLogger.Info("Creating secondary Backup resources ...")

	// Check if the database instance was created
	db, err := service.FetchDatabaseCR(bkp.Spec.DatabaseCRName, request.Namespace, r.client)
	if err != nil {
		reqLogger.Error(err, "Failed to fetch Database instance/cr")
		return err
	}

	// Get the Database Pod created by the Database Controller
	// NOTE: This data is required in order to create the secrets which will access the database container to do the backup
	if err := r.getDatabasePod(bkp, db); err != nil {
		reqLogger.Error(err, "Failed to get a Database pod")
		return err
	}

	// Get the Database Service created by the Database Controller
	// NOTE: This data is required in order to create the secrets which will access the database container to do the backup
	if err := r.getDatabaseService(bkp, db); err != nil {
		reqLogger.Error(err, "Failed to get a Database service")
		return err
	}

	// Checks if the secret with the database is created, if not create one
	if err := r.createDatabaseSecret(bkp, db); err != nil {
		reqLogger.Error(err, "Failed to create the Database secret")
		return err
	}

	// Check if the secret with the aws data is created, if not create one
	// NOTE: The user can config in the CR to use a pre-existing one by informing the name
	if err := r.createAwsSecret(bkp); err != nil {
		reqLogger.Error(err, "Failed to create the Aws secret")
		return err
	}

	// Check if the encryptionKey is created, if not create one
	// NOTE: The user can config in the CR to use a pre-existing one by informing the name
	if err := r.createEncryptionKey(bkp); err != nil {
		reqLogger.Error(err, "Failed to create a Enc Secret")
		return err
	}

	// Check if the cronJob is created, if not create one
	if err := r.createCronJob(bkp); err != nil {
		reqLogger.Error(err, "Failed to create the CronJob")
		return err
	}
	return nil
}

//createUpdateCRStatus will create and update the status in the CR applied in the cluster
func (r *ReconcileBackup) createUpdateCRStatus(request reconcile.Request) error {
	reqLogger := utils.GetLoggerByRequestAndController(request, utils.BackupControllerName)
	reqLogger.Info("Create/Update Backup status ...")

	if err := r.updatePodDatabaseFoundStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create/update isDatabasePodFound status")
		return err
	}

	if err := r.updateDbServiceFoundStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create/update isDatabaseServiceFound status")
		return err
	}

	if err := r.updateCronJobStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create/update cronJob status")
		return err
	}

	if err := r.updateDBSecretStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create/update dbSecret status")
		return err
	}

	if err := r.updateAWSSecretStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create/update awsSecret status")
		return err
	}

	if err := r.updateEncSecretStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create/update encSecret status")
		return err
	}

	if err := r.updateBackupStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create/update backup status")
		return err
	}
	return nil
}
--------------------------------------------------------------------------------
/pkg/controller/backup/controller_test.go:
--------------------------------------------------------------------------------
package backup

import (
	"context"
	"github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1"
	"github.com/dev4devs-com/postgresql-operator/pkg/utils"
	"k8s.io/api/batch/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"testing"
)

// TestReconcileBackup drives the Backup reconciler against a fake client
// seeded with the fixture objects of each case, then asserts which secondary
// resources (aws/db/enc secrets, CronJob) exist afterwards.
func TestReconcileBackup(t *testing.T) {
	type fields struct {
		objs []runtime.Object
	}
	type args struct {
		bkpInstance v1alpha1.Backup
	}
	tests := []struct {
		name          string
		fields        fields
		args          args
		wantRequeue   bool
		wantAwsSecret bool
		wantDBSecret  bool
		wantEncSecret bool
		wantCronJob   bool
		wantErr       bool
	}{
		{
			name: "Should work with default values as key of env vars variables",
			fields: fields{
				objs: []runtime.Object{&bkpInstanceWithMandatorySpec, &dbInstanceWithConfigMap, &podDatabaseConfigMap, &serviceDatabase, &configMapDefault},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},
			wantErr:       false,
			wantRequeue:   false,
			wantAwsSecret: true,
			wantDBSecret:  true,
			wantEncSecret: false,
			wantCronJob:   true,
		},
		{
			name: "Should fail with wrong database name key mapped when it will build the db data secret",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithMandatorySpec,
					&dbInstanceWithConfigMap,
					&podDatabaseConfigMap,
					&serviceDatabase,
					&configMapInvalidDatabaseKey,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},
			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		},
		{
			name: "Should fail with wrong database user key mapped when it will build the db data secret",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithMandatorySpec,
					&dbInstanceWithConfigMap,
					&podDatabaseConfigMap,
					&serviceDatabase,
					&configMapInvalidUserKey,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},
			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		},
		{
			name: "Should fail with wrong database pwd key mapped when it will build the db data secret",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithMandatorySpec,
					&dbInstanceWithConfigMap,
					&podDatabaseConfigMap,
					&serviceDatabase,
					&configMapInvalidPwdKey,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},
			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		},
		{
			name: "Should work with customized keys for the db env vars",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithMandatorySpec,
					&dbInstanceWithConfigMapAndCustomizeKeys,
					&podDatabaseConfigMap,
					&serviceDatabase,
					&configMapInvalidDatabaseKey,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},
			wantErr:       false,
			wantRequeue:   false,
			wantAwsSecret: true,
			wantDBSecret:  true,
			wantEncSecret: false,
			wantCronJob:   true,
		},
		{
			name: "Should work with encryption secret data and create this secret",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithEncSecretData,
					&dbInstanceWithoutSpec,
					&podDatabase,
					&serviceDatabase,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithEncSecretData,
			},

			wantErr:       false,
			wantRequeue:   false,
			wantAwsSecret: true,
			wantDBSecret:  true,
			wantEncSecret: true,
			wantCronJob:   true,
		},
		{
			name: "Should work with secret names and found the secrets",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithSecretNames,
					&dbInstanceWithoutSpec,
					&podDatabase,
					&serviceDatabase,
					&awsSecretWithSecretNames,
					&encSecretWithSecretNames,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithSecretNames,
			},

			wantErr:       false,
			wantRequeue:   false,
			wantAwsSecret: true,
			wantDBSecret:  true,
			wantEncSecret: true,
			wantCronJob:   true,
		},
		{
			name: "Should fail when the aws secret informed by the user do not exist",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithSecretNames,
					&dbInstanceWithoutSpec,
					&podDatabase,
					&serviceDatabase,
					&encSecretWithSecretNames,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithSecretNames,
			},

			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  true,
			wantEncSecret: true,
			wantCronJob:   true,
		},
		{
			name: "Should fail when the enc secret informed by the user do not exist",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithSecretNames,
					&dbInstanceWithoutSpec,
					&podDatabase,
					&serviceDatabase,
					&awsSecretWithSecretNames,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithSecretNames,
			},

			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: true,
			wantDBSecret:  true,
			wantEncSecret: false,
			wantCronJob:   false,
		},
		{
			name: "Should fail when it is missing the pod database",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithMandatorySpec,
					&dbInstanceWithoutSpec,
					&serviceDatabase,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},

			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		},
		{
			name: "Should fail when it is missing the service from database",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithMandatorySpec,
					&dbInstanceWithoutSpec,
					&podDatabase,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},

			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		},
		// NOTE(review): this case is an exact duplicate of the previous one
		// (same name and same fixtures) — presumably a copy/paste slip; confirm
		// which scenario it was meant to cover.
		{
			name: "Should fail when it is missing the service from database",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithMandatorySpec,
					&dbInstanceWithoutSpec,
					&podDatabase,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},

			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		}, {
			name: "Should fail since has database CR was not applied",
			fields: fields{
				objs: []runtime.Object{
					&bkpInstanceWithMandatorySpec,
				},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},

			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		},
		{
			name: "Should fail because is missing the Database CR",
			fields: fields{
				objs: []runtime.Object{&bkpInstanceWithMandatorySpec, &podDatabaseConfigMap, &serviceDatabase, &configMapDefault},
			},
			args: args{
				bkpInstance: bkpInstanceWithMandatorySpec,
			},
			wantErr:       true,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		},
		{
			name: "When Backup CR was not applied",
			fields: fields{
				objs: []runtime.Object{&dbInstanceWithoutSpec, &podDatabaseConfigMap, &serviceDatabase, &configMapDefault},
			},
			wantErr:       false,
			wantRequeue:   false,
			wantAwsSecret: false,
			wantDBSecret:  false,
			wantEncSecret: false,
			wantCronJob:   false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {

			r := buildReconcileWithFakeClientWithMocks(tt.fields.objs)

			// mock request to simulate Reconcile() being called on an event for a watched resource
			req := reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name:      tt.args.bkpInstance.Name,
					Namespace: tt.args.bkpInstance.Namespace,
				},
			}

			res, err := r.Reconcile(req)
			if (err != nil) != tt.wantErr {
				t.Errorf("TestReconcileBackup reconcile: error = %v, wantErr %v", err, tt.wantErr)
				return
			}

			awsSecret := &corev1.Secret{}
			err = r.client.Get(context.TODO(), types.NamespacedName{Name: utils.GetAWSSecretName(&tt.args.bkpInstance), Namespace: utils.GetAwsSecretNamespace(&tt.args.bkpInstance)}, awsSecret)
			if (err == nil) != tt.wantAwsSecret {
				t.Errorf("TestReconcileBackup to get aws secret error = %v, wantErr %v", err, tt.wantAwsSecret)
				return
			}

			dbSecret := &corev1.Secret{}
			err = r.client.Get(context.TODO(), types.NamespacedName{Name: utils.DbSecretPrefix + tt.args.bkpInstance.Name, Namespace: tt.args.bkpInstance.Namespace}, dbSecret)
			if (err == nil) != tt.wantDBSecret {
				t.Errorf("TestReconcileBackup to get db secret error = %v, wantErr %v", err, tt.wantDBSecret)
				return
			}

			encSecret := &corev1.Secret{}
			err = r.client.Get(context.TODO(), types.NamespacedName{Name: utils.GetEncSecretName(&tt.args.bkpInstance), Namespace: utils.GetEncSecretNamespace(&tt.args.bkpInstance)}, encSecret)
			if (err == nil) != tt.wantEncSecret {
				t.Errorf("TestReconcileBackup to get enc secret error = %v, wantErr %v", err, tt.wantEncSecret)
				return
			}

			cronJob := &v1beta1.CronJob{}
			err = r.client.Get(context.TODO(), types.NamespacedName{Name: tt.args.bkpInstance.Name, Namespace: tt.args.bkpInstance.Namespace}, cronJob)
			if (err == nil) != tt.wantCronJob {
				t.Errorf("TestReconcileBackup to get cronjob error = %v, wantErr %v", err, tt.wantCronJob)
				return
			}

			if (res.Requeue) != tt.wantRequeue {
				t.Errorf("TestReconcileBackup expect request to requeue res.Requeue = %v, wantRequeue %v", res.Requeue, tt.wantRequeue)
				return
			}
		})
	}
}
--------------------------------------------------------------------------------
/pkg/controller/backup/create_resources.go:
-------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 7 | "github.com/dev4devs-com/postgresql-operator/pkg/resource" 8 | "github.com/dev4devs-com/postgresql-operator/pkg/service" 9 | "github.com/dev4devs-com/postgresql-operator/pkg/utils" 10 | ) 11 | 12 | // Set in the ReconcileBackup the Pod database created by Database 13 | // NOTE: This data is required in order to create the secrets which will access the database container to do the backup 14 | func (r *ReconcileBackup) getDatabasePod(bkp *v1alpha1.Backup, db *v1alpha1.Database) error { 15 | dbPod, err := service.FetchDatabasePod(bkp, db, r.client) 16 | if err != nil || dbPod == nil { 17 | r.dbPod = nil 18 | err := fmt.Errorf("Unable to find the Database Pod") 19 | return err 20 | } 21 | r.dbPod = dbPod 22 | return nil 23 | } 24 | 25 | // Set in the ReconcileBackup the service database created by Database 26 | // NOTE: This data is required in order to create the secrets which will access the database container to do the backup 27 | func (r *ReconcileBackup) getDatabaseService(bkp *v1alpha1.Backup, db *v1alpha1.Database) error { 28 | dbService, err := service.FetchDatabaseService(bkp, db, r.client) 29 | if err != nil || dbService == nil { 30 | r.dbService = nil 31 | err := fmt.Errorf("Unable to find the Database Service") 32 | return err 33 | } 34 | r.dbService = dbService 35 | return nil 36 | } 37 | 38 | // Check if the cronJob is created, if not create one 39 | func (r *ReconcileBackup) createCronJob(bkp *v1alpha1.Backup) error { 40 | if _, err := service.FetchCronJob(bkp.Name, bkp.Namespace, r.client); err != nil { 41 | if err := r.client.Create(context.TODO(), resource.NewBackupCronJob(bkp, r.scheme)); err != nil { 42 | return err 43 | } 44 | } 45 | return nil 46 | } 47 | 48 | // Check if the encryptionKey is created, if not create one 49 | // 
NOTE: The user can config in the CR to use a pre-existing one by informing the name 50 | func (r *ReconcileBackup) createEncryptionKey(bkp *v1alpha1.Backup) error { 51 | if utils.IsEncryptionKeyOptionConfig(bkp) { 52 | if _, err := service.FetchSecret(utils.GetEncSecretNamespace(bkp), utils.GetEncSecretName(bkp), r.client); err != nil { 53 | // The user can just inform the name of the Secret which is already applied in the cluster 54 | if utils.IsEncKeySetupByName(bkp) { 55 | return err 56 | } else { 57 | secretData, secretStringData := createEncDataMaps(bkp) 58 | encSecret := resource.NewBackupSecret(bkp, utils.EncSecretPrefix, secretData, secretStringData, r.scheme) 59 | if err := r.client.Create(context.TODO(), encSecret); err != nil { 60 | return err 61 | } 62 | } 63 | } 64 | } 65 | return nil 66 | } 67 | 68 | // createAwsSecret checks if the secret with the aws data is created, if not create one 69 | // NOTE: The user can config in the CR to use a pre-existing one by informing the name 70 | func (r *ReconcileBackup) createAwsSecret(bkp *v1alpha1.Backup) error { 71 | if _, err := service.FetchSecret(utils.GetAwsSecretNamespace(bkp), utils.GetAWSSecretName(bkp), r.client); err != nil { 72 | // The user can just inform the name of the Secret which is already applied in the cluster 73 | if !utils.IsAwsKeySetupByName(bkp) { 74 | secretData := createAwsDataByteMap(bkp) 75 | awsSecret := resource.NewBackupSecret(bkp, utils.AwsSecretPrefix, secretData, nil, r.scheme) 76 | if err := r.client.Create(context.TODO(), awsSecret); err != nil { 77 | return err 78 | } 79 | } 80 | } 81 | return nil 82 | } 83 | 84 | // createDatabaseSecret checks if the secret with the database is created, if not create one 85 | func (r *ReconcileBackup) createDatabaseSecret(bkp *v1alpha1.Backup, db *v1alpha1.Database) error { 86 | dbSecretName := utils.DbSecretPrefix + bkp.Name 87 | if _, err := service.FetchSecret(bkp.Namespace, dbSecretName, r.client); err != nil { 88 | secretData, err := 
r.buildDBSecretData(bkp, db)
		if err != nil {
			return err
		}
		dbSecret := resource.NewBackupSecret(bkp, utils.DbSecretPrefix, secretData, nil, r.scheme)
		if err := r.client.Create(context.TODO(), dbSecret); err != nil {
			return err
		}
	}
	return nil
}
-------------------------------------------------------------------------------- /pkg/controller/backup/fakeclient_test.go: --------------------------------------------------------------------------------
package backup

import (
	"github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// buildReconcileWithFakeClientWithMocks return reconcile with fake client, schemes and mock objects
func buildReconcileWithFakeClientWithMocks(objs []runtime.Object) *ReconcileBackup {
	s := scheme.Scheme

	// Register the project custom types so the fake client can serve them.
	s.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.Backup{})
	s.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.Database{})

	// create a fake client to mock API calls with the mock objects
	cl := fake.NewFakeClientWithScheme(s, objs...)

	// create a ReconcileBackup with the scheme, the fake client and the mock DB pod/service
	return &ReconcileBackup{client: cl, scheme: s, dbPod: &podDatabase, dbService: &serviceDatabase}
}
-------------------------------------------------------------------------------- /pkg/controller/backup/helpers.go: --------------------------------------------------------------------------------
package backup

import (
	"fmt"
	"github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1"
	"github.com/dev4devs-com/postgresql-operator/pkg/service"
	"github.com/dev4devs-com/postgresql-operator/pkg/utils"
)

// DbSecret keep the data which will be used in the DB secret
type DbSecret struct {
	databaseName string // database name resolved from the DB container env vars
	user         string // database user resolved from the DB container env vars
	pwd          string // database password resolved from the DB container env vars
	host         string // in-cluster DNS name of the database Service
	superuser    string // always "false" (see newDBSecret)
	dbVersion    string // taken from the Backup CR spec
}

// HelperDbSecret just help build the Map Data for the DB Secret
type HelperDbSecret struct {
	envVarName   string // name of the container env var being inspected
	envVarValue  string // inline value of the env var (may be empty when sourced from a ConfigMap)
	cfgName      string // name of the referenced ConfigMap, when the env var uses ValueFrom
	cfgKey       string // key inside the referenced ConfigMap
	cfgNamespace string // namespace used to look up the ConfigMap (the Backup CR namespace)
}

// buildDBSecretData will returns the data required to create the database secret according to the configuration
// NOTE: The user can:
// - Customize the environment variables keys as values that should be used with
// - Inform the name and namespace of an Config Map as the keys which has the values which should be used (E.g.
user, password and database name already setup for another application ) 33 | func (r *ReconcileBackup) buildDBSecretData(bkp *v1alpha1.Backup, db *v1alpha1.Database) (map[string][]byte, error) { 34 | 35 | dbSecret := r.newDBSecret(bkp) 36 | 37 | for i := 0; i < len(r.dbPod.Spec.Containers[0].Env); i++ { 38 | 39 | helper := r.newHelperDbSecret(i, bkp) 40 | var err error 41 | 42 | switch helper.envVarName { 43 | case utils.GetEnvVarKey(db.Spec.ConfigMapDatabaseNameKey, db.Spec.DatabaseNameKeyEnvVar): 44 | dbSecret.databaseName, err = r.getEnvVarValue(dbSecret.databaseName, dbSecret, helper) 45 | if err != nil { 46 | return nil, err 47 | } 48 | case utils.GetEnvVarKey(db.Spec.ConfigMapDatabaseUserKey, db.Spec.DatabaseUserKeyEnvVar): 49 | dbSecret.user, err = r.getEnvVarValue(dbSecret.user, dbSecret, helper) 50 | if err != nil { 51 | return nil, err 52 | } 53 | case utils.GetEnvVarKey(db.Spec.ConfigMapDatabasePasswordKey, db.Spec.DatabasePasswordKeyEnvVar): 54 | dbSecret.pwd, err = r.getEnvVarValue(dbSecret.pwd, dbSecret, helper) 55 | if err != nil { 56 | return nil, err 57 | } 58 | } 59 | } 60 | 61 | return dbSecret.createMap(), nil 62 | } 63 | 64 | // getEnvVarValue will return the value that should be used for the Key informed 65 | func (r *ReconcileBackup) getEnvVarValue(value string, dbSecret *DbSecret, helper *HelperDbSecret) (string, error) { 66 | value = helper.envVarValue 67 | if value == "" { 68 | value = r.getKeyValueFromConfigMap(helper) 69 | if value == "" { 70 | return "", helper.newErrorUnableToGetKeyFrom() 71 | } 72 | } 73 | return value, nil 74 | } 75 | 76 | // newHelperDbSecret is a strtuct to keep the data in the loop in order to help fid the key and values which should be used 77 | func (r *ReconcileBackup) newHelperDbSecret(i int, bkp *v1alpha1.Backup) *HelperDbSecret { 78 | dt := new(HelperDbSecret) 79 | dt.envVarName = r.dbPod.Spec.Containers[0].Env[i].Name 80 | dt.envVarValue = r.dbPod.Spec.Containers[0].Env[i].Value 81 | dt.cfgNamespace = 
bkp.Namespace 82 | if r.dbPod.Spec.Containers[0].Env[i].ValueFrom != nil { 83 | dt.cfgName = r.dbPod.Spec.Containers[0].Env[i].ValueFrom.ConfigMapKeyRef.Name 84 | dt.cfgKey = r.dbPod.Spec.Containers[0].Env[i].ValueFrom.ConfigMapKeyRef.Key 85 | } 86 | return dt 87 | } 88 | 89 | // newDBSecret will create the DbSecret with the data which is required to add in its secret 90 | func (r *ReconcileBackup) newDBSecret(bkp *v1alpha1.Backup) *DbSecret { 91 | db := new(DbSecret) 92 | db.host = r.dbService.Name + "." + bkp.Namespace + ".svc" 93 | db.superuser = "false" 94 | db.dbVersion = bkp.Spec.DatabaseVersion 95 | return db 96 | } 97 | 98 | // newErrorUnableToGetKeyFrom returns an error when is not possible find the key into the configMap and namespace in order 99 | // to create the mandatory envvar for the database 100 | func (dt *HelperDbSecret) newErrorUnableToGetKeyFrom() error { 101 | return fmt.Errorf("Unable to get the key (%v) in the configMap (%v) in the namespace (%v) to create the secret", 102 | dt.cfgKey, dt.cfgName, dt.cfgNamespace) 103 | } 104 | 105 | // getKeyValueFromConfigMap returns the value of some key defined in the ConfigMap 106 | func (r *ReconcileBackup) getKeyValueFromConfigMap(dt *HelperDbSecret) string { 107 | // search for ConfigMap 108 | cfg, err := service.FetchConfigMap(dt.cfgName, dt.cfgNamespace, r.client) 109 | if err != nil { 110 | return "" 111 | } 112 | // Get ENV value 113 | return cfg.Data[dt.cfgKey] 114 | } 115 | 116 | // createMap returns the a map with the data in the []byte format required to create the database secret 117 | func (data *DbSecret) createMap() map[string][]byte { 118 | return map[string][]byte{ 119 | "POSTGRES_USERNAME": []byte(data.user), 120 | "POSTGRES_PASSWORD": []byte(data.pwd), 121 | "POSTGRES_DATABASE": []byte(data.databaseName), 122 | "POSTGRES_HOST": []byte(data.host), 123 | "POSTGRES_SUPERUSER": []byte(data.superuser), 124 | "VERSION": []byte(data.dbVersion), 125 | } 126 | } 127 | 128 | func 
createAwsDataByteMap(bkp *v1alpha1.Backup) map[string][]byte { 129 | dataByte := map[string][]byte{ 130 | "AWS_S3_BUCKET_NAME": []byte(bkp.Spec.AwsS3BucketName), 131 | "AWS_ACCESS_KEY_ID": []byte(bkp.Spec.AwsAccessKeyId), 132 | "AWS_SECRET_ACCESS_KEY": []byte(bkp.Spec.AwsSecretAccessKey), 133 | } 134 | return dataByte 135 | } 136 | 137 | func createEncDataMaps(bkp *v1alpha1.Backup) (map[string][]byte, map[string]string) { 138 | dataByte := map[string][]byte{ 139 | "GPG_PUBLIC_KEY": []byte(bkp.Spec.GpgPublicKey), 140 | } 141 | 142 | dataString := map[string]string{ 143 | "GPG_RECIPIENT": bkp.Spec.GpgEmail, 144 | "GPG_TRUST_MODEL": bkp.Spec.GpgTrustModel, 145 | } 146 | return dataByte, dataString 147 | } 148 | -------------------------------------------------------------------------------- /pkg/controller/backup/mocks_test.go: -------------------------------------------------------------------------------- 1 | package backup 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/utils" 6 | "k8s.io/api/batch/v1beta1" 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ) 10 | 11 | // Centralized mock objects for use in tests 12 | var ( 13 | 14 | /** 15 | BKP CR using mandatory specs 16 | */ 17 | bkpInstanceWithMandatorySpec = v1alpha1.Backup{ 18 | ObjectMeta: metav1.ObjectMeta{ 19 | Name: "backup", 20 | Namespace: "postgresql-operator", 21 | }, 22 | } 23 | 24 | awsSecretWithMadatorySpec = corev1.Secret{ 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: utils.GetAWSSecretName(&bkpInstanceWithMandatorySpec), 27 | Namespace: utils.GetAwsSecretNamespace(&bkpInstanceWithMandatorySpec), 28 | }, 29 | } 30 | 31 | cronJobWithMadatorySpec = v1beta1.CronJob{ 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Name: bkpInstanceWithMandatorySpec.Name, 34 | Namespace: bkpInstanceWithMandatorySpec.Namespace, 35 | }, 36 | } 37 | 38 | dbSecretWithMadatorySpec = 
corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      utils.DbSecretPrefix + bkpInstanceWithMandatorySpec.Name,
			Namespace: bkpInstanceWithMandatorySpec.Namespace,
		},
	}

	/**
	BKP CR to test when the user pass the name of the secrets
	*/

	bkpInstanceWithSecretNames = v1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "backup",
			Namespace: "postgresql-operator",
		},
		Spec: v1alpha1.BackupSpec{
			EncryptKeySecretName:      "enc-secret-test",
			EncryptKeySecretNamespace: "postgresql-operator",
			AwsSecretName:             "aws-secret-test",
			AwsSecretNamespace:        "postgresql-operator",
		},
	}

	awsSecretWithSecretNames = corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      utils.GetAWSSecretName(&bkpInstanceWithSecretNames),
			Namespace: utils.GetAwsSecretNamespace(&bkpInstanceWithSecretNames),
		},
	}

	croJobWithSecretNames = v1beta1.CronJob{
		ObjectMeta: metav1.ObjectMeta{
			Name:      bkpInstanceWithSecretNames.Name,
			Namespace: bkpInstanceWithSecretNames.Namespace,
		},
	}

	encSecretWithSecretNames = corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      utils.GetEncSecretName(&bkpInstanceWithSecretNames),
			Namespace: utils.GetEncSecretNamespace(&bkpInstanceWithSecretNames),
		},
	}

	dbSecretWithSecretNames = corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      utils.DbSecretPrefix + bkpInstanceWithSecretNames.Name,
			Namespace: bkpInstanceWithSecretNames.Namespace,
		},
	}

	/**
	BKP CR to test when the user pass the secret data
	*/

	bkpInstanceWithEncSecretData = v1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "backup",
			Namespace: "postgresql-operator",
		},
		Spec: v1alpha1.BackupSpec{
			GpgPublicKey:  "example-gpgPublicKey",
			GpgEmail:      "email@gmai.com",
			GpgTrustModel: "always",
		},
	}

	/**
	Mock of Database resource
	*/

	dbInstanceWithoutSpec = v1alpha1.Database{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "database",
			Namespace: "postgresql-operator",
		},
	}

	// Database Pod with inline env var values (no ConfigMap references).
	podDatabase = corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "database-test",
			Namespace: "postgresql-operator",
			Labels:    utils.GetLabels(dbInstanceWithoutSpec.Name),
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Image: "postgresql",
				Name:  "postgresql",
				Ports: []corev1.ContainerPort{{
					ContainerPort: 5000,
					Protocol:      "TCP",
				}},
				Env: []corev1.EnvVar{
					corev1.EnvVar{
						Name:  "PGDATABASE",
						Value: "test",
					},
					corev1.EnvVar{
						Name:  "PGUSER",
						Value: "test",
					},
					corev1.EnvVar{
						Name:  "PGPASSWORD",
						Value: "test",
					},
					{
						Name:  "PGDATA",
						Value: "/var/lib/pgsql/data",
					},
				},
				VolumeMounts: []corev1.VolumeMount{
					{
						Name:      "test",
						MountPath: "/var/lib/pgsql/data",
					},
				},
			}},
		},
	}

	dbInstanceWithConfigMap = v1alpha1.Database{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "database",
			Namespace: "postgresql-operator",
		},
		Spec: v1alpha1.DatabaseSpec{
			ConfigMapName:             "config-map-test",
			DatabaseNameKeyEnvVar:     "POSTGRESQL_DATABASE",
			DatabasePasswordKeyEnvVar: "POSTGRESQL_PASSWORD",
			DatabaseUserKeyEnvVar:     "POSTGRESQL_USER",
			DatabaseName:              "solution-database-name",
			DatabasePassword:          "postgres",
			DatabaseUser:              "postgresql",
		},
	}

	// Database Pod whose env vars are sourced from a ConfigMap (ValueFrom/ConfigMapKeyRef).
	podDatabaseConfigMap = corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "database-test",
			Namespace: "postgresql-operator",
			Labels:    utils.GetLabels(dbInstanceWithConfigMap.Name),
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Image:           dbInstanceWithConfigMap.Spec.Image,
				Name:            dbInstanceWithConfigMap.Spec.ContainerName,
				ImagePullPolicy: dbInstanceWithConfigMap.Spec.ContainerImagePullPolicy,
				Ports: []corev1.ContainerPort{{
					ContainerPort: dbInstanceWithConfigMap.Spec.DatabasePort,
					Protocol:      "TCP",
				}},
				Env: []corev1.EnvVar{
					corev1.EnvVar{
						Name: dbInstanceWithConfigMap.Spec.DatabaseNameKeyEnvVar,
						ValueFrom: &corev1.EnvVarSource{
							ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
								LocalObjectReference: corev1.LocalObjectReference{
									Name: dbInstanceWithConfigMap.Spec.ConfigMapName,
								},
								Key: utils.GetEnvVarKey(dbInstanceWithConfigMap.Spec.ConfigMapDatabaseNameKey, dbInstanceWithConfigMap.Spec.DatabaseNameKeyEnvVar),
							},
						},
					},
					corev1.EnvVar{
						Name: dbInstanceWithConfigMap.Spec.DatabaseUserKeyEnvVar,
						ValueFrom: &corev1.EnvVarSource{
							ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
								LocalObjectReference: corev1.LocalObjectReference{
									Name: dbInstanceWithConfigMap.Spec.ConfigMapName,
								},
								Key: utils.GetEnvVarKey(dbInstanceWithConfigMap.Spec.ConfigMapDatabaseUserKey, dbInstanceWithConfigMap.Spec.DatabaseUserKeyEnvVar),
							},
						},
					},
					corev1.EnvVar{
						Name: dbInstanceWithConfigMap.Spec.DatabasePasswordKeyEnvVar,
						ValueFrom: &corev1.EnvVarSource{
							ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
								LocalObjectReference: corev1.LocalObjectReference{
									Name: dbInstanceWithConfigMap.Spec.ConfigMapName,
								},
								Key: utils.GetEnvVarKey(dbInstanceWithConfigMap.Spec.ConfigMapDatabasePasswordKey, dbInstanceWithConfigMap.Spec.DatabasePasswordKeyEnvVar),
							},
						},
					},
					{
						Name:  "PGDATA",
						Value: "/var/lib/pgsql/data",
					},
				},
				VolumeMounts: []corev1.VolumeMount{
					{
						Name:      dbInstanceWithConfigMap.Name,
						MountPath: "/var/lib/pgsql/data",
					},
				},
				LivenessProbe: &corev1.Probe{
					Handler: corev1.Handler{
						Exec: &corev1.ExecAction{
							Command: []string{
								"/usr/libexec/check-container",
								"'--live'",
							},
						},
					},
					FailureThreshold:    3,
					InitialDelaySeconds: 120,
					PeriodSeconds:       10,
					TimeoutSeconds:      10,
					SuccessThreshold:    1,
				},
				ReadinessProbe: &corev1.Probe{
					Handler: corev1.Handler{
						Exec: &corev1.ExecAction{
							Command: []string{
								"/usr/libexec/check-container",
							},
						},
					},
					FailureThreshold:    3,
					InitialDelaySeconds: 5,
					PeriodSeconds:       10,
					TimeoutSeconds:      1,
					SuccessThreshold:    1,
				},
				TerminationMessagePath: "/dev/termination-log",
			}},
		},
	}

	serviceDatabase = corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "database",
			Namespace: "postgresql-operator",
			Labels:    utils.GetLabels(dbInstanceWithoutSpec.Name),
		},
	}

	configMapDefault = corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "config-map-test",
			Namespace: "postgresql-operator",
		},
		Data: map[string]string{
			"POSTGRESQL_DATABASE": "solution-database-name",
			"POSTGRESQL_PASSWORD": "postgres",
			"POSTGRESQL_USER":     "postgresql",
		},
	}

	configMapOtherKeyValues = corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "config-otherkeys",
			Namespace: "postgresql-operator",
		},
		Data: map[string]string{
			dbInstanceWithConfigMap.Spec.DatabaseNameKeyEnvVar:     "dbname",
			dbInstanceWithConfigMap.Spec.DatabasePasswordKeyEnvVar: "root",
			dbInstanceWithConfigMap.Spec.DatabaseUserKeyEnvVar:     "root",
		},
	}

	// ConfigMap deliberately missing the database-name key.
	configMapInvalidDatabaseKey = corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "config-otherkeys",
			Namespace: "postgresql-operator",
		},
		Data: map[string]string{
			"invalid": "dbname",
			dbInstanceWithConfigMap.Spec.DatabaseUserKeyEnvVar:     "root",
			dbInstanceWithConfigMap.Spec.DatabasePasswordKeyEnvVar: "root",
		},
	}

	// ConfigMap deliberately missing the user key.
	configMapInvalidUserKey = corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "config-otherkeys",
			Namespace: "postgresql-operator",
		},
		Data: map[string]string{
			dbInstanceWithConfigMap.Spec.DatabaseNameKeyEnvVar: "dbname",
			"invalid": "root",
			dbInstanceWithConfigMap.Spec.DatabasePasswordKeyEnvVar: "root",
		},
	}

	// ConfigMap deliberately missing the password key.
	configMapInvalidPwdKey = corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "config-otherkeys",
			Namespace: "postgresql-operator",
		},
		Data: map[string]string{
			dbInstanceWithConfigMap.Spec.DatabaseNameKeyEnvVar: "dbname",
			dbInstanceWithConfigMap.Spec.DatabaseUserKeyEnvVar: "root",
			"invalid": "root",
		},
	}

	dbInstanceWithConfigMapAndCustomizeKeys = v1alpha1.Database{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "database",
			Namespace: "postgresql-operator",
		},
		Spec: v1alpha1.DatabaseSpec{
			ConfigMapName:                "config-otherkeys",
			ConfigMapDatabaseNameKey:     "PGDATABASE",
			ConfigMapDatabasePasswordKey: "PGPASSWORD",
			ConfigMapDatabaseUserKey:     "PGUSER",
		},
	}
)
-------------------------------------------------------------------------------- /pkg/controller/backup/status.go: --------------------------------------------------------------------------------
package backup

import (
	"context"
	"fmt"
	"github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1"
	"github.com/dev4devs-com/postgresql-operator/pkg/service"
	"github.com/dev4devs-com/postgresql-operator/pkg/utils"
	"k8s.io/api/batch/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"reflect"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// statusOk is the value written to the CR status when every required resource was found.
const statusOk = "OK"

//updateAppStatus
returns error when status regards all required resource could not be updated with OK
func (r *ReconcileBackup) updateBackupStatus(request reconcile.Request) error {
	bkp, err := service.FetchBackupCR(request.Name, request.Namespace, r.client)
	if err != nil {
		return err
	}

	statusMsgUpdate := statusOk
	// Check if all required resource were created and found; on failure the
	// error text itself becomes the status message.
	if err := r.isAllCreated(bkp); err != nil {
		statusMsgUpdate = err.Error()
	}

	// Check if BackupStatus was changed, if yes update it
	if err := r.insertUpdateBackupStatus(bkp, statusMsgUpdate); err != nil {
		return err
	}
	return nil
}

// insertUpdateBackupStatus checks if BackupStatus was changed, if yes update it
func (r *ReconcileBackup) insertUpdateBackupStatus(bkp *v1alpha1.Backup, statusMsgUpdate string) error {
	if statusMsgUpdate != bkp.Status.BackupStatus {
		bkp.Status.BackupStatus = statusMsgUpdate
		if err := r.client.Status().Update(context.TODO(), bkp); err != nil {
			return err
		}
	}
	return nil
}

// updateCronJobStatus returns error when was not possible update the CronJob status successfully
func (r *ReconcileBackup) updateCronJobStatus(request reconcile.Request) error {
	bkp, err := service.FetchBackupCR(request.Name, request.Namespace, r.client)
	if err != nil {
		return err
	}

	// Check if Cronjob Name or Status was changed, if yes update it
	cronJob, err := service.FetchCronJob(bkp.Name, bkp.Namespace, r.client)
	if err != nil {
		return err
	}

	// Check if CronJob changed, if yes update its status
	if err := r.insertUpdateCronJobStatus(cronJob, bkp); err != nil {
		return err
	}
	return nil
}

// insertUpdateCronJobStatus if CronJob name or status was changed then its status will be updated
func (r *ReconcileBackup) insertUpdateCronJobStatus(cronJob *v1beta1.CronJob, bkp *v1alpha1.Backup) error {
	if cronJob.Name != bkp.Status.CronJobName || !reflect.DeepEqual(cronJob.Status, bkp.Status.CronJobStatus) {

		bkp.Status.CronJobStatus = cronJob.Status
		bkp.Status.CronJobName = cronJob.Name

		if err := r.client.Status().Update(context.TODO(), bkp); err != nil {
			return err
		}
	}
	return nil
}

// updateAWSSecretStatus returns error when was not possible update the AWS status fields in the CR successfully
func (r *ReconcileBackup) updateAWSSecretStatus(request reconcile.Request) error {
	bkp, err := service.FetchBackupCR(request.Name, request.Namespace, r.client)
	if err != nil {
		return err
	}

	aws, err := service.FetchSecret(utils.GetAwsSecretNamespace(bkp), utils.GetAWSSecretName(bkp), r.client)
	if err != nil {
		return err
	}

	// Check if the Secret with the AWS data was changed, if yes update its status
	if err := r.insertUpdateAwsSecretStatus(aws, bkp); err != nil {
		return err
	}
	return nil
}

// insertUpdateAwsSecretStatus will check and update the AWS Secret status if the Secret with the AWS data was changed
func (r *ReconcileBackup) insertUpdateAwsSecretStatus(aws *corev1.Secret, bkp *v1alpha1.Backup) error {
	if isAwsStatusEqual(aws, bkp) {

		bkp.Status.AWSSecretName = aws.Name
		bkp.Status.AwsCredentialsSecretNamespace = aws.Namespace

		if err := r.client.Status().Update(context.TODO(), bkp); err != nil {
			return err
		}
	}
	return nil
}

// isAwsStatusEqual return true when something related to the aws status fields changed.
// NOTE(review): despite the name, this returns true when the Secret name/namespace
// DIFFER from the status fields — consider renaming together with its caller.
func isAwsStatusEqual(aws *corev1.Secret, bkp *v1alpha1.Backup) bool {
	return aws.Name != bkp.Status.AWSSecretName || aws.Namespace != bkp.Status.AwsCredentialsSecretNamespace
}

// updateEncSecretStatus returns error when was not possible update the EncryptionKey status fields in the CR successfully
func (r *ReconcileBackup) updateEncSecretStatus(request reconcile.Request) error {
	bkp, err := service.FetchBackupCR(request.Name, request.Namespace, r.client)
	if err != nil {
		return err
	}

	isEncryptionKeyOptionConfig := utils.IsEncryptionKeyOptionConfig(bkp)
	if isEncryptionKeyOptionConfig {
		secret, err := service.FetchSecret(utils.GetEncSecretNamespace(bkp), utils.GetEncSecretName(bkp), r.client)
		if err != nil {
			return err
		}

		// Check if the Secret with the encryption key data was changed, if yes update its status
		if err := r.insertUpdateEncKeyStatus(secret, bkp); err != nil {
			return err
		}
	}

	// Check if the config(boolean status) was changed, if yes update it
	if isEncryptionKeyOptionConfig != bkp.Status.HasEncryptKey {

		bkp.Status.HasEncryptKey = isEncryptionKeyOptionConfig
		if err := r.client.Status().Update(context.TODO(), bkp); err != nil {
			return err
		}
	}
	return nil
}

// insertUpdateEncKeyStatus will check and update the EncryptionKey Secret status if the Secret with its data was changed
func (r *ReconcileBackup) insertUpdateEncKeyStatus(secret *corev1.Secret, bkp *v1alpha1.Backup) error {
	if isEncryptKeyStatusEquals(secret, bkp) {

		bkp.Status.EncryptKeySecretName = secret.Name
		bkp.Status.EncryptKeySecretNamespace = secret.Namespace

		if err := r.client.Status().Update(context.TODO(), bkp); err != nil {
			return err
		}
	}
	return nil
}

// isEncryptKeyStatusEquals return true when something related to the encryption key status fields change.
// NOTE(review): as with isAwsStatusEqual, this returns true when the fields DIFFER.
func isEncryptKeyStatusEquals(secret *corev1.Secret, bkp *v1alpha1.Backup) bool {
	return secret.Name != bkp.Status.EncryptKeySecretName || secret.Namespace != bkp.Status.EncryptKeySecretNamespace
}

// updateDBSecretStatus returns error when was not possible update the DB Secret status fields in the CR successfully
func (r *ReconcileBackup) updateDBSecretStatus(request reconcile.Request) error {
	bkp, err := service.FetchBackupCR(request.Name, request.Namespace, r.client)
	if err != nil {
		return err
	}

	dbSecret, err := service.FetchSecret(bkp.Namespace, utils.DbSecretPrefix+bkp.Name, r.client)
	if err != nil {
		return err
	}

	// Check if the Secret with the DB Secret was changed, if yes update its status
	if err := r.insertUpdateDBSecretStatus(dbSecret, bkp); err != nil {
		return err
	}
	return nil
}

// insertUpdateDBSecretStatus will check and update the DB Secret status if the Secret with the DB data was changed
func (r *ReconcileBackup) insertUpdateDBSecretStatus(dbSecret *corev1.Secret, bkp *v1alpha1.Backup) error {
	if dbSecret.Name != bkp.Status.DBSecretName {
		bkp.Status.DBSecretName = dbSecret.Name
		if err := r.client.Status().Update(context.TODO(), bkp); err != nil {
			return err
		}
	}
	return nil
}

// updatePodDatabaseFoundStatus returns error when was not possible update the DB Pod Found status field in the CR successfully
func (r *ReconcileBackup) updatePodDatabaseFoundStatus(request reconcile.Request) error {
	bkp, err := service.FetchBackupCR(request.Name, request.Namespace, r.client)
	if err != nil {
		return err
	}

	// Check if the Pod Database Found status changed, if yes update it
	if err := r.insertUpdatePodDbFoundStatus(bkp); err != nil {
		return err
	}
	return nil
}

// insertUpdatePodDbFoundStatus will check if the Pod Found status changed and update it
func (r *ReconcileBackup) insertUpdatePodDbFoundStatus(bkp *v1alpha1.Backup) error {
	if r.isDbPodFound() != bkp.Status.IsDatabasePodFound {
		bkp.Status.IsDatabasePodFound = r.isDbPodFound()
218 | 219 | if err := r.client.Status().Update(context.TODO(), bkp); err != nil { 220 | return err 221 | } 222 | } 223 | return nil 224 | } 225 | 226 | // updateDbServiceFoundStatus returns error when was not possible update the DB Service Found status field in the CR successfully 227 | func (r *ReconcileBackup) updateDbServiceFoundStatus(request reconcile.Request) error { 228 | bkp, err := service.FetchBackupCR(request.Name, request.Namespace, r.client) 229 | if err != nil { 230 | return err 231 | } 232 | 233 | // Check if the Database Found status changed, if yes update it 234 | if err := r.insertUpdateDbServiceFoundStatus(bkp); err != nil { 235 | return err 236 | } 237 | return nil 238 | } 239 | 240 | // insertUpdatePodDbFoundStatus will check and update the Database Found status changed and update it 241 | func (r *ReconcileBackup) insertUpdateDbServiceFoundStatus(bkp *v1alpha1.Backup) error { 242 | if r.isDbServiceFound() != bkp.Status.IsDatabaseServiceFound { 243 | bkp.Status.IsDatabaseServiceFound = r.isDbServiceFound() 244 | if err := r.client.Status().Update(context.TODO(), bkp); err != nil { 245 | return err 246 | } 247 | } 248 | return nil 249 | } 250 | 251 | //isDbServiceFound returns false when the database service which should be created by the Database controller was not found 252 | func (r *ReconcileBackup) isDbServiceFound() bool { 253 | return &r.dbService != nil && len(r.dbService.Name) > 0 254 | } 255 | 256 | //isDbPodFound returns false when the database pod which should be created by the Database controller was not found 257 | func (r *ReconcileBackup) isDbPodFound() bool { 258 | return &r.dbService != nil && len(r.dbService.Name) > 0 259 | } 260 | 261 | //isAllCreated returns error when some resource is missing 262 | func (r *ReconcileBackup) isAllCreated(bkp *v1alpha1.Backup) error { 263 | 264 | // Check if was possible found the DB Pod 265 | if !r.isDbPodFound() { 266 | err := fmt.Errorf("Error: Database Pod is missing") 267 | return err 
	}

	// Check if was possible found the DB Service
	if !r.isDbServiceFound() {
		err := fmt.Errorf("Error: Database Service is missing")
		return err
	}

	// Check if DB secret was created
	dbSecretName := utils.DbSecretPrefix + bkp.Name
	_, err := service.FetchSecret(bkp.Namespace, dbSecretName, r.client)
	if err != nil {
		err = fmt.Errorf("Error: DB Secret is missing. (%v)", dbSecretName)
		return err
	}

	// Check if AWS secret was created
	awsSecretName := utils.GetAWSSecretName(bkp)
	awsSecretNamespace := utils.GetAwsSecretNamespace(bkp)
	_, err = service.FetchSecret(awsSecretNamespace, awsSecretName, r.client)
	if err != nil {
		err := fmt.Errorf("Error: AWS Secret is missing. (name:%v,namespace:%v)", awsSecretName, awsSecretNamespace)
		return err
	}

	// Check if Enc secret was created (if was configured to be used)
	if utils.IsEncryptionKeyOptionConfig(bkp) {
		encSecretName := utils.GetEncSecretName(bkp)
		encSecretNamespace := utils.GetEncSecretNamespace(bkp)
		_, err := service.FetchSecret(encSecretNamespace, encSecretName, r.client)
		if err != nil {
			// NOTE(review): "Encript" typo is kept as-is because this message is
			// written to the CR status and may be consumed elsewhere.
			err := fmt.Errorf("Error: Encript Key Secret is missing. (name:%v,namespace:%v)", encSecretName, encSecretNamespace)
			return err
		}
	}

	// check if the cronJob was created
	_, err = service.FetchCronJob(bkp.Name, bkp.Namespace, r.client)
	if err != nil {
		err := fmt.Errorf("Error: CronJob is missing")
		return err
	}

	return nil
}
-------------------------------------------------------------------------------- /pkg/controller/controller.go: --------------------------------------------------------------------------------
package controller

import (
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
var AddToManagerFuncs []func(manager.Manager) error

// AddToManager adds all Controllers to the Manager
func AddToManager(m manager.Manager) error {
	for _, f := range AddToManagerFuncs {
		if err := f(m); err != nil {
			return err
		}
	}
	return nil
}
-------------------------------------------------------------------------------- /pkg/controller/database/controller.go: --------------------------------------------------------------------------------
package database

import (
	"github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1"
	"github.com/dev4devs-com/postgresql-operator/pkg/service"
	"github.com/dev4devs-com/postgresql-operator/pkg/utils"
	"k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// Add creates a new Database Controller and adds it to the Manager.
The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileDatabase{client: mgr.GetClient(), scheme: mgr.GetScheme()}
}

// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New(utils.DatabaseControllerName, mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// Watch for changes to primary resource Database
	if err := c.Watch(&source.Kind{Type: &v1alpha1.Database{}}, &handler.EnqueueRequestForObject{}); err != nil {
		return err
	}

	/** Watch for changes to secondary resource and create the owner Database **/

	// Watch Deployment resource controlled and created by it
	if err := service.Watch(c, &v1.Deployment{}, true, &v1alpha1.Database{}); err != nil {
		return err
	}

	// Watch PersistenceVolumeClaim resource controlled and created by it
	if err := service.Watch(c, &corev1.PersistentVolumeClaim{}, true, &v1alpha1.Database{}); err != nil {
		return err
	}

	// Watch Service resource controlled and created by it
	if err := service.Watch(c, &corev1.Service{}, true, &v1alpha1.Database{}); err != nil {
		return err
	}

	return nil
}

// blank assignment to verify that ReconcileDatabase implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileDatabase{}

// ReconcileDatabase reconciles a Database object
type ReconcileDatabase struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	scheme *runtime.Scheme
}

// Reconcile reads that state of the cluster for a Database object and makes changes based on the state read
// and what is in the Database.Spec
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileDatabase) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	reqLogger := utils.GetLoggerByRequestAndController(request, utils.DatabaseControllerName)
	reqLogger.Info("Reconciling Database ...")

	db, err := service.FetchDatabaseCR(request.Name, request.Namespace, r.client)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			reqLogger.Info("Database resource not found. Ignoring since object must be deleted.")
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		reqLogger.Error(err, "Failed to get Database.")
		return reconcile.Result{}, err
	}

	// Add const values for mandatory specs
	utils.AddDatabaseMandatorySpecs(db)

	if err := r.createResources(db, request); err != nil {
		reqLogger.Error(err, "Failed to create the secondary resource required for the Database CR")
		return reconcile.Result{}, err
	}

	if err := r.manageResources(db); err != nil {
		reqLogger.Error(err, "Failed to manage resource required for the Database CR")
		return reconcile.Result{}, err
	}

	if err := r.createUpdateCRStatus(request); err != nil {
		reqLogger.Error(err, "Failed to create and update the status in the Database CR")
		return reconcile.Result{}, err
	}

	reqLogger.Info("Stop Reconciling Database ...")
	return reconcile.Result{}, nil
}

//createResources will create the secondary resource which are required in order to make works successfully the primary resource(CR)
func (r *ReconcileDatabase) createResources(db *v1alpha1.Database, request reconcile.Request) error {
	reqLogger := utils.GetLoggerByRequestAndController(request, utils.DatabaseControllerName)
	reqLogger.Info("Creating secondary Database resources ...")

	// Check if deployment for the app exist, if not create one
	if err := r.createDeployment(db); err != nil {
		reqLogger.Error(err, "Failed to create Deployment")
		return err
	}

	// Check if service for the app exist, if not create one
	if err := r.createService(db); err != nil {
		reqLogger.Error(err, "Failed to create Service")
		return err
	}

	// Check if PersistentVolumeClaim for the app exist, if not create one
	if err := r.createPvc(db); err != nil {
		reqLogger.Error(err, "Failed to create PVC")
		return err
	}

	return nil
}

//createUpdateCRStatus will create and
update the status in the CR applied in the cluster 146 | func (r *ReconcileDatabase) createUpdateCRStatus(request reconcile.Request) error { 147 | reqLogger := utils.GetLoggerByRequestAndController(request, utils.DatabaseControllerName) 148 | reqLogger.Info("Create/Update Database status ...") 149 | 150 | if err := r.updateDeploymentStatus(request); err != nil { 151 | reqLogger.Error(err, "Failed to create Deployment Status") 152 | return err 153 | } 154 | 155 | if err := r.updateServiceStatus(request); err != nil { 156 | reqLogger.Error(err, "Failed to create Service Status") 157 | return err 158 | } 159 | 160 | if err := r.updatePvcStatus(request); err != nil { 161 | reqLogger.Error(err, "Failed to create PVC Status") 162 | return err 163 | } 164 | 165 | if err := r.updateDBStatus(request); err != nil { 166 | reqLogger.Error(err, "Failed to create DB Status") 167 | return err 168 | } 169 | return nil 170 | } 171 | -------------------------------------------------------------------------------- /pkg/controller/database/controller_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 6 | "github.com/dev4devs-com/postgresql-operator/pkg/service" 7 | "testing" 8 | 9 | appsv1 "k8s.io/api/apps/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/types" 13 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 14 | ) 15 | 16 | func TestReconcileDatabase(t *testing.T) { 17 | type fields struct { 18 | scheme *runtime.Scheme 19 | objs []runtime.Object 20 | } 21 | type args struct { 22 | dbInstance v1alpha1.Database 23 | } 24 | tests := []struct { 25 | name string 26 | fields fields 27 | args args 28 | wantRequeue bool 29 | wantDeployment bool 30 | wantService bool 31 | wantPVC bool 32 | wantErr bool 33 | }{ 34 | { 35 | name: "Should work with default values", 36 
| fields: fields{ 37 | objs: []runtime.Object{&dbInstanceWithoutSpec}, 38 | }, 39 | args: args{ 40 | dbInstance: dbInstanceWithoutSpec, 41 | }, 42 | wantErr: false, 43 | wantRequeue: false, 44 | wantDeployment: true, 45 | wantService: true, 46 | wantPVC: true, 47 | }, 48 | { 49 | name: "Should work when is using config map to create env vars", 50 | fields: fields{ 51 | objs: []runtime.Object{ 52 | &dbInstanceConfigMapSameKeys, 53 | &configMapSameKeyValues, 54 | }, 55 | }, 56 | args: args{ 57 | dbInstance: dbInstanceConfigMapSameKeys, 58 | }, 59 | wantErr: false, 60 | wantRequeue: false, 61 | wantDeployment: true, 62 | wantService: true, 63 | wantPVC: true, 64 | }, 65 | { 66 | name: "Should not fail because is missing the instance", 67 | wantErr: false, 68 | wantRequeue: false, 69 | wantDeployment: false, 70 | wantService: false, 71 | wantPVC: false, 72 | }, 73 | } 74 | for _, tt := range tests { 75 | t.Run(tt.name, func(t *testing.T) { 76 | 77 | r := buildReconcileWithFakeClientWithMocks(tt.fields.objs) 78 | 79 | // mock request to simulate Reconcile() being called on an event for a watched resource 80 | req := reconcile.Request{ 81 | NamespacedName: types.NamespacedName{ 82 | Name: tt.args.dbInstance.Name, 83 | Namespace: tt.args.dbInstance.Namespace, 84 | }, 85 | } 86 | 87 | res, err := r.Reconcile(req) 88 | if (err != nil) != tt.wantErr { 89 | t.Errorf("TestReconcileDatabase reconcile: error = %v, wantErr %v", err, tt.wantErr) 90 | return 91 | } 92 | 93 | deployment := &appsv1.Deployment{} 94 | err = r.client.Get(context.TODO(), types.NamespacedName{Name: tt.args.dbInstance.Name, Namespace: tt.args.dbInstance.Namespace}, deployment) 95 | if (err == nil) != tt.wantDeployment { 96 | t.Errorf("TestReconcileDatabase to get deployment error = %v, wantDeployment %v", err, tt.wantDeployment) 97 | return 98 | } 99 | 100 | service := &corev1.Service{} 101 | err = r.client.Get(context.TODO(), types.NamespacedName{Name: tt.args.dbInstance.Name, Namespace: 
tt.args.dbInstance.Namespace}, service) 102 | if (err == nil) != tt.wantService { 103 | t.Errorf("TestReconcileDatabase to get service error = %v, wantService %v", err, tt.wantService) 104 | return 105 | } 106 | 107 | pvc := &corev1.PersistentVolumeClaim{} 108 | err = r.client.Get(context.TODO(), types.NamespacedName{Name: tt.args.dbInstance.Name, Namespace: tt.args.dbInstance.Namespace}, pvc) 109 | if (err == nil) != tt.wantPVC { 110 | t.Errorf("TestReconcileDatabase to get service error = %v, wantPVC %v", err, tt.wantPVC) 111 | return 112 | } 113 | 114 | if (res.Requeue) != tt.wantRequeue { 115 | t.Errorf("TestReconcileBackup expect request to requeue res.Requeue = %v, wantRequeue %v", res.Requeue, tt.wantRequeue) 116 | return 117 | } 118 | }) 119 | } 120 | } 121 | 122 | func TestReconcileDatabase_EnsureReplicasSizeInstance(t *testing.T) { 123 | 124 | // objects to track in the fake client 125 | objs := []runtime.Object{ 126 | &dbInstanceWithoutSpec, 127 | } 128 | 129 | r := buildReconcileWithFakeClientWithMocks(objs) 130 | 131 | // mock request to simulate Reconcile() being called on an event for a watched resource 132 | req := reconcile.Request{ 133 | NamespacedName: types.NamespacedName{ 134 | Name: dbInstanceWithoutSpec.Name, 135 | Namespace: dbInstanceWithoutSpec.Namespace, 136 | }, 137 | } 138 | 139 | res, err := r.Reconcile(req) 140 | if err != nil { 141 | t.Fatalf("reconcile: (%v)", err) 142 | } 143 | 144 | deployment := &appsv1.Deployment{} 145 | err = r.client.Get(context.TODO(), req.NamespacedName, deployment) 146 | if err != nil { 147 | t.Fatalf("get deployment: (%v)", err) 148 | } 149 | 150 | //Mock Replicas wrong size 151 | size := int32(3) 152 | deployment.Spec.Replicas = &size 153 | 154 | // Update 155 | err = r.client.Update(context.TODO(), deployment) 156 | if err != nil { 157 | t.Fatalf("fails when try to update deployment replicas: (%v)", err) 158 | } 159 | 160 | res, err = r.Reconcile(req) 161 | if err != nil { 162 | t.Fatalf("reconcile: 
(%v)", err) 163 | } 164 | 165 | dep, err := service.FetchDeployment(dbInstanceWithoutSpec.Name, dbInstanceWithoutSpec.Namespace, r.client) 166 | if err != nil { 167 | t.Fatalf("get deployment: (%v)", err) 168 | } 169 | 170 | if *dep.Spec.Replicas != 1 { 171 | t.Errorf("Replicas size was not respected got (%v), when is expected (%v)", *dep.Spec.Replicas, 1) 172 | } 173 | 174 | if res.Requeue { 175 | t.Error("did not expect request to requeue") 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /pkg/controller/database/create_resources.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 6 | "github.com/dev4devs-com/postgresql-operator/pkg/resource" 7 | "github.com/dev4devs-com/postgresql-operator/pkg/service" 8 | ) 9 | 10 | // Check if PersistentVolumeClaim for the app exist, if not create one 11 | func (r *ReconcileDatabase) createPvc(db *v1alpha1.Database) error { 12 | if _, err := service.FetchPersistentVolumeClaim(db.Name, db.Namespace, r.client); err != nil { 13 | if err := r.client.Create(context.TODO(), resource.NewDatabasePvc(db, r.scheme)); err != nil { 14 | return err 15 | } 16 | } 17 | return nil 18 | } 19 | 20 | // Check if Service for the app exist, if not create one 21 | func (r *ReconcileDatabase) createService(db *v1alpha1.Database) error { 22 | if _, err := service.FetchService(db.Name, db.Namespace, r.client); err != nil { 23 | if err := r.client.Create(context.TODO(), resource.NewDatabaseService(db, r.scheme)); err != nil { 24 | return err 25 | } 26 | } 27 | return nil 28 | } 29 | 30 | // Check if Deployment for the app exist, if not create one 31 | func (r *ReconcileDatabase) createDeployment(db *v1alpha1.Database) error { 32 | _, err := service.FetchDeployment(db.Name, db.Namespace, r.client) 33 | if err != nil { 34 | if err := 
r.client.Create(context.TODO(), resource.NewDatabaseDeployment(db, r.scheme)); err != nil { 35 | return err 36 | } 37 | } 38 | return nil 39 | } 40 | -------------------------------------------------------------------------------- /pkg/controller/database/fakeclient_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "k8s.io/apimachinery/pkg/runtime" 6 | "k8s.io/client-go/kubernetes/scheme" 7 | _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" 8 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 9 | ) 10 | 11 | //buildReconcileWithFakeClientWithMocks return reconcile with fake client, schemes and mock objects 12 | func buildReconcileWithFakeClientWithMocks(objs []runtime.Object) *ReconcileDatabase { 13 | s := scheme.Scheme 14 | 15 | s.AddKnownTypes(v1alpha1.SchemeGroupVersion, &v1alpha1.Database{}) 16 | 17 | // create a fake client to mock API calls with the mock objects 18 | cl := fake.NewFakeClientWithScheme(s, objs...) 
19 | 20 | // create a Database object with the scheme and fake client 21 | return &ReconcileDatabase{client: cl, scheme: s} 22 | } 23 | -------------------------------------------------------------------------------- /pkg/controller/database/manage_resources.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 6 | "github.com/dev4devs-com/postgresql-operator/pkg/service" 7 | "k8s.io/api/apps/v1" 8 | ) 9 | 10 | // manageResources will ensure that the resource are with the expected values in the cluster 11 | func (r *ReconcileDatabase) manageResources(db *v1alpha1.Database) error { 12 | // get the latest version of db deployment 13 | dep, err := service.FetchDeployment(db.Name, db.Namespace, r.client) 14 | if err != nil { 15 | return err 16 | } 17 | 18 | // Ensure the deployment size is the same as the spec 19 | r.ensureDepSize(db, dep) 20 | return nil 21 | } 22 | 23 | // ensureDepSize will ensure that the quanity of instances in the cluster for the Database deployment is the same defined in the CR 24 | func (r *ReconcileDatabase) ensureDepSize(db *v1alpha1.Database, dep *v1.Deployment) error { 25 | size := db.Spec.Size 26 | if *dep.Spec.Replicas != size { 27 | // Set the number of Replicas spec in the CR 28 | dep.Spec.Replicas = &size 29 | if err := r.client.Update(context.TODO(), dep); err != nil { 30 | return err 31 | } 32 | } 33 | return nil 34 | } 35 | -------------------------------------------------------------------------------- /pkg/controller/database/mocks_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | v1alpha1 "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | corev1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // Centralized mock objects for 
use in tests 10 | var ( 11 | dbInstanceWithoutSpec = v1alpha1.Database{ 12 | ObjectMeta: metav1.ObjectMeta{ 13 | Name: "database", 14 | Namespace: "postgresql-operator", 15 | }, 16 | } 17 | 18 | dbInstanceConfigMapSameKeys = v1alpha1.Database{ 19 | ObjectMeta: metav1.ObjectMeta{ 20 | Name: "database", 21 | Namespace: "postgresql-operator", 22 | }, 23 | Spec: v1alpha1.DatabaseSpec{ 24 | ConfigMapName: "config-samekeys", 25 | }, 26 | } 27 | 28 | dbInstanceConfigMapOtherKeys = v1alpha1.Database{ 29 | ObjectMeta: metav1.ObjectMeta{ 30 | Name: "database", 31 | Namespace: "postgresql-operator", 32 | }, 33 | Spec: v1alpha1.DatabaseSpec{ 34 | ConfigMapName: "config-otherkeys", 35 | ConfigMapDatabaseNameKey: "PGDATABASE", 36 | ConfigMapDatabasePasswordKey: "PGPASSWORD", 37 | ConfigMapDatabaseUserKey: "PGUSER", 38 | }, 39 | } 40 | 41 | configMapOtherKeyValues = corev1.ConfigMap{ 42 | ObjectMeta: metav1.ObjectMeta{ 43 | Name: "config-otherkeys", 44 | Namespace: "postgresql-operator", 45 | }, 46 | Data: map[string]string{ 47 | "PGDATABASE": "dbname", 48 | "PGPASSWORD": "root", 49 | "PGUSER": "root", 50 | }, 51 | } 52 | 53 | configMapSameKeyValues = corev1.ConfigMap{ 54 | ObjectMeta: metav1.ObjectMeta{ 55 | Name: "config-samekeys", 56 | Namespace: "postgresql-operator", 57 | }, 58 | Data: map[string]string{ 59 | "POSTGRESQL_DATABASE": "dbname", 60 | "POSTGRESQL_PASSWORD": "root", 61 | "POSTGRESQL_USERPOSTGRESQL_USER": "root", 62 | }, 63 | } 64 | ) 65 | -------------------------------------------------------------------------------- /pkg/controller/database/status.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 7 | "github.com/dev4devs-com/postgresql-operator/pkg/service" 8 | "k8s.io/api/apps/v1" 9 | corev1 "k8s.io/api/core/v1" 10 | "reflect" 11 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 12 | ) 13 
| 14 | const statusOk = "OK" 15 | 16 | //updateDBStatus returns error when status regards the all required resource could not be updated 17 | func (r *ReconcileDatabase) updateDBStatus(request reconcile.Request) error { 18 | db, err := service.FetchDatabaseCR(request.Name, request.Namespace, r.client) 19 | if err != nil { 20 | return err 21 | } 22 | 23 | statusMsgUpdate := statusOk 24 | // Check if all required resource were created and found 25 | if err := r.isAllCreated(db); err != nil { 26 | statusMsgUpdate = err.Error() 27 | } 28 | 29 | // Check if BackupStatus was changed, if yes update it 30 | if err := r.insertUpdateDatabaseStatus(db, statusMsgUpdate); err != nil { 31 | return err 32 | } 33 | return nil 34 | } 35 | 36 | // Check if DatabaseStatus was changed, if yes update it 37 | func (r *ReconcileDatabase) insertUpdateDatabaseStatus(db *v1alpha1.Database, statusMsgUpdate string) error { 38 | if statusMsgUpdate != db.Status.DatabaseStatus { 39 | db.Status.DatabaseStatus = statusMsgUpdate 40 | if err := r.client.Status().Update(context.TODO(), db); err != nil { 41 | return err 42 | } 43 | } 44 | return nil 45 | } 46 | 47 | //updateDeploymentStatus returns error when status regards the deployment resource could not be updated 48 | func (r *ReconcileDatabase) updateDeploymentStatus(request reconcile.Request) error { 49 | db, err := service.FetchDatabaseCR(request.Name, request.Namespace, r.client) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | dep, err := service.FetchDeployment(db.Name, db.Namespace, r.client) 55 | if err != nil { 56 | return err 57 | } 58 | 59 | // Check if Deployment Status was changed, if yes update it 60 | if err := r.insertUpdateDeploymentStatus(dep, db); err != nil { 61 | return err 62 | } 63 | 64 | return nil 65 | } 66 | 67 | // insertUpdateDeploymentStatus will check if Deployment status changed, if yes then and update it 68 | func (r *ReconcileDatabase) insertUpdateDeploymentStatus(deploymentStatus *v1.Deployment, db 
*v1alpha1.Database) error { 69 | if !reflect.DeepEqual(deploymentStatus.Status, db.Status.DeploymentStatus) { 70 | db.Status.DeploymentStatus = deploymentStatus.Status 71 | if err := r.client.Status().Update(context.TODO(), db); err != nil { 72 | return err 73 | } 74 | } 75 | return nil 76 | } 77 | 78 | //updateServiceStatus returns error when status regards the service resource could not be updated 79 | func (r *ReconcileDatabase) updateServiceStatus(request reconcile.Request) error { 80 | db, err := service.FetchDatabaseCR(request.Name, request.Namespace, r.client) 81 | if err != nil { 82 | return err 83 | } 84 | 85 | ser, err := service.FetchService(db.Name, db.Namespace, r.client) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | // Check if Service Status was changed, if yes update it 91 | if err := r.insertUpdateServiceStatus(ser, db); err != nil { 92 | return err 93 | } 94 | 95 | return nil 96 | } 97 | 98 | // insertUpdateDeploymentStatus will check if Service status changed, if yes then and update it 99 | func (r *ReconcileDatabase) insertUpdateServiceStatus(serviceStatus *corev1.Service, db *v1alpha1.Database) error { 100 | if !reflect.DeepEqual(serviceStatus.Status, db.Status.ServiceStatus) { 101 | db.Status.ServiceStatus = serviceStatus.Status 102 | if err := r.client.Status().Update(context.TODO(), db); err != nil { 103 | return err 104 | } 105 | } 106 | return nil 107 | } 108 | 109 | // updatePvcStatus returns error when status regards the PersistentVolumeClaim resource could not be updated 110 | func (r *ReconcileDatabase) updatePvcStatus(request reconcile.Request) error { 111 | db, err := service.FetchDatabaseCR(request.Name, request.Namespace, r.client) 112 | if err != nil { 113 | return err 114 | } 115 | 116 | pvc, err := service.FetchPersistentVolumeClaim(db.Name, db.Namespace, r.client) 117 | if err != nil { 118 | return err 119 | } 120 | 121 | r.insertUpdatePvcStatus(pvc, db) 122 | return nil 123 | } 124 | 125 | // insertUpdatePvcStatus will 
check if Service status changed, if yes then and update it 126 | func (r *ReconcileDatabase) insertUpdatePvcStatus(pvc *corev1.PersistentVolumeClaim, db *v1alpha1.Database) error { 127 | if !reflect.DeepEqual(pvc.Status, db.Status.PVCStatus) { 128 | db.Status.PVCStatus = pvc.Status 129 | if err := r.client.Status().Update(context.TODO(), db); err != nil { 130 | return err 131 | } 132 | } 133 | return nil 134 | } 135 | 136 | //validateBackup returns error when some requirement is missing 137 | func (r *ReconcileDatabase) isAllCreated(db *v1alpha1.Database) error { 138 | 139 | // Check if the PersistentVolumeClaim was created 140 | _, err := service.FetchPersistentVolumeClaim(db.Name, db.Namespace, r.client) 141 | if err != nil { 142 | err = fmt.Errorf("Error: PersistentVolumeClaim is missing.") 143 | } 144 | 145 | // Check if the Deployment was created 146 | _, err = service.FetchDeployment(db.Name, db.Namespace, r.client) 147 | if err != nil { 148 | err = fmt.Errorf("Error: Deployment is missing.") 149 | } 150 | 151 | // Check if the Service was created 152 | _, err = service.FetchService(db.Name, db.Namespace, r.client) 153 | if err != nil { 154 | err = fmt.Errorf("Error: Service is missing.") 155 | } 156 | 157 | return nil 158 | } 159 | -------------------------------------------------------------------------------- /pkg/controller/database/status_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | appsv1 "k8s.io/api/apps/v1" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "k8s.io/apimachinery/pkg/types" 12 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 13 | ) 14 | 15 | func TestUpdateDBStatus(t *testing.T) { 16 | type fields struct { 17 | objs []runtime.Object 18 | } 19 | type args struct { 20 | request reconcile.Request 21 | } 22 | tests := []struct { 23 | name string 24 | 
fields fields 25 | args args 26 | wantErr bool 27 | }{ 28 | { 29 | name: "Should update status", 30 | fields: fields{ 31 | objs: []runtime.Object{&dbInstanceWithoutSpec}, 32 | }, 33 | args: args{ 34 | request: reconcile.Request{ 35 | NamespacedName: types.NamespacedName{ 36 | Name: dbInstanceWithoutSpec.Name, 37 | Namespace: dbInstanceWithoutSpec.Namespace, 38 | }, 39 | }, 40 | }, 41 | wantErr: false, 42 | }, 43 | } 44 | for _, tt := range tests { 45 | t.Run(tt.name, func(t *testing.T) { 46 | 47 | r := buildReconcileWithFakeClientWithMocks(tt.fields.objs) 48 | 49 | if err := r.updateDBStatus(tt.args.request); (err != nil) != tt.wantErr { 50 | t.Errorf("TestUpdateDBStatus error = %v, wantErr %v", err, tt.wantErr) 51 | } 52 | }) 53 | } 54 | } 55 | 56 | func TestUpdateDeploymentStatus(t *testing.T) { 57 | type fields struct { 58 | objs []runtime.Object 59 | } 60 | type args struct { 61 | request reconcile.Request 62 | } 63 | tests := []struct { 64 | name string 65 | fields fields 66 | args args 67 | want reflect.Type 68 | wantErr bool 69 | }{ 70 | { 71 | name: "Should not find the deployment", 72 | fields: fields{ 73 | objs: []runtime.Object{&dbInstanceWithoutSpec}, 74 | }, 75 | args: args{ 76 | request: reconcile.Request{ 77 | NamespacedName: types.NamespacedName{ 78 | Name: dbInstanceWithoutSpec.Name, 79 | Namespace: dbInstanceWithoutSpec.Namespace, 80 | }, 81 | }, 82 | }, 83 | wantErr: true, 84 | }, 85 | { 86 | name: "Should upddate with success", 87 | fields: fields{ 88 | objs: []runtime.Object{&dbInstanceWithoutSpec, &appsv1.Deployment{ 89 | ObjectMeta: metav1.ObjectMeta{ 90 | Name: dbInstanceWithoutSpec.Name, 91 | Namespace: dbInstanceWithoutSpec.Namespace, 92 | }, 93 | Status: appsv1.DeploymentStatus{ 94 | Replicas: 3, 95 | }, 96 | }}, 97 | }, 98 | args: args{ 99 | request: reconcile.Request{ 100 | NamespacedName: types.NamespacedName{ 101 | Name: dbInstanceWithoutSpec.Name, 102 | Namespace: dbInstanceWithoutSpec.Namespace, 103 | }, 104 | }, 105 | }, 106 | 
wantErr: false, 107 | }, 108 | } 109 | for _, tt := range tests { 110 | t.Run(tt.name, func(t *testing.T) { 111 | 112 | r := buildReconcileWithFakeClientWithMocks(tt.fields.objs) 113 | 114 | err := r.updateDeploymentStatus(tt.args.request) 115 | if (err != nil) != tt.wantErr { 116 | t.Errorf("TestUpdateDeploymentStatus) error = %v, wantErr %v", err, tt.wantErr) 117 | return 118 | } 119 | }) 120 | } 121 | } 122 | 123 | func TestUpdateServiceStatus(t *testing.T) { 124 | type fields struct { 125 | objs []runtime.Object 126 | } 127 | type args struct { 128 | request reconcile.Request 129 | } 130 | tests := []struct { 131 | name string 132 | fields fields 133 | args args 134 | wantErr bool 135 | }{ 136 | { 137 | name: "Should not find the service", 138 | fields: fields{ 139 | objs: []runtime.Object{&dbInstanceWithoutSpec}, 140 | }, 141 | args: args{ 142 | request: reconcile.Request{ 143 | NamespacedName: types.NamespacedName{ 144 | Name: dbInstanceWithoutSpec.Name, 145 | Namespace: dbInstanceWithoutSpec.Namespace, 146 | }, 147 | }, 148 | }, 149 | wantErr: true, 150 | }, 151 | { 152 | name: "Should update with success", 153 | fields: fields{ 154 | objs: []runtime.Object{&dbInstanceWithoutSpec, &corev1.Service{ 155 | ObjectMeta: metav1.ObjectMeta{ 156 | Name: dbInstanceWithoutSpec.Name, 157 | Namespace: dbInstanceWithoutSpec.Namespace, 158 | }, 159 | Status: corev1.ServiceStatus{ 160 | LoadBalancer: corev1.LoadBalancerStatus{ 161 | Ingress: []corev1.LoadBalancerIngress{ 162 | corev1.LoadBalancerIngress{ 163 | IP: "test", 164 | }, 165 | }, 166 | }, 167 | }, 168 | }}, 169 | }, 170 | args: args{ 171 | request: reconcile.Request{ 172 | NamespacedName: types.NamespacedName{ 173 | Name: dbInstanceWithoutSpec.Name, 174 | Namespace: dbInstanceWithoutSpec.Namespace, 175 | }, 176 | }, 177 | }, 178 | wantErr: false, 179 | }, 180 | } 181 | for _, tt := range tests { 182 | t.Run(tt.name, func(t *testing.T) { 183 | 184 | r := buildReconcileWithFakeClientWithMocks(tt.fields.objs) 185 | 
186 | err := r.updateServiceStatus(tt.args.request) 187 | if (err != nil) != tt.wantErr { 188 | t.Errorf("TestUpdateServiceStatus error = %v, wantErr %v", err, tt.wantErr) 189 | return 190 | } 191 | }) 192 | } 193 | } 194 | 195 | func TestUpdatePVCStatus(t *testing.T) { 196 | type fields struct { 197 | objs []runtime.Object 198 | } 199 | type args struct { 200 | request reconcile.Request 201 | } 202 | tests := []struct { 203 | name string 204 | fields fields 205 | args args 206 | wantErr bool 207 | }{ 208 | { 209 | name: "Should not find the pvc", 210 | fields: fields{ 211 | objs: []runtime.Object{&dbInstanceWithoutSpec}, 212 | }, 213 | args: args{ 214 | request: reconcile.Request{ 215 | NamespacedName: types.NamespacedName{ 216 | Name: dbInstanceWithoutSpec.Name, 217 | Namespace: dbInstanceWithoutSpec.Namespace, 218 | }, 219 | }, 220 | }, 221 | wantErr: true, 222 | }, 223 | { 224 | name: "Should update with success", 225 | fields: fields{ 226 | objs: []runtime.Object{&dbInstanceWithoutSpec, &corev1.PersistentVolumeClaim{ 227 | ObjectMeta: metav1.ObjectMeta{ 228 | Name: "database", 229 | Namespace: "postgresql-operator", 230 | }, 231 | Status: corev1.PersistentVolumeClaimStatus{ 232 | Phase: "test", 233 | }, 234 | }}, 235 | }, 236 | args: args{ 237 | request: reconcile.Request{ 238 | NamespacedName: types.NamespacedName{ 239 | Name: dbInstanceWithoutSpec.Name, 240 | Namespace: dbInstanceWithoutSpec.Namespace, 241 | }, 242 | }, 243 | }, 244 | wantErr: false, 245 | }, 246 | } 247 | for _, tt := range tests { 248 | t.Run(tt.name, func(t *testing.T) { 249 | 250 | r := buildReconcileWithFakeClientWithMocks(tt.fields.objs) 251 | 252 | err := r.updatePvcStatus(tt.args.request) 253 | if (err != nil) != tt.wantErr { 254 | t.Errorf("TestUpdatePVCStatus error = %v, wantErr %v", err, tt.wantErr) 255 | return 256 | } 257 | }) 258 | } 259 | } 260 | -------------------------------------------------------------------------------- /pkg/resource/cronjobs.go: 
-------------------------------------------------------------------------------- 1 | package resource 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/utils" 6 | batchv1 "k8s.io/api/batch/v1" 7 | "k8s.io/api/batch/v1beta1" 8 | corev1 "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 12 | ) 13 | 14 | //Returns the NewBackupCronJob object for the Database Backup 15 | func NewBackupCronJob(bkp *v1alpha1.Backup, scheme *runtime.Scheme) *v1beta1.CronJob { 16 | cron := &v1beta1.CronJob{ 17 | ObjectMeta: v1.ObjectMeta{ 18 | Name: bkp.Name, 19 | Namespace: bkp.Namespace, 20 | Labels: utils.GetLabels(bkp.Name), 21 | }, 22 | Spec: v1beta1.CronJobSpec{ 23 | Schedule: bkp.Spec.Schedule, 24 | JobTemplate: v1beta1.JobTemplateSpec{ 25 | Spec: batchv1.JobSpec{ 26 | Template: corev1.PodTemplateSpec{ 27 | Spec: corev1.PodSpec{ 28 | ServiceAccountName: "postgresql-operator", 29 | Containers: []corev1.Container{ 30 | { 31 | Name: bkp.Name, 32 | Image: bkp.Spec.Image, 33 | Command: []string{"/opt/intly/tools/entrypoint.sh", "-c", "postgres", "-n", bkp.Namespace, "-b", "s3", "-e", ""}, 34 | Env: []corev1.EnvVar{ 35 | { 36 | Name: "BACKEND_SECRET_NAME", 37 | Value: utils.GetAWSSecretName(bkp), 38 | }, 39 | { 40 | Name: "BACKEND_SECRET_NAMESPACE", 41 | Value: utils.GetAwsSecretNamespace(bkp), 42 | }, 43 | { 44 | Name: "ENCRYPTION_SECRET_NAME", 45 | Value: utils.GetEncSecretName(bkp), 46 | }, 47 | { 48 | Name: "ENCRYPTION_SECRET_NAMESPACE", 49 | Value: utils.GetEncSecretNamespace(bkp), 50 | }, 51 | { 52 | Name: "COMPONENT_SECRET_NAME", 53 | Value: utils.DbSecretPrefix + bkp.Name, 54 | }, 55 | { 56 | Name: "COMPONENT_SECRET_NAMESPACE", 57 | Value: bkp.Namespace, 58 | }, 59 | { 60 | Name: "PRODUCT_NAME", 61 | Value: bkp.Spec.ProductName, 62 | }, 63 | }, 64 | }, 65 | }, 66 | 
RestartPolicy: corev1.RestartPolicyOnFailure, 67 | }, 68 | }, 69 | }, 70 | }, 71 | }, 72 | } 73 | controllerutil.SetControllerReference(bkp, cron, scheme) 74 | return cron 75 | } 76 | -------------------------------------------------------------------------------- /pkg/resource/deployments.go: -------------------------------------------------------------------------------- 1 | package resource 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/utils" 6 | appsv1 "k8s.io/api/apps/v1" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/api/resource" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 12 | ) 13 | 14 | //buildDBDeployment returns the deployment object for the Database 15 | func NewDatabaseDeployment(db *v1alpha1.Database, scheme *runtime.Scheme) *appsv1.Deployment { 16 | ls := utils.GetLabels(db.Name) 17 | auto := true 18 | replicas := db.Spec.Size 19 | dep := &appsv1.Deployment{ 20 | ObjectMeta: metav1.ObjectMeta{ 21 | Name: db.Name, 22 | Namespace: db.Namespace, 23 | Labels: ls, 24 | }, 25 | Spec: appsv1.DeploymentSpec{ 26 | Replicas: &replicas, 27 | Strategy: appsv1.DeploymentStrategy{ 28 | Type: appsv1.RecreateDeploymentStrategyType, 29 | }, 30 | Selector: &metav1.LabelSelector{ 31 | MatchLabels: ls, 32 | }, 33 | Template: corev1.PodTemplateSpec{ 34 | ObjectMeta: metav1.ObjectMeta{ 35 | Labels: ls, 36 | }, 37 | Spec: corev1.PodSpec{ 38 | Containers: []corev1.Container{{ 39 | Image: db.Spec.Image, 40 | Name: db.Spec.ContainerName, 41 | ImagePullPolicy: db.Spec.ContainerImagePullPolicy, 42 | Ports: []corev1.ContainerPort{{ 43 | ContainerPort: db.Spec.DatabasePort, 44 | Protocol: "TCP", 45 | }}, 46 | Env: []corev1.EnvVar{ 47 | utils.BuildDatabaseNameEnvVar(db), 48 | utils.BuildDatabaseUserEnvVar(db), 49 | utils.BuildDatabasePasswordEnvVar(db), 
50 | { 51 | Name: "PGDATA", 52 | Value: "/var/lib/pgsql/data", 53 | }, 54 | }, 55 | VolumeMounts: []corev1.VolumeMount{ 56 | { 57 | Name: db.Name, 58 | MountPath: "/var/lib/pgsql/data", 59 | }, 60 | }, 61 | LivenessProbe: &corev1.Probe{ 62 | Handler: corev1.Handler{ 63 | Exec: &corev1.ExecAction{ 64 | Command: []string{ 65 | "/usr/libexec/check-container", 66 | "'--live'", 67 | }, 68 | }, 69 | }, 70 | FailureThreshold: 3, 71 | InitialDelaySeconds: 120, 72 | PeriodSeconds: 10, 73 | TimeoutSeconds: 10, 74 | SuccessThreshold: 1, 75 | }, 76 | ReadinessProbe: &corev1.Probe{ 77 | Handler: corev1.Handler{ 78 | Exec: &corev1.ExecAction{ 79 | Command: []string{ 80 | "/usr/libexec/check-container", 81 | }, 82 | }, 83 | }, 84 | FailureThreshold: 3, 85 | InitialDelaySeconds: 5, 86 | PeriodSeconds: 10, 87 | TimeoutSeconds: 1, 88 | SuccessThreshold: 1, 89 | }, 90 | Resources: corev1.ResourceRequirements{ 91 | Limits: corev1.ResourceList{ 92 | corev1.ResourceMemory: resource.MustParse(db.Spec.DatabaseMemoryLimit), 93 | corev1.ResourceCPU: resource.MustParse(db.Spec.DatabaseCpuLimit), 94 | }, 95 | Requests: corev1.ResourceList{ 96 | corev1.ResourceMemory: resource.MustParse(db.Spec.DatabaseMemoryRequest), 97 | corev1.ResourceCPU: resource.MustParse(db.Spec.DatabaseCpu), 98 | }, 99 | }, 100 | TerminationMessagePath: "/dev/termination-log", 101 | }}, 102 | DNSPolicy: corev1.DNSClusterFirst, 103 | RestartPolicy: corev1.RestartPolicyAlways, 104 | Volumes: []corev1.Volume{ 105 | { 106 | Name: db.Name, 107 | VolumeSource: corev1.VolumeSource{ 108 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 109 | ClaimName: db.Name, 110 | }, 111 | }, 112 | }, 113 | }, 114 | AutomountServiceAccountToken: &auto, 115 | }, 116 | }, 117 | }, 118 | } 119 | controllerutil.SetControllerReference(db, dep, scheme) 120 | return dep 121 | } 122 | -------------------------------------------------------------------------------- /pkg/resource/pvs.go: 
-------------------------------------------------------------------------------- 1 | package resource 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/utils" 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/api/resource" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 11 | ) 12 | 13 | //Returns the deployment object for the Database 14 | func NewDatabasePvc(db *v1alpha1.Database, scheme *runtime.Scheme) *corev1.PersistentVolumeClaim { 15 | ls := utils.GetLabels(db.Name) 16 | pv := &corev1.PersistentVolumeClaim{ 17 | ObjectMeta: v1.ObjectMeta{ 18 | Name: db.Name, 19 | Namespace: db.Namespace, 20 | Labels: ls, 21 | }, 22 | Spec: corev1.PersistentVolumeClaimSpec{ 23 | AccessModes: []corev1.PersistentVolumeAccessMode{ 24 | corev1.ReadWriteOnce, 25 | }, 26 | Resources: corev1.ResourceRequirements{ 27 | Requests: corev1.ResourceList{ 28 | corev1.ResourceStorage: resource.MustParse(db.Spec.DatabaseStorageRequest), 29 | }, 30 | }, 31 | StorageClassName: &db.Spec.DatabaseStorageClassName, 32 | }, 33 | } 34 | controllerutil.SetControllerReference(db, pv, scheme) 35 | return pv 36 | } 37 | -------------------------------------------------------------------------------- /pkg/resource/secrets.go: -------------------------------------------------------------------------------- 1 | package resource 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/utils" 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/apimachinery/pkg/runtime" 9 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 10 | ) 11 | 12 | //Returns the buildDatabaseSecret object for the Database Backup 13 | func NewBackupSecret(bkp *v1alpha1.Backup, prefix string, 
secretData map[string][]byte, secretStringData map[string]string, scheme *runtime.Scheme) *corev1.Secret { 14 | ls := utils.GetLabels(bkp.Name) 15 | 16 | secret := &corev1.Secret{ 17 | ObjectMeta: v1.ObjectMeta{ 18 | Name: prefix + bkp.Name, 19 | Namespace: bkp.Namespace, 20 | Labels: ls, 21 | }, 22 | Data: secretData, 23 | Type: "Opaque", 24 | } 25 | 26 | if secretStringData != nil && len(secretStringData) > 0 { 27 | secret.StringData = secretStringData 28 | } 29 | 30 | controllerutil.SetControllerReference(bkp, secret, scheme) 31 | return secret 32 | } 33 | -------------------------------------------------------------------------------- /pkg/resource/services.go: -------------------------------------------------------------------------------- 1 | package resource 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/utils" 6 | "k8s.io/apimachinery/pkg/util/intstr" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 12 | ) 13 | 14 | // Returns the service object for the Database 15 | func NewDatabaseService(db *v1alpha1.Database, scheme *runtime.Scheme) *corev1.Service { 16 | ls := utils.GetLabels(db.Name) 17 | ser := &corev1.Service{ 18 | ObjectMeta: metav1.ObjectMeta{ 19 | Name: db.Name, 20 | Namespace: db.Namespace, 21 | Labels: ls, 22 | }, 23 | Spec: corev1.ServiceSpec{ 24 | Selector: ls, 25 | Type: corev1.ServiceTypeClusterIP, 26 | Ports: []corev1.ServicePort{ 27 | { 28 | Name: db.Name, 29 | TargetPort: intstr.IntOrString{ 30 | Type: intstr.Int, 31 | IntVal: db.Spec.DatabasePort, 32 | }, 33 | Port: db.Spec.DatabasePort, 34 | Protocol: "TCP", 35 | }, 36 | }, 37 | }, 38 | } 39 | // Set Database db as the owner and controller 40 | controllerutil.SetControllerReference(db, ser, scheme) 41 | return ser 42 | } 43 | 
-------------------------------------------------------------------------------- /pkg/service/crs_fetches.go: --------------------------------------------------------------------------------
package service

import (
	"context"
	"github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// FetchDatabaseCR returns the Database CR with the given name in the namespace.
// A NotFound error may simply mean the object was deleted after the reconcile
// request was queued; owned objects are garbage collected automatically, so
// callers only need finalizers for additional cleanup logic.
func FetchDatabaseCR(name, namespace string, client client.Client) (*v1alpha1.Database, error) {
	db := &v1alpha1.Database{}
	err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, db)
	return db, err
}

// FetchBackupCR returns the Backup CR with the given name in the namespace.
func FetchBackupCR(name, namespace string, client client.Client) (*v1alpha1.Backup, error) {
	bkp := &v1alpha1.Backup{}
	err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, bkp)
	return bkp, err
}
-------------------------------------------------------------------------------- /pkg/service/generic_fetches.go: --------------------------------------------------------------------------------
package service

import (
	"context"
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/api/batch/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// FetchService returns the Service resource with the name in the namespace.
func FetchService(name, namespace string, client client.Client) (*corev1.Service, error) {
	service := &corev1.Service{}
	err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, service)
	return service, err
}

// FetchDeployment returns the Deployment resource with the name in the namespace.
// (Comment previously said "FetchService" — copy-paste error.)
func FetchDeployment(name, namespace string, client client.Client) (*appsv1.Deployment, error) {
	deployment := &appsv1.Deployment{}
	err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, deployment)
	return deployment, err
}

// FetchPersistentVolumeClaim returns the PersistentVolumeClaim resource with the name in the namespace.
func FetchPersistentVolumeClaim(name, namespace string, client client.Client) (*corev1.PersistentVolumeClaim, error) {
	pvc := &corev1.PersistentVolumeClaim{}
	err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, pvc)
	return pvc, err
}

// FetchCronJob returns the CronJob resource with the name in the namespace.
func FetchCronJob(name, namespace string, client client.Client) (*v1beta1.CronJob, error) {
	cronJob := &v1beta1.CronJob{}
	err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, cronJob)
	return cronJob, err
}

// FetchSecret returns the Secret resource with the name in the namespace.
// NOTE(review): unlike every sibling helper in this file, the parameter order
// here is (namespace, name). Changing it now would silently break existing
// callers, so it is kept as-is — but call sites must pass arguments in this
// order. Consider aligning it in a coordinated refactor.
func FetchSecret(namespace, name string, client client.Client) (*corev1.Secret, error) {
	secret := &corev1.Secret{}
	err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, secret)
	return secret, err
}

// FetchConfigMap returns the ConfigMap resource with the name in the namespace.
// (Comment previously said "FetchSecret" — copy-paste error.)
func FetchConfigMap(name, namespace string, client client.Client) (*corev1.ConfigMap, error) {
	cfg := &corev1.ConfigMap{}
	err := client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, cfg)
	return cfg, err
}
-------------------------------------------------------------------------------- /pkg/service/postgresql_fetches.go: --------------------------------------------------------------------------------
package service

import (
	"context"

	"github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1"
	"github.com/dev4devs-com/postgresql-operator/pkg/utils"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// FetchDatabasePod searches the cluster for one Pod managed by the Database
// controller (matched by the shared CR labels in the Database's namespace).
// NOTE: when no pod matches, this returns (nil, nil) — err is nil at that
// point — so callers MUST nil-check the returned pod before dereferencing.
func FetchDatabasePod(bkp *v1alpha1.Backup, db *v1alpha1.Database, client client.Client) (*corev1.Pod, error) {
	listOps := buildDatabaseCriteria(bkp, db)
	dbPodList := &corev1.PodList{}
	err := client.List(context.TODO(), dbPodList, listOps)
	if err != nil {
		return nil, err
	}

	if len(dbPodList.Items) == 0 {
		return nil, err
	}

	pod := dbPodList.Items[0]
	return &pod, nil
}

// FetchDatabaseService searches the cluster for one Service managed by the
// Database controller. Like FetchDatabasePod, it returns (nil, nil) when no
// Service matches — callers must nil-check the result.
func FetchDatabaseService(bkp *v1alpha1.Backup, db *v1alpha1.Database, client client.Client) (*corev1.Service, error) {
	listOps := buildDatabaseCriteria(bkp, db)
	dbServiceList := &corev1.ServiceList{}
	err := client.List(context.TODO(), dbServiceList, listOps)
	if err != nil {
		return nil, err
	}

	if len(dbServiceList.Items) == 0 {
		return nil, err
	}

	srv := dbServiceList.Items[0]
	return &srv, nil
}

// buildDatabaseCriteria returns the client.ListOptions (namespace + label
// selector) used to fetch the secondary resources created for the Database CR.
// NOTE(review): the bkp parameter is currently unused in the body — presumably
// kept for signature symmetry with the callers; confirm before removing.
func buildDatabaseCriteria(bkp *v1alpha1.Backup, db *v1alpha1.Database) *client.ListOptions {
	labelSelector := labels.SelectorFromSet(utils.GetLabels(db.Name))
	listOps := &client.ListOptions{Namespace: db.Namespace, LabelSelector: labelSelector}
	return listOps
}
-------------------------------------------------------------------------------- /pkg/service/watches.go: --------------------------------------------------------------------------------
package service

import ( 4 | "k8s.io/apimachinery/pkg/runtime" 5 | "sigs.k8s.io/controller-runtime/pkg/controller" 6 | "sigs.k8s.io/controller-runtime/pkg/handler" 7 | "sigs.k8s.io/controller-runtime/pkg/source" 8 | ) 9 | 10 | //Watch for changes to secondary resource and create the owner Backup 11 | 12 | func Watch(c controller.Controller, obj runtime.Object, isConttroller bool, owner runtime.Object) error { 13 | err := c.Watch(&source.Kind{Type: obj}, &handler.EnqueueRequestForOwner{ 14 | IsController: isConttroller, 15 | OwnerType: owner, 16 | }) 17 | return err 18 | } 19 | -------------------------------------------------------------------------------- /pkg/utils/backup_mandatory_specs.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/config" 6 | ) 7 | 8 | var defaultBackupConfig = config.NewDefaultBackupConfig() 9 | 10 | // AddBackupMandatorySpecs will add the specs which are mandatory for Backup CR in the case them 11 | // not be applied 12 | func AddBackupMandatorySpecs(bkp *v1alpha1.Backup) { 13 | 14 | /* 15 | Backup Container 16 | --------------------- 17 | See https://github.com/integr8ly/backup-container-image 18 | */ 19 | 20 | if bkp.Spec.Schedule == "" { 21 | bkp.Spec.Schedule = defaultBackupConfig.Schedule 22 | } 23 | 24 | if bkp.Spec.DatabaseCRName == "" { 25 | bkp.Spec.DatabaseCRName = defaultBackupConfig.DatabaseCRName 26 | } 27 | 28 | if bkp.Spec.Image == "" { 29 | bkp.Spec.Image = defaultBackupConfig.Image 30 | } 31 | 32 | if bkp.Spec.DatabaseVersion == "" { 33 | bkp.Spec.DatabaseVersion = defaultBackupConfig.DatabaseVersion 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /pkg/utils/constants.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | const ( 4 | 
AwsSecretPrefix = "aws-" 5 | DbSecretPrefix = "db-" 6 | EncSecretPrefix = "encryption-" 7 | BackupControllerName = "controller_backup" 8 | DatabaseControllerName = "controller_database" 9 | ) 10 | -------------------------------------------------------------------------------- /pkg/utils/database_envvars.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | corev1 "k8s.io/api/core/v1" 6 | ) 7 | 8 | //BuildDatabaseNameEnvVar return the corev1.EnvVar object wth the key:value for the database name 9 | func BuildDatabaseNameEnvVar(db *v1alpha1.Database) corev1.EnvVar { 10 | if len(db.Spec.ConfigMapName) > 0 { 11 | return corev1.EnvVar{ 12 | Name: db.Spec.DatabaseNameKeyEnvVar, 13 | ValueFrom: &corev1.EnvVarSource{ 14 | ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ 15 | LocalObjectReference: corev1.LocalObjectReference{ 16 | Name: db.Spec.ConfigMapName, 17 | }, 18 | Key: GetEnvVarKey(db.Spec.ConfigMapDatabaseNameKey, db.Spec.DatabaseNameKeyEnvVar), 19 | }, 20 | }, 21 | } 22 | } 23 | 24 | return corev1.EnvVar{ 25 | Name: db.Spec.DatabaseNameKeyEnvVar, 26 | Value: db.Spec.DatabaseName, 27 | } 28 | } 29 | 30 | //BuildDatabaseUserEnvVar return the corev1.EnvVar object wth the key:value for the database user 31 | func BuildDatabaseUserEnvVar(db *v1alpha1.Database) corev1.EnvVar { 32 | if len(db.Spec.ConfigMapName) > 0 { 33 | return corev1.EnvVar{ 34 | Name: db.Spec.DatabaseUserKeyEnvVar, 35 | ValueFrom: &corev1.EnvVarSource{ 36 | ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ 37 | LocalObjectReference: corev1.LocalObjectReference{ 38 | Name: db.Spec.ConfigMapName, 39 | }, 40 | Key: GetEnvVarKey(db.Spec.ConfigMapDatabaseUserKey, db.Spec.DatabaseUserKeyEnvVar), 41 | }, 42 | }, 43 | } 44 | } 45 | 46 | return corev1.EnvVar{ 47 | Name: db.Spec.DatabaseUserKeyEnvVar, 48 | Value: db.Spec.DatabaseUser, 49 | } 50 | } 51 | 52 | 
//BuildDatabasePasswordEnvVar return the corev1.EnvVar object wth the key:value for the database pwd 53 | func BuildDatabasePasswordEnvVar(db *v1alpha1.Database) corev1.EnvVar { 54 | if len(db.Spec.ConfigMapName) > 0 { 55 | return corev1.EnvVar{ 56 | Name: db.Spec.DatabasePasswordKeyEnvVar, 57 | ValueFrom: &corev1.EnvVarSource{ 58 | ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ 59 | LocalObjectReference: corev1.LocalObjectReference{ 60 | Name: db.Spec.ConfigMapName, 61 | }, 62 | Key: GetEnvVarKey(db.Spec.ConfigMapDatabasePasswordKey, db.Spec.DatabasePasswordKeyEnvVar), 63 | }, 64 | }, 65 | } 66 | } 67 | 68 | return corev1.EnvVar{ 69 | Name: db.Spec.DatabasePasswordKeyEnvVar, 70 | Value: db.Spec.DatabasePassword, 71 | } 72 | } 73 | 74 | //GetEnvVarKey check if the customized key is in place for the configMap and returned the valid key 75 | func GetEnvVarKey(cgfKey, defaultKey string) string { 76 | if len(cgfKey) > 0 { 77 | return cgfKey 78 | } 79 | return defaultKey 80 | } 81 | -------------------------------------------------------------------------------- /pkg/utils/database_mandatory_specs.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/dev4devs-com/postgresql-operator/pkg/config" 6 | ) 7 | 8 | var defaulDatabaseConfig = config.NewDatabaseConfig() 9 | 10 | // AddDatabaseMandatorySpecs will add the specs which are mandatory for Database CR in the case them 11 | // not be applied 12 | func AddDatabaseMandatorySpecs(db *v1alpha1.Database) { 13 | 14 | /* 15 | CR DB Resource 16 | --------------------- 17 | */ 18 | 19 | if db.Spec.Size == 0 { 20 | db.Spec.Size = defaulDatabaseConfig.Size 21 | } 22 | 23 | /* 24 | Environment Variables 25 | --------------------- 26 | The following values are used to create the ConfigMap and the Environment Variables which will use these values 27 | */ 28 | 29 | if 
db.Spec.DatabaseName == "" { 30 | db.Spec.DatabaseName = defaulDatabaseConfig.DatabaseName 31 | } 32 | 33 | if db.Spec.DatabasePassword == "" { 34 | db.Spec.DatabasePassword = defaulDatabaseConfig.DatabasePassword 35 | } 36 | 37 | if db.Spec.DatabaseUser == "" { 38 | db.Spec.DatabaseUser = defaulDatabaseConfig.DatabaseUser 39 | } 40 | 41 | /* 42 | Database Container 43 | --------------------------------- 44 | */ 45 | 46 | //Following are the values which will be used as the key label for the environment variable of the database image. 47 | if db.Spec.DatabaseNameKeyEnvVar == "" { 48 | db.Spec.DatabaseNameKeyEnvVar = defaulDatabaseConfig.DatabaseNameKeyEnvVar 49 | } 50 | 51 | if db.Spec.DatabasePasswordKeyEnvVar == "" { 52 | db.Spec.DatabasePasswordKeyEnvVar = defaulDatabaseConfig.DatabasePasswordKeyEnvVar 53 | } 54 | 55 | if db.Spec.DatabaseUserKeyEnvVar == "" { 56 | db.Spec.DatabaseUserKeyEnvVar = defaulDatabaseConfig.DatabaseUserKeyEnvVar 57 | } 58 | 59 | if db.Spec.Image == "" { 60 | db.Spec.Image = defaulDatabaseConfig.Image 61 | } 62 | 63 | if db.Spec.ContainerName == "" { 64 | db.Spec.ContainerName = defaulDatabaseConfig.ContainerName 65 | } 66 | 67 | if db.Spec.DatabaseMemoryLimit == "" { 68 | db.Spec.DatabaseMemoryLimit = defaulDatabaseConfig.DatabaseMemoryLimit 69 | } 70 | 71 | if db.Spec.DatabaseMemoryRequest == "" { 72 | db.Spec.DatabaseMemoryRequest = defaulDatabaseConfig.DatabaseMemoryRequest 73 | } 74 | 75 | if db.Spec.DatabaseStorageRequest == "" { 76 | db.Spec.DatabaseStorageRequest = defaulDatabaseConfig.DatabaseStorageRequest 77 | } 78 | 79 | if db.Spec.DatabaseCpu == "" { 80 | db.Spec.DatabaseCpu = defaulDatabaseConfig.DatabaseCpu 81 | } 82 | 83 | if db.Spec.DatabaseCpuLimit == "" { 84 | db.Spec.DatabaseCpuLimit = defaulDatabaseConfig.DatabaseCpuLimit 85 | } 86 | 87 | if db.Spec.DatabasePort == 0 { 88 | db.Spec.DatabasePort = defaulDatabaseConfig.DatabasePort 89 | } 90 | 91 | if len(db.Spec.DatabaseStorageClassName) < 1 { 92 | 
db.Spec.DatabaseStorageClassName = defaulDatabaseConfig.DatabaseStorageClassName 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /pkg/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1" 5 | "github.com/go-logr/logr" 6 | logf "sigs.k8s.io/controller-runtime/pkg/log" 7 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 8 | ) 9 | 10 | func GetLabels(name string) map[string]string { 11 | return map[string]string{"owner": "postgresqloperator", "cr": name} 12 | } 13 | 14 | // GetAWSSecretName returns the name of the secret 15 | // NOTE: The user can just inform the name and namespace of the Secret which is already applied in the cluster OR 16 | // the data required for the operator be able to create one in the same namespace where the backup is applied 17 | func GetAWSSecretName(bkp *v1alpha1.Backup) string { 18 | if IsAwsKeySetupByName(bkp) { 19 | return bkp.Spec.AwsSecretName 20 | } 21 | return AwsSecretPrefix + bkp.Name 22 | } 23 | 24 | // GetAwsSecretNamespace returns the namespace where the secret is applied already 25 | // NOTE: The user can just inform the name and namespace of the Secret which is already applied in the cluster OR 26 | // the data required for the operator be able to create one in the same namespace where the backup is applied 27 | func GetAwsSecretNamespace(bkp *v1alpha1.Backup) string { 28 | if IsAwsKeySetupByName(bkp) && bkp.Spec.AwsSecretNamespace != "" { 29 | return bkp.Spec.AwsSecretNamespace 30 | } 31 | return bkp.Namespace 32 | } 33 | 34 | // GetEncSecretNamespace returns the namespace where the secret is applied already 35 | // NOTE: The user can just inform the name and namespace of the Secret which is already applied in the cluster OR 36 | // the data required for the operator be able to create one in the same namespace where the 
func GetEncSecretNamespace(bkp *v1alpha1.Backup) string {
	if IsEncKeySetupByNameAndNamespace(bkp) {
		return bkp.Spec.EncryptKeySecretNamespace
	}
	return bkp.Namespace
}

// GetEncSecretName returns the name of the encryption-key secret.
// NOTE: The user can either inform the name of a Secret already applied in the
// cluster OR the data required for the operator to create one in the same
// namespace where the Backup is applied.
func GetEncSecretName(bkp *v1alpha1.Backup) string {
	if IsEncKeySetupByName(bkp) {
		return bkp.Spec.EncryptKeySecretName
	}
	return EncSecretPrefix + bkp.Name
}

// IsEncryptionKeyOptionConfig returns true when the CR has the configuration
// required for the encryption-key option to be used.
// NOTE(review): the first condition checks AwsSecretName, which looks
// unrelated to encryption — possibly intended to be EncryptKeySecretName;
// confirm against the backup controller before changing.
func IsEncryptionKeyOptionConfig(bkp *v1alpha1.Backup) bool {
	return bkp.Spec.AwsSecretName != "" ||
		(bkp.Spec.GpgTrustModel != "" && bkp.Spec.GpgEmail != "" && bkp.Spec.GpgPublicKey != "")
}

// IsEncKeySetupByName returns true when the Backup is set up to use a
// pre-existing encryption-key Secret already applied in the cluster, rather
// than the data required for the operator to create one.
func IsEncKeySetupByName(bkp *v1alpha1.Backup) bool {
	return bkp.Spec.EncryptKeySecretName != ""
}

// IsAwsKeySetupByName returns true when the Backup is set up to use a
// pre-existing AWS Secret already applied in the cluster, rather than the
// data required for the operator to create one.
func IsAwsKeySetupByName(bkp *v1alpha1.Backup) bool {
	return bkp.Spec.AwsSecretName != ""
}

// IsEncKeySetupByNameAndNamespace returns true when the encryption key is set
// up via a pre-existing secret referenced by both name and namespace.
func IsEncKeySetupByNameAndNamespace(bkp *v1alpha1.Backup) bool {
	return IsEncKeySetupByName(bkp) && bkp.Spec.EncryptKeySecretNamespace != ""
}

// GetLoggerByRequestAndController returns a logger named after the controller
// and enriched with the reconcile request's namespace and name.
func GetLoggerByRequestAndController(request reconcile.Request, controllerName string) logr.Logger {
	var log = logf.Log.WithName(controllerName)
	return log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
}
-------------------------------------------------------------------------------- /scripts/export_local_envvars.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Exports the environment variables required to run the operator locally
# (outside the cluster) against the postgresql-operator namespace.
export OPERATOR_NAME=postgresql-operator
export WATCH_NAMESPACE=postgresql-operator
-------------------------------------------------------------------------------- /test/e2e/main_test.go: --------------------------------------------------------------------------------
package e2e

import (
	"github.com/operator-framework/operator-sdk/pkg/test"
	"testing"
)

// TestMain delegates to the operator-sdk e2e framework entry point.
func TestMain(m *testing.M) {
	test.MainEntry(m)
}
-------------------------------------------------------------------------------- /test/e2e/oper_test.go: --------------------------------------------------------------------------------
package e2e

import (
	goctx "context"
	"fmt"
	"github.com/dev4devs-com/postgresql-operator/pkg/apis"
	"github.com/dev4devs-com/postgresql-operator/pkg/apis/postgresql/v1alpha1"
	"github.com/dev4devs-com/postgresql-operator/pkg/service"
	"github.com/dev4devs-com/postgresql-operator/pkg/utils"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"

	framework "github.com/operator-framework/operator-sdk/pkg/test"
	"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"testing"
	"time"
)

// Polling intervals and timeouts shared by the e2e scenarios below.
var (
	retryInterval        = time.Second * 30
	timeout              = time.Second * 120
	cleanupRetryInterval = time.Second * 4
	cleanupTimeout       = time.Second * 60
)

// TestDatabase registers the Database and Backup CR schemes with the e2e
// framework and runs the full scenario.
func TestDatabase(t *testing.T) {
	databaseList := &v1alpha1.DatabaseList{}
	err := framework.AddToFrameworkScheme(apis.AddToScheme, databaseList)
	if err != nil {
		t.Fatalf("failed to add custom resource scheme to framework: %v", err)
	}

	backupList := &v1alpha1.BackupList{}
	err = framework.AddToFrameworkScheme(apis.AddToScheme, backupList)
	if err != nil {
		t.Fatalf("failed to add custom resource scheme to framework: %v", err)
	}

	t.Run("FullTest", FullTest)
}

// FullTest deploys the operator, creates a Database CR, waits for it to reach
// status OK, then creates a Backup CR and waits for its status OK as well.
func FullTest(t *testing.T) {
	ctx := framework.NewTestCtx(t)
	defer ctx.Cleanup()
	err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
	if err != nil {
		t.Fatalf("failed to initialize cluster resource: %v", err)
	}
	t.Log("Initialized cluster resource")
	namespace, err := ctx.GetNamespace()
	if err != nil {
		t.Fatal(err)
	}
	// get global framework variables
	f := framework.Global

	// wait for postgresql-operator to be ready
	err = e2eutil.WaitForOperatorDeployment(t, f.KubeClient, namespace, "postgresql-operator", 1, retryInterval, timeout)
	if err != nil {
		t.Fatal(err)
	}

	// create database custom resource
	db := &v1alpha1.Database{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "database",
			Namespace: namespace,
		},
	}

	t.Log("Add database mandatory specs")
	utils.AddDatabaseMandatorySpecs(db)

	// use TestCtx's create helper to create the object and add a cleanup function for the new object
	err = f.Client.Create(goctx.TODO(), db, &framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
	if err != nil {
		t.Fatal(err)
	}

	t.Log("wait for database to reach 1 replica")
	err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "database", 1, retryInterval, timeout)
	if err != nil {
		t.Fatal(err)
	}

	err = f.Client.Get(goctx.TODO(), types.NamespacedName{Name: "database", Namespace: namespace}, db)
	if err != nil {
		t.Fatal(err)
	}

	t.Log("wait for database status == OK")
	err = wait.Poll(retryInterval, timeout, func() (done bool, err error) {
		cr, err := service.FetchDatabaseCR(db.Name, db.Namespace, f.Client.Client)
		if err != nil {
			return false, err
		}

		if cr.Status.DatabaseStatus == "OK" {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Fatal(fmt.Errorf("could not get Database Status == OK: %v", err))
	}

	// create backup custom resource (comment previously said "database")
	bkp := &v1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "backup",
			Namespace: namespace,
		},
	}

	t.Log("Add bkp mandatory specs")
	utils.AddBackupMandatorySpecs(bkp)

	// use TestCtx's create helper to create the object and add a cleanup function for the new object
	err = f.Client.Create(goctx.TODO(), bkp, &framework.CleanupOptions{TestContext: ctx, Timeout: cleanupTimeout, RetryInterval: cleanupRetryInterval})
	if err != nil {
		t.Fatal(err)
	}

	t.Log("wait for backup status == OK")
	err = wait.Poll(retryInterval, timeout, func() (done bool, err error) {
		cr, err := service.FetchBackupCR(bkp.Name, bkp.Namespace, f.Client.Client)
		if err != nil {
			return false, err
		}

		if cr.Status.BackupStatus == "OK" {
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Fatal(fmt.Errorf("could not get Backup Status == OK: %v", err))
	}
}
-------------------------------------------------------------------------------- /tools.go:
-------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | // Place any runtime dependencies as imports in this file. 4 | // Go modules will be forced to download and install them. 5 | package tools 6 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | var ( 4 | Version = "0.2.0" 5 | ) 6 | --------------------------------------------------------------------------------