├── .github └── workflows │ ├── release.yml │ ├── snapshot.yml │ ├── website.yml │ └── website_snapshot.yml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── apperr ├── error.go ├── error_test.go ├── kind.go ├── level.go └── op.go ├── cmd ├── destination.go ├── origin.go ├── root.go ├── root_test.go ├── util.go └── util_test.go ├── configs ├── dest.v1.json ├── dest.v2.json ├── dest.v3.json ├── dest_loop.v2.json └── origin.json ├── consul ├── check.go ├── check_test.go └── consul.go ├── go.mod ├── go.sum ├── goreleaser.yml ├── main.go ├── scripts ├── destinationV_config.json ├── local_bootstrap.sh └── originV_config.json ├── syncer ├── destination.go ├── info.go ├── info_test.go ├── origin.go └── syncer.go ├── transformer ├── named_regex_test.go ├── named_regexp.go ├── nil.go ├── transformer.go └── transformer_test.go ├── vault ├── check.go └── vault.go └── website ├── .gitignore ├── docs ├── contribution │ ├── build.md │ ├── ci.md │ └── goodparts.md ├── deploy │ └── options.md ├── faq │ ├── deployment.md │ ├── failures.md │ └── general.md ├── getstarted │ ├── config.md │ ├── install.md │ └── why.md └── internals │ ├── destination.md │ ├── keywords.md │ └── origin.md ├── docusaurus.config.js ├── package-lock.json ├── package.json ├── sidebars.js ├── src ├── css │ └── custom.css └── pages │ ├── index.js │ └── styles.module.css ├── static └── img │ ├── favicon.ico │ ├── icon.png │ ├── vsync_text_animation.gif │ └── vsync_text_morelight_lowres.png └── yarn.lock /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | tags: "**" 6 | 7 | jobs: 8 | build_test_release: 9 | runs-on: ubuntu-latest 10 | services: 11 | source_consul: 12 | image: consul 13 | ports: 14 | - 6500:8500 15 | destination_consul: 16 | image: consul 17 | ports: 18 | - 7500:8500 19 | source_vault: 20 | image: vault 21 | ports: 22 | - 6200:8200 23 | 
destination_vault: 24 | image: vault 25 | ports: 26 | - 7200:8200 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@v2 30 | with: 31 | fetch-depth: 0 32 | - name: Set up Go 33 | uses: actions/setup-go@v1 34 | with: 35 | go-version: 1.13.x 36 | - name: Build vars 37 | id: build_vars 38 | run: | 39 | echo ::set-output name=build_commit::${{ github.sha }} 40 | echo ::set-output name=build_time::$(date -u +"%Y-%m-%dT%H:%M:%S") 41 | echo ::set-output name=build_version::$(git describe --tags 2>/dev/null || echo "v0.0.0") 42 | - name: Build 43 | run: | 44 | go mod download 45 | go generate ./... 46 | go build -o vsync -ldflags="-X github.com/ExpediaGroup/vsync/cmd.buildCommit=${{ steps.build_vars.outputs.build_commit }} -X github.com/ExpediaGroup/vsync/cmd.buildTime=${{ steps.build_vars.outputs.build_time }} -X github.com/ExpediaGroup/vsync/cmd.buildVersion=${{ steps.build_vars.outputs.build_version }}" 47 | - name: Unit tests 48 | run: | 49 | go test ./... 50 | - name: Integration tests 51 | run: | 52 | echo hi 53 | echo ${{ github.ref }} 54 | curl http://127.0.0.1:7200/v1/sys/seal-status 55 | - name: Release using goreleaser 56 | uses: goreleaser/goreleaser-action@v1 57 | with: 58 | version: latest 59 | args: release --rm-dist 60 | env: 61 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 62 | BUILD_COMMIT: ${{ steps.build_vars.outputs.build_commit }} 63 | BUILD_TIME: ${{ steps.build_vars.outputs.build_time }} 64 | BUILD_VERSION: ${{ steps.build_vars.outputs.build_version }} 65 | -------------------------------------------------------------------------------- /.github/workflows/snapshot.yml: -------------------------------------------------------------------------------- 1 | name: snapshot 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: "**" 7 | 8 | jobs: 9 | build_test_snapshot: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout 13 | uses: actions/checkout@v2 14 | with: 15 | fetch-depth: 0 16 | - name: Set up Go 17 | uses: actions/setup-go@v1 18 
| with: 19 | go-version: 1.13.x 20 | - name: Build vars 21 | id: build_vars 22 | run: | 23 | echo ::set-output name=build_commit::${{ github.sha }} 24 | echo ::set-output name=build_time::$(date -u +"%Y-%m-%dT%H:%M:%S") 25 | echo ::set-output name=build_version::$(git describe --tags 2>/dev/null || echo "v0.0.0") 26 | - name: Build 27 | run: | 28 | go mod download 29 | go generate ./... 30 | go build -o vsync -ldflags="-X github.com/ExpediaGroup/vsync/cmd.buildCommit=${{ steps.build_vars.outputs.build_commit }} -X github.com/ExpediaGroup/vsync/cmd.buildTime=${{ steps.build_vars.outputs.build_time }} -X github.com/ExpediaGroup/vsync/cmd.buildVersion=${{ steps.build_vars.outputs.build_version }}" 31 | - name: Unit tests 32 | run: | 33 | ./vsync --version 34 | go test ./... 35 | - name: Snapshot using goreleaser 36 | uses: goreleaser/goreleaser-action@v1 37 | with: 38 | version: latest 39 | args: release --snapshot --rm-dist 40 | env: 41 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 42 | BUILD_COMMIT: ${{ steps.build_vars.outputs.build_commit }} 43 | BUILD_TIME: ${{ steps.build_vars.outputs.build_time }} 44 | BUILD_VERSION: ${{ steps.build_vars.outputs.build_version }} 45 | -------------------------------------------------------------------------------- /.github/workflows/website.yml: -------------------------------------------------------------------------------- 1 | name: website 2 | 3 | on: 4 | push: 5 | branches: "master" 6 | paths: 7 | - ".github/workflows/website.yml" 8 | - "docs/**" 9 | - "website/**" 10 | 11 | jobs: 12 | publish_website: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v2 17 | - name: Use Node.js 12 18 | uses: actions/setup-node@v1 19 | with: 20 | node-version: 12 21 | - name: Build 22 | run: | 23 | cd website 24 | npm install 25 | npm run build 26 | - name: Deploy 27 | uses: peaceiris/actions-gh-pages@v3 28 | with: 29 | github_token: ${{ secrets.GITHUB_TOKEN }} 30 | publish_dir: ./website/build 31 | 
-------------------------------------------------------------------------------- /.github/workflows/website_snapshot.yml: -------------------------------------------------------------------------------- 1 | name: website_snapshot 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches-ignore: 7 | - "master" 8 | paths: 9 | - ".github/workflows/website.yml" 10 | - "docs/**" 11 | - "website/**" 12 | 13 | jobs: 14 | publish_website: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v2 19 | - name: Use Node.js 12 20 | uses: actions/setup-node@v1 21 | with: 22 | node-version: 12 23 | - name: Build 24 | run: | 25 | cd website 26 | npm install 27 | npm run build 28 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # ide 2 | .vscode/ 3 | .idea/ 4 | .DS_Store 5 | 6 | # binaries 7 | vsync 8 | debug 9 | dist/ 10 | 11 | # quality 12 | .tmp_* 13 | .quality/ 14 | *.pprof 15 | *.out 16 | work/ -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## v0.3.0 - Dec 15 2021 8 | ### Add 9 | 10 | - adding `ignoreDeletes` boolean flag for stopping vsync destination from deleting all secrets at once and then recreating them, some times its scary even though its a soft delete ( deleting latest version ) 11 | - This issue is caused when vsync origin uploads empty sync info in case there is some issue with origin vault. 
12 | - Once a delete is ignored, its also not stored in destination sync info so that we can pull up the changes easily between origin and destination 13 | 14 | ## v0.2.1 - Aug 16 2021 15 | ### Updated 16 | 17 | - A cleaner way we check permissions of vault token on a path by using sys/capabilities-self api call to get a list of capabilities 18 | 19 | 20 | ## v0.2.0 - May 27 2021 21 | ### Updated [BREAKING CHANGE] 22 | 23 | - Sync Path were initially on the config root, its moved to origin and destination separately so that we can have different paths to store sync data even with same consul 24 | - `syncPath` -> `origin.syncPath` & `destination.syncPath` 25 | - Variable `origin.dc` and `destination.dc` was ambiguous, so moved to `origin.consul.dc` & `destination.consul.dc` 26 | 27 | ## v0.1.1 - Mar 30 2021 28 | ### Added 29 | - Approle support for getting vault token. 30 | 31 | ## v0.1.0 - Jan 25 2021 32 | ### Removed [BREAKING CHANGE] 33 | - Assumption that first word in datapath is the mount like `secret/` but not the case in real world. 34 | - Removed functions in vault.path.go and corresponding tests as they were having assumption about first word as mount. 35 | ### Added [FIX for BREAKING CHANGE] 36 | - `mounts` key in origin and destination configs, its a list of mounts which needs to be synced. Take a look at [config](./website/docs/getstarted/config.md) and [examples](./configs/origin.json) to get an idea 37 | 38 | ## v0.0.1 - Feb 18 2020 39 | ### Added 40 | - Initial release. 
41 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | education, socio-economic status, nationality, personal appearance, race, 10 | religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 
39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting a member of project team [https://github.com/orgs/ExpediaGroup/teams/vsync-committers]. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 
67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contribution 2 | 3 | First of all, thank you for trying to contribute. We felt it would be easy for the community for housekeeping purposes to use a process. 4 | 5 | Use github pull requests for code merge activities. 6 | 7 | Use github issues for raising bugs and feature requests -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. 
(Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vsync 2 | 3 | vsync, an easy, efficient way to sync credentials across from one origin to multiple destinations 4 | 5 | Developers might have their apps in multiple datacenters, each having its own vault. Its difficult for developers to update secrets in each datacenter for their apps to pickup updated secrets like database passwords. Instead we can have single origin vault, where developer will update and we can replicate the secrets to other vaults. This is where vsync fits in. 
6 | 7 | * Parallel workers to finish the job faster 8 | * No need of cron jobs to trigger syncing 9 | Cleanly closes the cycles 10 | * Exposes telemetry data (OpenTelemetry integration in future) 11 | * Clean vault audit log, as it uses only kv metadata for comparison and they are not clogged because of secret distribution 12 | * Transform the path between origin and destination while syncing eg: secret/data/runner/stage/app1 => runnerv2/data/stage/app1/secrets without impacting apps / users 13 | * Loopback to have origin and destination in the same vault 14 | * Meta sync information is stored in consul 15 | 16 | Website: https://expediagroup.github.io/vsync 17 | 18 | [Release](https://github.com/ExpediaGroup/vsync/releases) 19 | 20 | [Changelog](./CHANGELOG.md) -------------------------------------------------------------------------------- /apperr/error.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package apperr 16 | 17 | import ( 18 | "fmt" 19 | "log" 20 | ) 21 | 22 | // a := apperr.New(fmt.Sprintf("transformer name %q", mpaasV2Name), err, op, apperr.Fatal) 23 | // return p, apperr.New(fmt.Sprintf("wrapper %q", mpaasV2Name), err, op, apperr.Fatal, ErrInitialize) 24 | 25 | var Seperator = "\n\t" 26 | 27 | type Error struct { 28 | Op Op 29 | Level Level 30 | Kind error 31 | Context string 32 | Root error 33 | //Stack 34 | } 35 | 36 | // There is going to be a Format method in future (Go 2) 37 | 38 | // TODO: may be we can use errStr to cache the subErr instead of calling Error func recursively 39 | func (e *Error) Error() string { 40 | if e == nil { 41 | return "" 42 | } 43 | 44 | //return fmt.Sprintf("op - %s\n kind - %s\n context - %s\n root - %v\n", e.Op, e.Kind, e.Context, e.RootCause) 45 | return fmt.Sprintf("%s: %s %v %v", e.Op, e.Context, Seperator, e.Root) 46 | } 47 | 48 | func (e *Error) Unwrap() error { 49 | if e == nil { 50 | return nil 51 | } 52 | 53 | return e.Root 54 | } 55 | 56 | func (e *Error) Is(target error) bool { 57 | if e.Kind == target { 58 | return true 59 | } 60 | 61 | err, ok := target.(*Error) 62 | if !ok { 63 | return false 64 | } 65 | 66 | return e.Kind == err.Kind 67 | } 68 | 69 | func New(context string, root error, args ...interface{}) error { 70 | e := &Error{ 71 | Context: context, 72 | Root: root, 73 | } 74 | for _, arg := range args { 75 | switch arg := arg.(type) { 76 | case Op: 77 | e.Op = arg 78 | case Level: 79 | e.Level = arg 80 | case Kind: 81 | e.Kind = arg 82 | default: 83 | log.Panicf("unhandled arg to E type: %T, value: %v", arg, arg) 84 | } 85 | } 86 | 87 | // to mark root as kind 88 | // useful for matching errors.Is() which are thrown by stdlib and other libraries 89 | if e.Kind == nil && root != nil { 90 | e.Kind = root 91 | } 92 | 93 | return e 94 | } 95 | 96 | // TODO: may be this needs to be in for loop instead of recursion 97 | func Ops(e error) []Op { 98 | if e == nil { 99 | return []Op{} 100 | 
} 101 | 102 | err, ok := e.(*Error) 103 | if !ok { 104 | return []Op{} 105 | } 106 | 107 | ops := []Op{ 108 | err.Op, 109 | } 110 | 111 | ops = append(ops, Ops(err.Root)...) 112 | 113 | return ops 114 | } 115 | 116 | func ShouldPanic(e error) bool { 117 | if e == nil { 118 | return false 119 | } 120 | 121 | err, ok := e.(*Error) 122 | if !ok { 123 | return false 124 | } 125 | 126 | if err.Level == Panic { 127 | return true 128 | } 129 | return ShouldPanic(err.Root) 130 | } 131 | 132 | func ShouldStop(e error) bool { 133 | if e == nil { 134 | return false 135 | } 136 | 137 | err, ok := e.(*Error) 138 | if !ok { 139 | return false 140 | } 141 | 142 | if err.Level == Fatal || err.Level == Panic { 143 | return true 144 | } 145 | return ShouldStop(err.Root) 146 | } 147 | -------------------------------------------------------------------------------- /apperr/error_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package apperr 16 | 17 | import ( 18 | "testing" 19 | 20 | "github.com/stretchr/testify/assert" 21 | ) 22 | 23 | func TestDummy(t *testing.T) { 24 | assert.Equal(t, "", "") 25 | } 26 | -------------------------------------------------------------------------------- /apperr/kind.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package apperr 16 | 17 | import "errors" 18 | 19 | type Kind error 20 | 21 | var ErrPermissionDenied = errors.New("permission denied") 22 | var ErrInvalidPath = errors.New("invalid path") 23 | var ErrInitialization = errors.New("wrong initialization") 24 | var ErrCorrupted = errors.New("corrupted state") 25 | -------------------------------------------------------------------------------- /apperr/level.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package apperr 16 | 17 | // Level defines severity of error. 18 | // None of these levels will not abruptly halt the program, use panic() for that usecase 19 | // Error levels actually start from Warn 20 | // Use Fatal to gracefully stop the program 21 | // Use Panic to print stack trace 22 | // Trace, Debug, Info are there for future just in case we need them 23 | type Level uint8 24 | 25 | const ( 26 | Warn Level = iota 27 | Fatal 28 | Panic // prints stack 29 | Trace 30 | Debug 31 | Info 32 | ) 33 | -------------------------------------------------------------------------------- /apperr/op.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package apperr 16 | 17 | type Op string 18 | -------------------------------------------------------------------------------- /cmd/destination.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package cmd 16 | 17 | import ( 18 | "context" 19 | "crypto/sha256" 20 | "fmt" 21 | "hash" 22 | "os" 23 | "os/signal" 24 | "strings" 25 | "sync" 26 | "syscall" 27 | "time" 28 | 29 | "github.com/ExpediaGroup/vsync/apperr" 30 | "github.com/ExpediaGroup/vsync/consul" 31 | "github.com/ExpediaGroup/vsync/syncer" 32 | "github.com/ExpediaGroup/vsync/transformer" 33 | "github.com/ExpediaGroup/vsync/vault" 34 | "github.com/hashicorp/consul/api/watch" 35 | "github.com/rs/zerolog/log" 36 | "github.com/spf13/cobra" 37 | "github.com/spf13/viper" 38 | ) 39 | 40 | func init() { 41 | viper.SetDefault("name", "destination") // name is required for mount checks and telemetry 42 | viper.SetDefault("numBuckets", 1) // we need atleast one bucket to store info 43 | viper.SetDefault("destination.tick", "10s") 44 | viper.SetDefault("destination.timeout", "5m") 45 | viper.SetDefault("destination.syncPath", "vsync/") 46 | viper.SetDefault("destination.numWorkers", 1) // we need atleast 1 worker or else the sync routine will be blocked 47 | viper.SetDefault("origin.syncPath", "vsync/") 48 | viper.SetDefault("origin.renewToken", 
true) 49 | 50 | if err := viper.BindPFlags(destinationCmd.PersistentFlags()); err != nil { 51 | log.Panic(). 52 | Err(err). 53 | Str("command", "destination"). 54 | Str("flags", "persistent"). 55 | Msg("cannot bind flags with viper") 56 | } 57 | 58 | if err := viper.BindPFlags(destinationCmd.Flags()); err != nil { 59 | log.Panic(). 60 | Err(err). 61 | Str("command", "destination"). 62 | Str("flags", "transient"). 63 | Msg("cannot bind flags with viper") 64 | } 65 | 66 | rootCmd.AddCommand(destinationCmd) 67 | } 68 | 69 | var destinationCmd = &cobra.Command{ 70 | Use: "destination", 71 | Short: "Performs comparisons of sync data structures and copies data from origin to destination for nullifying the diffs", 72 | Long: `Watchs sync data structure, compares with local and asks origin vault for paths required to nullify the diffs`, 73 | SilenceUsage: true, 74 | SilenceErrors: true, 75 | 76 | RunE: func(cmd *cobra.Command, args []string) error { 77 | const op = apperr.Op("cmd.destination") 78 | 79 | ctx, cancel := context.WithCancel(context.Background()) 80 | defer cancel() 81 | 82 | // initial configs 83 | name := viper.GetString("name") 84 | numBuckets := viper.GetInt("numBuckets") 85 | tick := viper.GetDuration("destination.tick") 86 | timeout := viper.GetDuration("destination.timeout") 87 | numWorkers := viper.GetInt("destination.numWorkers") 88 | originSyncPath := viper.GetString("origin.syncPath") 89 | originMounts := viper.GetStringSlice("origin.mounts") 90 | destinationSyncPath := viper.GetString("destination.syncPath") 91 | destinationMounts := viper.GetStringSlice("destination.mounts") 92 | hasher := sha256.New() 93 | 94 | // deprecated 95 | syncPathDepr := viper.GetString("syncPath") 96 | if syncPathDepr != "" { 97 | log.Error().Str("mode", "destination").Msg("syncPath variable is deprecated, use origin.syncPath and destination.syncPath, they can be same value") 98 | return apperr.New(fmt.Sprintf("parameter %q deprecated, please use %q and %q; they can be 
same value", "syncPath", "destination.syncPath", "origin.syncPath"), ErrInitialize, op, apperr.Fatal) 99 | } 100 | originDcDepr := viper.GetString("origin.dc") 101 | if originDcDepr != "" { 102 | log.Error().Str("mode", "origin").Str("origin.dc", originDcDepr).Msg("origin.dc variable is deprecated, please use origin.consul.dc") 103 | return apperr.New(fmt.Sprintf("parameter %q deprecated, use %q", "origin.dc", "origin.consul.dc"), ErrInitialize, op, apperr.Fatal) 104 | } 105 | destinationDcDepr := viper.GetString("destination.dc") 106 | if destinationDcDepr != "" { 107 | log.Error().Str("mode", "destination").Str("destination.dc", destinationDcDepr).Msg("destination.dc variable is deprecated, please use destination.consul.dc") 108 | return apperr.New(fmt.Sprintf("parameter %q deprecated, use %q", "destination.dc", "destination.consul.dc"), ErrInitialize, op, apperr.Fatal) 109 | } 110 | 111 | // telemetry client 112 | telemetryClient.AddTags("mpaas_application_name:vsync_" + name) 113 | 114 | // get destination consul and vault 115 | destinationConsul, destinationVault, err := getEssentials("destination") 116 | if err != nil { 117 | log.Debug().Err(err).Str("mode", "destination").Msg("cannot get essentials") 118 | return apperr.New(fmt.Sprintf("cannot get clients for mode %q", "destination"), err, op, apperr.Fatal, ErrInitialize) 119 | } 120 | 121 | // get origin consul and vault 122 | originConsul, originVault, err := getEssentials("origin") 123 | if err != nil { 124 | log.Debug().Err(err).Str("mode", "origin").Msg("cannot get essentials") 125 | return apperr.New(fmt.Sprintf("cannot get clients for mode %q", "origin"), err, op, apperr.Fatal, ErrInitialize) 126 | } 127 | 128 | // setup channels and context 129 | errCh := make(chan error, numWorkers) // equal to number of go routines so that we can close it and dont worry about nil channel panic 130 | triggerCh := make(chan bool) 131 | sigCh := make(chan os.Signal, 3) // 3 -> number of signals it may need to handle 
at single point in time 132 | signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) 133 | 134 | // transformations from config 135 | pack, err := getTransfomerPack() 136 | if err != nil { 137 | return apperr.New(fmt.Sprintf("cannot get transformer packs"), err, op, apperr.Fatal, ErrInitialize) 138 | } 139 | 140 | // perform inital checks on sync path, check kv and token permissions 141 | if originSyncPath[len(originSyncPath)-1:] != "/" { 142 | originSyncPath = originSyncPath + "/" 143 | } 144 | if destinationSyncPath[len(destinationSyncPath)-1:] != "/" { 145 | destinationSyncPath = destinationSyncPath + "/" 146 | } 147 | // adds type into sync path, useful in case we use same syncPath in same consul 148 | originSyncPath = originSyncPath + "origin/" 149 | destinationSyncPath = destinationSyncPath + "destination/" 150 | 151 | err = destinationConsul.SyncPathChecks(destinationSyncPath, consul.StdCheck) 152 | if err != nil { 153 | log.Debug().Err(err).Msg("failures on sync path checks on destination") 154 | return apperr.New(fmt.Sprintf("sync path checks failed for %q", destinationSyncPath), err, op, apperr.Fatal, ErrInitialize) 155 | } 156 | log.Info().Str("path", destinationSyncPath).Msg("sync path passed initial checks on destination") 157 | 158 | err = originConsul.SyncPathChecks(originSyncPath, consul.StdCheck) 159 | if err != nil { 160 | log.Debug().Err(err).Msg("failures on sync path checks on origin") 161 | return apperr.New(fmt.Sprintf("sync path checks failed for %q", originSyncPath), err, op, apperr.Fatal, ErrInitialize) 162 | } 163 | log.Info().Str("path", originSyncPath).Msg("sync path passed initial checks on origin") 164 | 165 | // initialize destination sync path 166 | initialized, err := destinationConsul.IsSyncPathInitialized(destinationSyncPath) 167 | if err != nil { 168 | log.Debug().Err(err).Str("path", destinationSyncPath).Msg("failures on checking if sync path is initalized on destination") 169 | return apperr.New(fmt.Sprintf("sync path %q already 
initialized check failed", destinationSyncPath), err, op, apperr.Fatal, ErrInitialize) 170 | } 171 | if initialized { 172 | log.Info().Str("path", destinationSyncPath).Msg("path is already initialized") 173 | } else { 174 | destinationInfo, err := syncer.NewInfo(numBuckets, hasher) 175 | if err != nil { 176 | log.Debug().Err(err).Int("numBuckets", numBuckets).Str("path", destinationSyncPath).Msg("failure in creating new destination sync info, while checking if destination sync path exists") 177 | return apperr.New(fmt.Sprintf("sync path %q not initialized already, could not create new destination info with buckets %q", destinationSyncPath, numBuckets), err, op, apperr.Fatal, ErrInitialize) 178 | } 179 | 180 | err = syncer.InfoToConsul(destinationConsul, destinationInfo, destinationSyncPath) 181 | if err != nil { 182 | log.Debug().Err(err).Str("path", destinationSyncPath).Msg("cannot initialize sync info in destination consul") 183 | return apperr.New(fmt.Sprintf("sync path %q not initialized already, could not initialize now", destinationSyncPath), err, op, apperr.Fatal, ErrInitialize) 184 | } 185 | 186 | log.Info().Str("path", destinationSyncPath).Msg("path is initialized") 187 | } 188 | 189 | destinationChecks := vault.CheckDestination 190 | // sync or ignore deletes? 191 | // some times origin submits an empty sync data {} esp when origin vault is not responding as expected 192 | // which makes destination think origin has deleted all secrets and then 193 | // destination soft deletes them too. 
Scary 194 | if viper.GetBool("ignoreDeletes") { 195 | log.Info().Msg("ignore deletes is true, so we cannot soft delete ( delete latest version ) in destination vault") 196 | syncer.IgnoreDeletes = true 197 | destinationChecks = vault.CheckDestinationWithoutDelete 198 | } 199 | 200 | // perform intial checks on mounts, check kv v2 and token permissions 201 | // check origin token permissions 202 | if len(originMounts) == 0 { 203 | return apperr.New(fmt.Sprintf("no %q mounts found for syncing, specify mounts in config", "origin"), err, op, apperr.Fatal, ErrInitialize) 204 | } 205 | for _, mount := range originMounts { 206 | if !strings.HasSuffix(mount, "/") { 207 | log.Debug().Err(err).Msg("failures on mount checks on origin, missing a / at last for each mount") 208 | return apperr.New(fmt.Sprintf("failures on mount checks on origin, missing a / at last for each mount"), err, op, apperr.Fatal, ErrInitialize) 209 | } 210 | err = originVault.MountChecks(mount, vault.CheckOrigin, name) 211 | if err != nil { 212 | log.Debug().Err(err).Msg("failures on data paths checks on origin") 213 | return apperr.New(fmt.Sprintf("failures on data paths checks on origin"), err, op, apperr.Fatal, ErrInitialize) 214 | } 215 | } 216 | log.Info().Strs("mounts", originMounts).Msg("mounts passed initial checks on origin") 217 | 218 | // check destination token permissions 219 | if len(destinationMounts) == 0 { 220 | return apperr.New(fmt.Sprintf("no %q mounts found for syncing, specify mounts in config", "destination"), err, op, apperr.Fatal, ErrInitialize) 221 | } 222 | for _, mount := range destinationMounts { 223 | if !strings.HasSuffix(mount, "/") { 224 | log.Debug().Err(err).Msg("failures on mount checks on destination, missing a / at last for each mount") 225 | return apperr.New(fmt.Sprintf("failures on mount checks on destination, missing a / at last for each mount"), err, op, apperr.Fatal, ErrInitialize) 226 | } 227 | err = destinationVault.MountChecks(mount, destinationChecks, 
name) 228 | if err != nil { 229 | log.Debug().Err(err).Msg("failures on mount checks on destination") 230 | return apperr.New(fmt.Sprintf("failures on mount checks on destination"), err, op, apperr.Fatal, ErrInitialize) 231 | } 232 | } 233 | log.Info().Strs("mounts", destinationMounts).Msg("mounts passed initial checks on destination") 234 | 235 | log.Info().Msg("********** starting destination sync **********\n") 236 | 237 | // prepare for getting sync data from origin 238 | go prepareWatch(ctx, originConsul, originSyncPath, triggerCh, errCh) 239 | go prepareTicker(ctx, originConsul, originSyncPath, tick, triggerCh, errCh) 240 | go destinationSync(ctx, name, 241 | originConsul, originSyncPath, originVault, originMounts, 242 | destinationConsul, destinationSyncPath, destinationVault, destinationMounts, 243 | pack, 244 | hasher, numBuckets, timeout, numWorkers, 245 | triggerCh, errCh) 246 | 247 | // origin token renewer go routine 248 | if viper.GetBool("origin.renewToken") { 249 | go originVault.TokenRenewer(ctx, errCh) 250 | } 251 | // destination token renewer go routine 252 | if viper.GetBool("destination.renewToken") { 253 | go destinationVault.TokenRenewer(ctx, errCh) 254 | } 255 | 256 | // lock the main go routine in for select until we get os signals 257 | for { 258 | select { 259 | case err := <-errCh: 260 | if apperr.ShouldPanic(err) { 261 | telemetryClient.Count("vsync.destination.error", 1, "type:panic") 262 | cancel() 263 | time.Sleep(1 * time.Second) 264 | close(errCh) 265 | close(sigCh) 266 | log.Panic().Interface("ops", apperr.Ops(err)).Msg(err.Error()) 267 | return err 268 | } else if apperr.ShouldStop(err) { 269 | telemetryClient.Count("vsync.destination.error", 1, "type:fatal") 270 | cancel() 271 | time.Sleep(1 * time.Second) 272 | close(errCh) 273 | close(sigCh) 274 | log.Error().Interface("ops", apperr.Ops(err)).Msg(err.Error()) 275 | return err 276 | } else { 277 | telemetryClient.Count("vsync.destination.error", 1, "type:fatal") 278 | 
log.Warn().Interface("ops", apperr.Ops(err)).Msg(err.Error()) 279 | } 280 | case sig := <-sigCh: 281 | telemetryClient.Count("vsync.destination.interrupt", 1) 282 | log.Error().Interface("signal", sig).Msg("signal received, closing all go routines") 283 | cancel() 284 | time.Sleep(1 * time.Second) 285 | close(errCh) 286 | close(sigCh) 287 | return apperr.New(fmt.Sprintf("signal received %q, closing all go routines", sig), err, op, apperr.Fatal, ErrInterrupted) 288 | } 289 | } 290 | }, 291 | } 292 | 293 | func prepareWatch(ctx context.Context, originConsul *consul.Client, originSyncPath string, triggerCh chan bool, errCh chan error) { 294 | const op = apperr.Op("cmd.destination.prepareWatch") 295 | syncIndex := originSyncPath + "index" 296 | 297 | // prepare the watch 298 | plan, err := watch.Parse(map[string]interface{}{ 299 | "type": "key", 300 | "stale": true, 301 | "key": syncIndex, 302 | "datacenter": originConsul.Dc, 303 | }) 304 | if err != nil { 305 | log.Debug().Err(err). 306 | Str("key", syncIndex).Str("origin", originConsul.Dc). 
307 | Msg("cannot make plan for key watch in origin from destination") 308 | errCh <- apperr.New(fmt.Sprintf("cannot make plan for key %q watch in origin %q from destination", syncIndex, originConsul.Dc), err, op, apperr.Fatal, ErrInvalidCPath) 309 | } 310 | 311 | // handler to send data to another kv channel 312 | plan.HybridHandler = func(blockParamVal watch.BlockingParamVal, val interface{}) { 313 | // TODO: test blockParamVal https://github.com/hashicorp/consul/blob/master/api/watch/plan_test.go 314 | if val == nil { 315 | log.Debug().Msg("nil value received from consul watch") 316 | return 317 | } 318 | 319 | triggerCh <- true 320 | telemetryClient.Count("vsync.destination.watch.triggered", 1) 321 | log.Info().Msg("consul watch triggered for getting sync index from origin consul") 322 | } 323 | 324 | // create a new go routine because plan run will block 325 | go func() { 326 | err = plan.Run(originConsul.Address) 327 | log.Debug().Str("trigger", "context done").Str("path", syncIndex).Msg("closed consul watch") 328 | if err != nil { 329 | log.Debug().Err(err).Msg("failure while performing consul watch run") 330 | errCh <- apperr.New(fmt.Sprintf("failure while performing consul watch from destination to origin %q", originConsul.Dc), err, op, apperr.Fatal, ErrInitialize) 331 | } 332 | }() 333 | 334 | // lock the current go routine 335 | // if context is done then stop the plan 336 | <-ctx.Done() 337 | plan.Stop() 338 | time.Sleep(100 * time.Microsecond) 339 | log.Debug().Str("trigger", "context done").Str("path", syncIndex).Msg("closed prepare watch") 340 | } 341 | 342 | func prepareTicker(ctx context.Context, originConsul *consul.Client, originSyncPath string, tick time.Duration, triggerCh chan bool, errCh chan error) { 343 | syncIndex := originSyncPath + "index" 344 | ticker := time.NewTicker(tick) 345 | 346 | for { 347 | select { 348 | case <-ctx.Done(): 349 | ticker.Stop() 350 | time.Sleep(100 * time.Microsecond) 351 | log.Debug().Str("trigger", "context 
done").Str("path", syncIndex).Msg("closed consul get sync index timer for path") 352 | return 353 | case <-ticker.C: 354 | telemetryClient.Count("vsync.destination.timer.triggered", 1) 355 | log.Info().Msg("timer triggered for getting sync index from origin consul") 356 | triggerCh <- true 357 | } 358 | } 359 | } 360 | 361 | // destinationSync compares sync entries then update actual and sync entries 362 | func destinationSync(ctx context.Context, name string, 363 | originConsul *consul.Client, originSyncPath string, originVault *vault.Client, originMounts []string, 364 | destinationConsul *consul.Client, destinationSyncPath string, destinationVault *vault.Client, destinationMounts []string, 365 | pack transformer.Pack, 366 | hasher hash.Hash, numBuckets int, timeout time.Duration, numWorkers int, 367 | triggerCh chan bool, errCh chan error) { 368 | 369 | const op = apperr.Op("cmd.destinationSync") 370 | 371 | for { 372 | select { 373 | case <-ctx.Done(): 374 | time.Sleep(100 * time.Microsecond) 375 | telemetryClient.Count("vsync.destination.cycle", 1, "status:stopped") 376 | log.Debug().Str("trigger", "context done").Msg("closed destination sync") 377 | return 378 | case _, ok := <-triggerCh: 379 | if !ok { 380 | time.Sleep(100 * time.Microsecond) 381 | log.Debug().Str("trigger", "nil channel").Msg("closed destination sync") 382 | return 383 | } 384 | 385 | telemetryClient.Count("vsync.destination.cycle", 1, "status:started") 386 | log.Info().Msg("") 387 | log.Debug().Msg("sync info changed in origin") 388 | 389 | syncCtx, syncCancel := context.WithTimeout(ctx, timeout) 390 | 391 | // check origin token permission before starting each cycle 392 | for _, oMount := range originMounts { 393 | err := originVault.MountChecks(oMount, vault.CheckOrigin, name) 394 | if err != nil { 395 | log.Debug().Err(err).Msg("failures on data paths checks on origin") 396 | errCh <- apperr.New(fmt.Sprintf("failures on data paths checks on origin"), err, op, apperr.Fatal, ErrInitialize) 
397 | 398 | syncCancel() 399 | time.Sleep(500 * time.Microsecond) 400 | telemetryClient.Count("vsync.destination.cycle", 1, "status:failure") 401 | log.Info().Msg("incomplete sync cycle, failure in vault connectivity or token permission\n") 402 | return 403 | } 404 | } 405 | 406 | destinationChecks := vault.CheckDestination 407 | if syncer.IgnoreDeletes { 408 | destinationChecks = vault.CheckDestinationWithoutDelete 409 | } 410 | 411 | // check destination token permission before starting each cycle 412 | for _, dMount := range destinationMounts { 413 | err := destinationVault.MountChecks(dMount, destinationChecks, name) 414 | if err != nil { 415 | log.Debug().Err(err).Msg("failures on data paths checks on destination") 416 | errCh <- apperr.New(fmt.Sprintf("failures on data paths checks on destination"), err, op, apperr.Fatal, ErrInitialize) 417 | 418 | syncCancel() 419 | time.Sleep(500 * time.Microsecond) 420 | telemetryClient.Count("vsync.destination.cycle", 1, "status:failure") 421 | log.Info().Msg("incomplete sync cycle, failure in vault connectivity or token permission\n") 422 | return 423 | } 424 | } 425 | 426 | // origin sync info 427 | originfo, err := syncer.NewInfo(numBuckets, hasher) 428 | if err != nil { 429 | log.Debug().Err(err).Int("numBuckets", numBuckets).Str("path", originSyncPath).Msg("failure in initializing origin sync info") 430 | errCh <- apperr.New(fmt.Sprintf("cannot create new sync info in path %q", originSyncPath), err, apperr.Fatal, op, ErrInitialize) 431 | 432 | syncCancel() 433 | time.Sleep(100 * time.Microsecond) 434 | log.Warn().Msg("incomplete sync cycle, failure in creating new origin sync info\n") 435 | continue 436 | } 437 | 438 | err = syncer.InfoFromConsul(originConsul, originfo, originSyncPath) 439 | if err != nil { 440 | log.Debug().Err(err).Str("path", originSyncPath).Msg("cannot get sync info from origin consul") 441 | errCh <- apperr.New(fmt.Sprintf("cannot get sync info in path %q", originSyncPath), err, apperr.Fatal, 
op, ErrInvalidInfo) 442 | 443 | syncCancel() 444 | time.Sleep(100 * time.Microsecond) 445 | log.Warn().Msg("incomplete sync cycle, failure in getting origin sync info\n") 446 | continue 447 | } 448 | log.Info().Msg("retrieved origin sync info") 449 | 450 | // destination sync info 451 | destinationInfo, err := syncer.NewInfo(numBuckets, hasher) 452 | if err != nil { 453 | log.Debug().Err(err).Int("numBuckets", numBuckets).Str("path", destinationSyncPath).Msg("failure in initializing destination sync info") 454 | errCh <- apperr.New(fmt.Sprintf("cannot create new sync info in path %q", destinationSyncPath), err, apperr.Fatal, op, ErrInitialize) 455 | 456 | syncCancel() 457 | time.Sleep(100 * time.Microsecond) 458 | log.Warn().Msg("incomplete sync cycle, failure in creating new destination sync info\n") 459 | continue 460 | } 461 | 462 | err = syncer.InfoFromConsul(destinationConsul, destinationInfo, destinationSyncPath) 463 | if err != nil { 464 | log.Debug().Err(err).Str("path", destinationSyncPath).Msg("cannot get sync info from destination consul") 465 | errCh <- apperr.New(fmt.Sprintf("cannot get sync info in path %q", destinationSyncPath), err, op, apperr.Fatal, ErrInvalidInfo) 466 | 467 | syncCancel() 468 | time.Sleep(100 * time.Microsecond) 469 | log.Warn().Msg("incomplete sync cycle, failure in getting destination sync info\n") 470 | continue 471 | } 472 | log.Info().Msg("retrieved destination sync info") 473 | 474 | // compare sync info 475 | addTasks, updateTasks, deleteTasks, errs := originfo.Compare(destinationInfo) 476 | for _, err := range errs { 477 | errCh <- apperr.New(fmt.Sprintf("cannot compare origin and destination infos"), err, op, ErrInvalidInsight) 478 | } 479 | 480 | telemetryClient.Gauge("vsync.destination.paths.to_be_processed", float64(len(addTasks)), "operation:add") 481 | telemetryClient.Gauge("vsync.destination.paths.to_be_processed", float64(len(updateTasks)), "operation:update") 482 | 
telemetryClient.Gauge("vsync.destination.paths.to_be_processed", float64(len(deleteTasks)), "operation:delete") 483 | log.Info().Int("count", len(addTasks)).Msg("paths to be added to destination") 484 | log.Info().Int("count", len(updateTasks)).Msg("paths to be updated to destination") 485 | log.Info().Int("count", len(deleteTasks)).Msg("paths to be deleted from destination") 486 | 487 | // create go routines for fetch and save and inturn saves to destination sync info 488 | var wg sync.WaitGroup 489 | inTaskCh := make(chan syncer.Task, numWorkers) 490 | for i := 0; i < numWorkers; i++ { 491 | wg.Add(1) 492 | go syncer.FetchAndSave(syncCtx, 493 | &wg, i, 494 | originVault, destinationVault, 495 | destinationInfo, pack, 496 | inTaskCh, 497 | errCh) 498 | } 499 | 500 | // create go routine to save sync info to consul 501 | // 1 buffer to unblock this main routine in case timeout closes gather go routine 502 | // so no one exists to send data in saved channel which blocks the main routine 503 | saveCh := make(chan bool, 1) 504 | doneCh := make(chan bool, 1) 505 | go saveInfoToConsul(syncCtx, 506 | destinationInfo, destinationConsul, destinationSyncPath, 507 | saveCh, doneCh, errCh) 508 | 509 | // no changes 510 | if len(addTasks) == 0 && len(updateTasks) == 0 && len(deleteTasks) == 0 { 511 | log.Info().Msg("no changes from origin") 512 | 513 | syncCancel() 514 | time.Sleep(500 * time.Microsecond) 515 | log.Info().Msg("completed sync cycle, no changes\n") 516 | continue 517 | } 518 | 519 | // we need to send tasks to workers as well as watch for context done 520 | // in case of more paths and a timeout the worker will exit but we would be waiting forever for some worker to recieve the job 521 | go sendTasks(syncCtx, inTaskCh, addTasks, updateTasks, deleteTasks) 522 | 523 | // close the inTaskCh and wait for all the workers and sync info to finish 524 | // in case of timeout the workers 525 | // mostly perform the current processing and then die, so we have to wait till 
they die 526 | // which takes at most 1 minute * number of retries per client call 527 | wg.Wait() 528 | 529 | err = destinationInfo.Reindex() 530 | if err != nil { 531 | errCh <- apperr.New(fmt.Sprintf("cannot reindex destination info"), err, op, ErrInvalidInfo) 532 | } 533 | 534 | // trigger save info to consul and wait for done 535 | saveCh <- true 536 | close(saveCh) 537 | 538 | if ok := <-doneCh; ok { 539 | log.Info().Int("buckets", numBuckets).Msg("saved origin sync info in consul") 540 | } else { 541 | errCh <- apperr.New(fmt.Sprintf("cannot save origin, mostly due to timeout"), ErrTimout, op, apperr.Fatal) 542 | } 543 | 544 | // cancel any go routine and free context memory 545 | syncCancel() 546 | time.Sleep(500 * time.Microsecond) 547 | telemetryClient.Count("vsync.destination.cycle", 1, "status:success") 548 | log.Info().Msg("completed sync cycle\n") 549 | } 550 | } 551 | } 552 | 553 | func getTransfomerPack() (transformer.Pack, error) { 554 | const op = apperr.Op("cmd.getTransfomerPack") 555 | p := transformer.Pack{} 556 | 557 | ts := []struct { 558 | Name string `json:"name"` 559 | From string `json:"from"` 560 | To string `json:"to"` 561 | }{} 562 | err := viper.UnmarshalKey("destination.transforms", &ts) 563 | if err != nil { 564 | log.Debug().Err(err).Str("lookup", "destination.transforms").Msg("cannot get or unmarshal transformers from config") 565 | return p, apperr.New(fmt.Sprintf("cannot get or unmarshal transformers from config %q", "destination.transforms"), err, op, ErrInitialize) 566 | } 567 | 568 | for _, t := range ts { 569 | namedRegexp, err := transformer.NewNamedRegexpTransformer(t.Name, t.From, t.To) 570 | if err != nil { 571 | log.Debug().Err(err).Str("from", t.From).Str("to", t.To).Msg("cannot get named regexp transformer") 572 | return p, apperr.New(fmt.Sprintf("cannot get transformer %q into pack with regexp %q", t.Name, t.From), err, op, ErrInitialize) 573 | } 574 | p = append(p, namedRegexp) 575 | } 576 | 577 | dp, err := 
transformer.DefaultPack() 578 | if err != nil { 579 | log.Debug().Err(err).Msg("cannot get default transformer pack") 580 | return p, apperr.New(fmt.Sprintf("cannot get default transformer pack"), err, op, ErrInitialize) 581 | } 582 | 583 | p = append(p, dp...) 584 | log.Info().Int("len", len(p)).Msg("transformers are initialized and packed") 585 | log.Debug().Interface("pack", p).Msg("transformers in destination") 586 | return p, nil 587 | } 588 | 589 | // tasks to update destination based on origin 590 | func sendTasks(ctx context.Context, taskCh chan syncer.Task, addTasks []syncer.Task, updateTasks []syncer.Task, deleteTasks []syncer.Task) { 591 | defer close(taskCh) 592 | 593 | for i, t := range addTasks { 594 | select { 595 | case <-ctx.Done(): 596 | telemetryClient.Gauge("vsync.destination.paths.skipped", float64(len(addTasks)-i), "operation:add") 597 | log.Info().Str("trigger", "context done").Int("left", len(addTasks)-i).Msg("add tasks skipped") 598 | return 599 | default: 600 | taskCh <- t 601 | } 602 | } 603 | 604 | for i, t := range updateTasks { 605 | select { 606 | case <-ctx.Done(): 607 | telemetryClient.Gauge("vsync.destination.paths.skipped", float64(len(updateTasks)-i), "operation:update") 608 | log.Info().Str("trigger", "context done").Int("left", len(updateTasks)-i).Msg("update tasks skipped") 609 | return 610 | default: 611 | taskCh <- t 612 | } 613 | } 614 | 615 | for i, t := range deleteTasks { 616 | select { 617 | case <-ctx.Done(): 618 | telemetryClient.Gauge("vsync.destination.paths.skipped", float64(len(deleteTasks)-i), "operation:delete") 619 | log.Info().Str("trigger", "context done").Int("left", len(deleteTasks)-i).Msg("delete tasks skipped") 620 | return 621 | default: 622 | taskCh <- t 623 | } 624 | } 625 | } 626 | -------------------------------------------------------------------------------- /cmd/origin.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 
2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package cmd 16 | 17 | import ( 18 | "context" 19 | "crypto/sha256" 20 | "fmt" 21 | "hash" 22 | "os" 23 | "os/signal" 24 | "strings" 25 | "sync" 26 | "syscall" 27 | "time" 28 | 29 | "github.com/ExpediaGroup/vsync/apperr" 30 | "github.com/ExpediaGroup/vsync/consul" 31 | "github.com/ExpediaGroup/vsync/syncer" 32 | "github.com/ExpediaGroup/vsync/vault" 33 | "github.com/rs/zerolog/log" 34 | "github.com/spf13/cobra" 35 | "github.com/spf13/viper" 36 | ) 37 | 38 | func init() { 39 | viper.SetDefault("name", "origin") // name is required for mount checks and telemetry 40 | viper.SetDefault("numBuckets", 1) // we need atleast one bucket to store info 41 | viper.SetDefault("origin.tick", "10s") 42 | viper.SetDefault("origin.timeout", "5m") 43 | viper.SetDefault("origin.syncPath", "vsync/") 44 | viper.SetDefault("origin.numWorkers", 1) // we need atleast 1 worker or else the sync routine will be blocked 45 | 46 | if err := viper.BindPFlags(originCmd.PersistentFlags()); err != nil { 47 | log.Panic(). 48 | Err(err). 49 | Str("command", "origin"). 50 | Str("flags", "persistent"). 51 | Msg("cannot bind flags with viper") 52 | } 53 | 54 | if err := viper.BindPFlags(originCmd.Flags()); err != nil { 55 | log.Panic(). 56 | Err(err). 57 | Str("command", "origin"). 58 | Str("flags", "transient"). 
59 | Msg("cannot bind flags with viper") 60 | } 61 | 62 | rootCmd.AddCommand(originCmd) 63 | } 64 | 65 | var originCmd = &cobra.Command{ 66 | Use: "origin", 67 | Short: "Generate sync data structure in consul kv for entities that we need to distribute", 68 | Long: `For every entity (secrets) in the path, we get metadata and prepare sync data structure save it in consul kv sync path so that other clients can watch for changes`, 69 | SilenceUsage: true, 70 | SilenceErrors: true, 71 | 72 | RunE: func(cmd *cobra.Command, args []string) error { 73 | const op = apperr.Op("cmd.origin") 74 | 75 | ctx, cancel := context.WithCancel(context.Background()) 76 | defer cancel() 77 | 78 | // initial configs 79 | name := viper.GetString("name") 80 | numBuckets := viper.GetInt("numBuckets") 81 | tick := viper.GetDuration("origin.tick") 82 | timeout := viper.GetDuration("origin.timeout") 83 | numWorkers := viper.GetInt("origin.numWorkers") 84 | originSyncPath := viper.GetString("origin.syncPath") 85 | originMounts := viper.GetStringSlice("origin.mounts") 86 | hasher := sha256.New() 87 | 88 | // deprecated 89 | syncPathDepr := viper.GetString("syncPath") 90 | if syncPathDepr != "" { 91 | log.Error().Str("mode", "origin").Str("syncPath", syncPathDepr).Msg("syncPath variable is deprecated, use origin.syncPath and destination.syncPath, they can be same value") 92 | return apperr.New(fmt.Sprintf("parameter %q deprecated, use %q", "syncPath", "origin.syncPath"), ErrInitialize, op, apperr.Fatal) 93 | } 94 | originDcDepr := viper.GetString("origin.dc") 95 | if originDcDepr != "" { 96 | log.Error().Str("mode", "origin").Str("origin.dc", originDcDepr).Msg("origin.dc variable is deprecated, use origin.consul.dc") 97 | return apperr.New(fmt.Sprintf("parameter %q deprecated, use %q", "origin.dc", "origin.consul.dc"), ErrInitialize, op, apperr.Fatal) 98 | } 99 | 100 | // telemetry client 101 | telemetryClient.AddTags("mpaas_application_name:vsync_" + name) 102 | 103 | // get origin consul and 
vault 104 | originConsul, originVault, err := getEssentials("origin") 105 | if err != nil { 106 | log.Debug().Err(err).Str("mode", "origin").Msg("cannot get essentials") 107 | return apperr.New(fmt.Sprintf("cannot get clients for mode %q", "origin"), err, op, apperr.Fatal, ErrInitialize) 108 | } 109 | 110 | // perform inital checks on sync path, check kv and token permissions 111 | if originSyncPath[len(originSyncPath)-1:] != "/" { 112 | originSyncPath = originSyncPath + "/" 113 | } 114 | originSyncPath = originSyncPath + "origin/" // adds type into sync path, useful in case we use same syncPath in same consul 115 | 116 | err = originConsul.SyncPathChecks(originSyncPath, consul.StdCheck) 117 | if err != nil { 118 | log.Debug().Err(err).Str("path", originSyncPath).Msg("failures on sync path checks on origin") 119 | return apperr.New(fmt.Sprintf("sync path checks failed for %q", originSyncPath), err, op, apperr.Fatal, ErrInitialize) 120 | } 121 | log.Info().Str("path", originSyncPath).Msg("sync path passed initial checks") 122 | 123 | // perform intial checks on mounts, check kv v2 and token permissions 124 | // check origin token permissions 125 | if len(originMounts) == 0 { 126 | return apperr.New(fmt.Sprintf("no %q mounts found for syncing, specify mounts in config", "origin"), err, op, apperr.Fatal, ErrInitialize) 127 | } 128 | for _, mount := range originMounts { 129 | if !strings.HasSuffix(mount, "/") { 130 | log.Debug().Err(err).Msg("failures on mount checks on origin, missing a / at last for each mount") 131 | return apperr.New(fmt.Sprintf("failures on mount checks on origin, missing a / at last for each mount"), err, op, apperr.Fatal, ErrInitialize) 132 | } 133 | err = originVault.MountChecks(mount, vault.CheckOrigin, name) 134 | if err != nil { 135 | log.Debug().Err(err).Msg("failures on mount checks on origin") 136 | return apperr.New(fmt.Sprintf("failures on mount checks on origin"), err, op, apperr.Fatal, ErrInitialize) 137 | } 138 | } 139 | 
log.Info().Strs("mounts", originMounts).Msg("mounts passed initial checks on origin") 140 | 141 | log.Info().Msg("********** starting origin sync **********\n") 142 | 143 | // setup channels 144 | errCh := make(chan error, numWorkers) // equal to number of go routines so that we can close it and dont worry about nil channel panic 145 | sigCh := make(chan os.Signal, 3) // 3 -> number of signals it may need to handle at single point in time 146 | signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) 147 | 148 | // start the sync go routine 149 | go originSync(ctx, name, 150 | originConsul, originVault, 151 | tick, timeout, 152 | originSyncPath, originMounts, 153 | hasher, numBuckets, numWorkers, 154 | errCh) 155 | 156 | // origin token renewer go routine 157 | if viper.GetBool("origin.renewToken") { 158 | go originVault.TokenRenewer(ctx, errCh) 159 | } 160 | 161 | // lock the main go routine in for select until we get os signals 162 | for { 163 | select { 164 | case err := <-errCh: 165 | 166 | if apperr.ShouldPanic(err) { 167 | telemetryClient.Count("vsync.origin.error", 1, "type:panic") 168 | cancel() 169 | time.Sleep(1 * time.Second) 170 | close(errCh) 171 | close(sigCh) 172 | log.Panic().Interface("ops", apperr.Ops(err)).Msg(err.Error()) 173 | return err 174 | } else if apperr.ShouldStop(err) { 175 | telemetryClient.Count("vsync.origin.error", 1, "type:fatal") 176 | cancel() 177 | time.Sleep(1 * time.Second) 178 | close(errCh) 179 | close(sigCh) 180 | log.Error().Interface("ops", apperr.Ops(err)).Msg(err.Error()) 181 | return err 182 | } else { 183 | telemetryClient.Count("vsync.origin.error", 1, "type:warn") 184 | log.Warn().Interface("ops", apperr.Ops(err)).Msg(err.Error()) 185 | } 186 | case sig := <-sigCh: 187 | telemetryClient.Count("vsync.origin.interrupt", 1) 188 | log.Error().Interface("signal", sig).Msg("signal received, closing all go routines") 189 | cancel() 190 | time.Sleep(1 * time.Second) 191 | close(errCh) 192 | close(sigCh) 193 | return 
apperr.New(fmt.Sprintf("signal received %q, closing all go routines", sig), err, op, apperr.Fatal, ErrInterrupted) 194 | } 195 | } 196 | }, 197 | } 198 | 199 | func originSync(ctx context.Context, name string, 200 | originConsul *consul.Client, originVault *vault.Client, 201 | tick time.Duration, timeout time.Duration, 202 | originSyncPath string, originMounts []string, 203 | hasher hash.Hash, numBuckets int, numWorkers int, 204 | errCh chan error) { 205 | const op = apperr.Op("cmd.originSync") 206 | 207 | metaPaths := []string{} 208 | for _, mount := range originMounts { 209 | metaPaths = append(metaPaths, fmt.Sprintf("%smetadata", mount)) 210 | } 211 | 212 | ticker := time.NewTicker(tick) 213 | 214 | // sync cycle 215 | for { 216 | select { 217 | case <-ctx.Done(): 218 | ticker.Stop() 219 | time.Sleep(100 * time.Microsecond) 220 | telemetryClient.Count("vsync.origin.cycle", 1, "status:stopped") 221 | log.Debug().Str("trigger", "context done").Msg("closed origin sync") 222 | return 223 | case <-ticker.C: 224 | 225 | telemetryClient.Count("vsync.origin.cycle", 1, "status:started") 226 | log.Info().Msg("") 227 | log.Info().Msg("timer triggered for origin sync") 228 | 229 | syncCtx, syncCancel := context.WithTimeout(ctx, timeout) 230 | 231 | // check origin token permission before starting each cycle 232 | for _, oMount := range originMounts { 233 | err := originVault.MountChecks(oMount, vault.CheckOrigin, name) 234 | if err != nil { 235 | log.Debug().Err(err).Msg("failures on data paths checks on origin") 236 | errCh <- apperr.New(fmt.Sprintf("failures on data paths checks on origin"), err, op, apperr.Fatal, ErrInitialize) 237 | 238 | syncCancel() 239 | time.Sleep(500 * time.Microsecond) 240 | telemetryClient.Count("vsync.destination.cycle", 1, "status:failure") 241 | log.Info().Msg("incomplete sync cycle, failure in vault connectivity or token permission\n") 242 | return 243 | } 244 | } 245 | 246 | // create new sync info 247 | originfo, err := 
syncer.NewInfo(numBuckets, hasher) 248 | if err != nil { 249 | errCh <- apperr.New(fmt.Sprintf("cannot create new sync info in path %q", originSyncPath), err, op, apperr.Fatal, ErrInitialize) 250 | } 251 | 252 | // walk recursively to get all secret absolute paths 253 | paths, errs := originVault.GetAllPaths(metaPaths) 254 | for _, err := range errs { 255 | // TODO: make sure this does not print the same last error because we are using range 256 | errCh <- apperr.New(fmt.Sprintf("cannot recursively walk through paths %q", metaPaths), err, op, apperr.Fatal, ErrInitialize) 257 | } 258 | telemetryClient.Gauge("vsync.origin.paths.to_be_processed", float64(len(paths))) 259 | log.Info().Int("numPaths", len(paths)).Msg("generating origin sync info for paths") 260 | 261 | // create go routines for generating insights and inturn saves to sync info 262 | var wg sync.WaitGroup 263 | inPathCh := make(chan string, numWorkers) 264 | for i := 0; i < numWorkers; i++ { 265 | wg.Add(1) 266 | go syncer.GenerateInsight(syncCtx, 267 | &wg, i, 268 | originVault, originfo, 269 | inPathCh, 270 | errCh) 271 | } 272 | 273 | // create go routine to save sync info to consul 274 | // 1 buffer to unblock this main routine in case timeout closes gather go routine 275 | // so no one exists to send data in saved channel which blocks the main routine 276 | saveCh := make(chan bool, 1) 277 | doneCh := make(chan bool, 1) 278 | go saveInfoToConsul(syncCtx, 279 | originfo, originConsul, originSyncPath, 280 | saveCh, doneCh, errCh) 281 | 282 | // we need to send path to workers as well as watch for context done 283 | // in case of more paths and a timeout the worker will exit but we would be waiting forever for some worker to recieve the job 284 | go sendPaths(syncCtx, inPathCh, paths) 285 | 286 | // sent all keys so close the input channel and wait for all generate insights workers to say done 287 | // in case of timeout the workers 288 | // mostly perform the current processing and then die, so we 
have to wait till they die 289 | // which takes at most 1 minute * number of retries per client call 290 | wg.Wait() 291 | 292 | err = originfo.Reindex() 293 | if err != nil { 294 | errCh <- apperr.New(fmt.Sprintf("cannot reindex origin info"), err, op, ErrInvalidInfo) 295 | } 296 | 297 | // trigger save info to consul and wait for done 298 | saveCh <- true 299 | close(saveCh) 300 | if ok := <-doneCh; ok { 301 | log.Info().Int("buckets", numBuckets).Msg("saved origin sync info in consul") 302 | } else { 303 | errCh <- apperr.New(fmt.Sprintf("cannot save origin sync info, mostly due to timeout"), ErrTimout, op, apperr.Fatal) 304 | } 305 | 306 | // cancel any go routine and free context memory 307 | syncCancel() 308 | time.Sleep(500 * time.Microsecond) 309 | telemetryClient.Count("vsync.origin.cycle", 1, "status:success") 310 | log.Info().Msg("completed sync cycle\n") 311 | } 312 | } 313 | } 314 | 315 | func sendPaths(ctx context.Context, pathCh chan string, paths []string) { 316 | defer close(pathCh) 317 | 318 | for i, path := range paths { 319 | select { 320 | case <-ctx.Done(): 321 | telemetryClient.Gauge("vsync.origin.paths.skipped", float64(len(paths)-i)) 322 | log.Info().Str("trigger", "context done").Int("left", len(paths)-i).Msg("paths skipped") 323 | return 324 | default: 325 | pathCh <- path 326 | } 327 | } 328 | } 329 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package cmd 16 | 17 | import ( 18 | "errors" 19 | "net" 20 | "net/http" 21 | _ "net/http/pprof" 22 | "os" 23 | "strings" 24 | "time" 25 | 26 | "github.com/rs/xstats" 27 | "github.com/rs/xstats/dogstatsd" 28 | "github.com/rs/zerolog" 29 | "github.com/rs/zerolog/log" 30 | "github.com/spf13/cobra" 31 | "github.com/spf13/viper" 32 | ) 33 | 34 | var ( 35 | buildCommit string 36 | buildTime string 37 | buildVersion string 38 | ) 39 | 40 | var ( 41 | ErrInvalidCPath = errors.New("invalid consul kv path") 42 | ErrInvalidVPath = errors.New("invalid vault path") 43 | ErrInvalidInfo = errors.New("invalid sync info") 44 | ErrInvalidInsight = errors.New("invalid insight") 45 | ErrUnknownOp = errors.New("unknown operation") 46 | ErrInitialize = errors.New("invalid config, not initialized") 47 | ErrInterrupted = errors.New("interrupted") 48 | ErrTimout = errors.New("time expired") 49 | ) 50 | 51 | var telemetryClient xstats.XStater 52 | 53 | // init is executed as first function for running the command line 54 | func init() { 55 | zerolog.TimestampFunc = func() time.Time { 56 | return time.Now().UTC() 57 | } 58 | zerolog.SetGlobalLevel(zerolog.InfoLevel) 59 | log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339}).With().Logger() 60 | 61 | cobra.OnInitialize(initConfig, pprofServer) 62 | 63 | rootCmd.PersistentFlags().StringP("config", "c", "", "load the config file along with path (default is $HOME/.vsync.json)") 64 | rootCmd.PersistentFlags().Bool("version", false, "version information") 65 | 
66 | rootCmd.PersistentFlags().String("log.level", "", "logger level (info|debug)") 67 | rootCmd.PersistentFlags().String("log.type", "", "logger type (console|json)") 68 | 69 | rootCmd.PersistentFlags().String("origin.consul.dc", "", "origin consul datacenter") 70 | rootCmd.PersistentFlags().String("origin.consul.address", "", "origin consul address") 71 | rootCmd.PersistentFlags().String("origin.vault.address", "", "origin vault address") 72 | rootCmd.PersistentFlags().String("origin.vault.token", "", "origin vault token") 73 | rootCmd.PersistentFlags().String("origin.vault.role_id", "", "origin vault approle role_id") 74 | rootCmd.PersistentFlags().String("origin.vault.secret_id", "", "origin vault approle secret_id") 75 | 76 | rootCmd.PersistentFlags().String("destination.consul.dc", "", "destination consul datacenter") 77 | rootCmd.PersistentFlags().String("destination.consul.address", "", "destination consul address") 78 | rootCmd.PersistentFlags().String("destination.vault.address", "", "destination vault address") 79 | rootCmd.PersistentFlags().String("destination.vault.token", "", "destination vault token") 80 | rootCmd.PersistentFlags().String("destination.vault.role_id", "", "destination vault approle role_id") 81 | rootCmd.PersistentFlags().String("destination.vault.secret_id", "", "destination vault approle secret_id") 82 | 83 | if err := viper.BindPFlags(rootCmd.PersistentFlags()); err != nil { 84 | log.Panic().Err(err).Str("command", "root").Str("flags", "persistent").Msg("cannot bind flags with viper") 85 | } 86 | if err := viper.BindPFlags(rootCmd.Flags()); err != nil { 87 | log.Panic().Err(err).Str("command", "root").Str("flags", "transient").Msg("cannot bind flags with viper") 88 | } 89 | } 90 | 91 | var rootCmd = &cobra.Command{ 92 | Use: "vsync", 93 | Short: "A tool that sync secrets between different vaults", 94 | Long: `A tool that sync secrets between different vaults using consul to store metadata`, 95 | SilenceUsage: true, 96 | 
SilenceErrors: true, 97 | RunE: func(cmd *cobra.Command, args []string) error { 98 | // version info 99 | if viper.GetBool("version") { 100 | log.Info(). 101 | Str("buildVersion", buildVersion). 102 | Str("buildCommit", buildCommit). 103 | Str("buildTime", buildTime). 104 | Msg("build info") 105 | 106 | return nil 107 | } 108 | 109 | return cmd.Help() 110 | }, 111 | } 112 | 113 | // Execute is the exposed entry point from main 114 | func Execute() error { 115 | 116 | // // profile 117 | // s := profile.Start(profile.TraceProfile, profile.ProfilePath("."), profile.NoShutdownHook) 118 | // defer s.Stop() 119 | 120 | // execute 121 | if err := rootCmd.Execute(); err != nil { 122 | return err 123 | } 124 | return nil 125 | } 126 | 127 | func initConfig() { 128 | if viper.GetString("config") != "" { 129 | viper.SetConfigFile(viper.GetString("config")) 130 | } else { 131 | viper.SetConfigName("config") 132 | viper.AddConfigPath(".") 133 | viper.AddConfigPath("/etc/vsync") 134 | } 135 | err := viper.ReadInConfig() 136 | if err == nil { 137 | log.Info().Str("config file", viper.ConfigFileUsed()).Msg("loaded config file") 138 | } else if viper.GetString("config") != "" { 139 | log.Fatal().Str("config file", viper.GetString("config")).Msg("cannot load config file") 140 | } 141 | 142 | viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) 143 | viper.SetEnvPrefix("VSYNC") 144 | viper.AutomaticEnv() 145 | 146 | if viper.GetString("log.type") == "json" { 147 | log.Logger = log.Output(os.Stdout) 148 | } 149 | 150 | if viper.GetString("log.level") == "debug" { 151 | zerolog.SetGlobalLevel(zerolog.DebugLevel) 152 | } 153 | // } else if viper.GetString("log.level") == "trace" { 154 | // zerolog.SetGlobalLevel(zerolog.TraceLevel) 155 | // } 156 | 157 | // // watch config for switching on debug even while server is running 158 | // // NOTE: it will cause 1 data race 159 | // viper.WatchConfig() 160 | // viper.OnConfigChange(func(e fsnotify.Event) { 161 | // log.Info().Str("file", 
e.Name).Msg("config changed") 162 | // 163 | // // check for debug 164 | // if viper.GetString("log.level") == "debug" { 165 | // zerolog.SetGlobalLevel(zerolog.DebugLevel) 166 | // } else { 167 | // } 168 | // }) 169 | 170 | // telemetry 171 | writer, err := net.Dial("udp", "127.0.0.1:8125") 172 | if err != nil { 173 | log.Fatal().Str("ip:port", "udp, 127.0.0.1:8125").Msg("writer could not be initialized") 174 | } 175 | telemetryClient = xstats.New(dogstatsd.New(writer, 10*time.Second)) 176 | } 177 | 178 | func pprofServer() { 179 | if viper.GetBool("pprof") { 180 | go func() { 181 | log.Info().Str("url", "http://localhost:6060/debug/pprof/").Msg("starting pprof server") 182 | err := http.ListenAndServe(":6060", nil) 183 | if err != nil { 184 | log.Error().Err(err).Msg("Failed to start pprof server") 185 | } 186 | log.Debug().Str("trigger", "context done").Msg("Stopping pprof server") 187 | }() 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /cmd/root_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package cmd 16 | -------------------------------------------------------------------------------- /cmd/util.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 
2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package cmd 16 | 17 | import ( 18 | "context" 19 | "fmt" 20 | "strings" 21 | "time" 22 | 23 | "github.com/ExpediaGroup/vsync/apperr" 24 | "github.com/ExpediaGroup/vsync/consul" 25 | "github.com/ExpediaGroup/vsync/syncer" 26 | "github.com/ExpediaGroup/vsync/vault" 27 | "github.com/rs/zerolog/log" 28 | "github.com/spf13/viper" 29 | ) 30 | 31 | // getEssentials will return consul and vault after reading required parameters from config 32 | func getEssentials(mode string) (*consul.Client, *vault.Client, error) { 33 | const op = apperr.Op("cmd.getEssentials") 34 | var vaultApprolePath string 35 | var vaultRoleID string 36 | var vaultSecretID string 37 | 38 | // consul client 39 | consulAddress := viper.GetString(mode + "." + "consul.address") 40 | if consulAddress != "" { 41 | log.Debug().Str("consulAddress", consulAddress).Str("mode", mode).Msg("got consul address") 42 | } else { 43 | return nil, nil, apperr.New(fmt.Sprintf("cannot get %s consul address", mode), ErrInitialize, op, apperr.Fatal) 44 | } 45 | 46 | dc := viper.GetString(mode + "." 
+ "consul.dc") 47 | if dc != "" { 48 | log.Debug().Str("dc", dc).Str("mode", mode).Msg("datacenter from config") 49 | } else { 50 | return nil, nil, apperr.New(fmt.Sprintf("cannot get %s datacenter from config", mode), ErrInitialize, op, apperr.Fatal) 51 | } 52 | 53 | c, err := consul.NewClient(consulAddress, dc) 54 | if err != nil { 55 | log.Debug().Err(err).Str("mode", mode).Msg("cannot get consul client") 56 | return nil, nil, apperr.New(fmt.Sprintf("cannot get %s consul client", mode), err, op, apperr.Fatal, ErrInitialize) 57 | } 58 | 59 | // vault client 60 | vaultToken := viper.GetString(mode + "." + "vault.token") 61 | if vaultToken != "" { 62 | log.Debug().Str("mode", mode).Msg("got vault token") 63 | } else { 64 | log.Info().Str("mode", mode).Msg("cannot get vault token") 65 | 66 | vaultApprolePath = viper.GetString(mode + "." + "vault.approle.path") 67 | if vaultApprolePath != "" { 68 | vaultApprolePath = strings.TrimPrefix(vaultApprolePath, "/") 69 | vaultApprolePath = strings.TrimSuffix(vaultApprolePath, "/") 70 | log.Debug().Str("mode", mode).Msg("got vault approle path") 71 | } else { 72 | log.Debug().Str("mode", mode).Msg("selecting default vault approle path approle/") 73 | vaultApprolePath = "approle" 74 | } 75 | 76 | vaultRoleID = viper.GetString(mode + "." + "vault.approle.role_id") 77 | if vaultRoleID != "" { 78 | log.Debug().Str("mode", mode).Msg("got vault role id") 79 | } else { 80 | return nil, nil, apperr.New(fmt.Sprintf("cannot get %s vault role id", mode), ErrInitialize, op, apperr.Fatal) 81 | } 82 | 83 | vaultSecretID = viper.GetString(mode + "." + "vault.approle.secret_id") 84 | if vaultSecretID != "" { 85 | log.Debug().Str("mode", mode).Msg("got vault secret id") 86 | } else { 87 | return nil, nil, apperr.New(fmt.Sprintf("cannot get %s vault secret id", mode), ErrInitialize, op, apperr.Fatal) 88 | } 89 | } 90 | 91 | vaultAddress := viper.GetString(mode + "." 
+ "vault.address") 92 | if vaultAddress != "" { 93 | log.Debug().Str("vaultAddress", vaultAddress).Str("mode", mode).Msg("got vault address") 94 | } else { 95 | return nil, nil, apperr.New(fmt.Sprintf("cannot get %s vault address", mode), ErrInitialize, op, apperr.Fatal) 96 | } 97 | 98 | v, err := vault.NewClient(vaultAddress, vaultToken, vaultApprolePath, vaultRoleID, vaultSecretID) 99 | if err != nil { 100 | log.Debug().Err(err).Str("mode", mode).Msg("cannot get vault client") 101 | return c, nil, apperr.New(fmt.Sprintf("cannot get %s vault client", mode), err, op, apperr.Fatal, ErrInitialize) 102 | } 103 | v.Mode = mode 104 | 105 | return c, v, nil 106 | } 107 | 108 | func saveInfoToConsul(ctx context.Context, 109 | info *syncer.Info, c *consul.Client, syncPath string, 110 | saveCh chan bool, doneCh chan bool, errCh chan error) { 111 | const op = apperr.Op("cmd.saveInfoToConsul") 112 | select { 113 | case <-ctx.Done(): 114 | doneCh <- false 115 | time.Sleep(50 * time.Microsecond) 116 | log.Debug().Str("trigger", "context done").Msg("closed save info to consul") 117 | return 118 | case _, ok := <-saveCh: 119 | if !ok { 120 | doneCh <- false 121 | time.Sleep(50 * time.Microsecond) 122 | log.Debug().Str("trigger", "nil channel").Msg("closed save info to consul") 123 | return 124 | } 125 | log.Debug().Str("path", syncPath).Msg("info to be saved in consul") 126 | 127 | err := syncer.InfoToConsul(c, info, syncPath) 128 | if err != nil { 129 | log.Debug().Err(err).Msg("cannot save info to consul") 130 | errCh <- apperr.New(fmt.Sprintf("cannot save info to consul in path %q", syncPath), ErrInitialize, op, apperr.Fatal) 131 | doneCh <- false 132 | return 133 | } 134 | doneCh <- true 135 | } 136 | } 137 | -------------------------------------------------------------------------------- /cmd/util_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 
2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package cmd 16 | 17 | import ( 18 | "testing" 19 | 20 | "github.com/stretchr/testify/assert" 21 | ) 22 | 23 | func TestDummy(t *testing.T) { 24 | assert.Equal(t, "", "") 25 | } 26 | -------------------------------------------------------------------------------- /configs/dest.v1.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dest_v1", 3 | "log": { 4 | "level": "debug", 5 | "type": "console" 6 | }, 7 | "numBuckets": 19, 8 | "origin": { 9 | "dc": "dc1", 10 | "vault": { 11 | "address": "http://127.0.0.1:6200", 12 | "token": "s.g1ew6c5qfpHASxWJsR2YJKXP" 13 | }, 14 | "consul": { 15 | "address": "http://127.0.0.1:6500" 16 | }, 17 | "mounts": [ 18 | "secret/" 19 | ], 20 | "syncPath": "vsync/", 21 | "numWorkers": 5, 22 | "tick": "10s", 23 | "timeout": "10s", 24 | "renewToken": false 25 | }, 26 | "destination": { 27 | "vault": { 28 | "address": "http://127.0.0.1:7200", 29 | "token": "s.09Pyj90P186pQVp60zC1CCN7" 30 | }, 31 | "consul": { 32 | "dc": "dc1", 33 | "address": "http://127.0.0.1:6500" 34 | }, 35 | "mounts": [ 36 | "secret/" 37 | ], 38 | "syncPath": "vsync/", 39 | "numWorkers": 10, 40 | "tick": "10s", 41 | "timeout": "10s" 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /configs/dest.v2.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "dest_v2", 3 | "log": { 4 | "level": "debug", 5 | "type": "console" 6 | }, 7 | "numBuckets": 19, 8 | "origin": { 9 | "vault": { 10 | "address": "http://127.0.0.1:6200", 11 | "token": "s.gtxa88U2wgdwWsZ7osxnjztO" 12 | }, 13 | "consul": { 14 | "dc": "dc1", 15 | "address": "http://127.0.0.1:6500" 16 | }, 17 | "mounts": [ 18 | "secret/" 19 | ], 20 | "syncPath": "vsync/", 21 | "numWorkers": 5, 22 | "tick": "10s", 23 | "timeout": "10s", 24 | "renewToken": false 25 | }, 26 | "destination": { 27 | "vault": { 28 | "address": "http://127.0.0.1:7200", 29 | "token": "s.n4s8CjzwsHA5FA65yEXTbdZK" 30 | }, 31 | "consul": { 32 | "dc": "dc2", 33 | "address": "http://127.0.0.1:7500" 34 | }, 35 | "mounts": [ 36 | "secret/" 37 | ], 38 | "syncPath": "vsync/", 39 | "numWorkers": 10, 40 | "tick": "10s", 41 | "timeout": "10s", 42 | "transforms": [ 43 | { 44 | "name": "v1->v2", 45 | "from": "(?Psecret)/(?P((meta)?data))?/(?Pmultipaas)/(?P(dev|test|stage|prod))?/?(?P\\w+)?/?", 46 | "to": "multipaas/meta/env/app/secrets" 47 | } 48 | ] 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /configs/dest.v3.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dest_v3", 3 | "log": { 4 | "level": "debug", 5 | "type": "console" 6 | }, 7 | "numBuckets": 19, 8 | "origin": { 9 | "vault": { 10 | "address": "http://127.0.0.1:6200", 11 | "approle": { 12 | "role_id": "36cd0b35-a87b-24a9-06f6-e19ba6c75588", 13 | "secret_id": "5e48f6de-5a07-e8a2-51b5-2dd5b5097dc7" 14 | } 15 | }, 16 | "consul": { 17 | "dc": "dc1", 18 | "address": "http://127.0.0.1:6500" 19 | }, 20 | "mounts": ["secret/"], 21 | "syncPath": "vsync/", 22 | "numWorkers": 5, 23 | "tick": "10s", 24 | "timeout": "10s", 25 | "renewToken": true 26 | }, 27 | "destination": { 28 | "vault": { 29 | "address": "http://127.0.0.1:7200", 30 | "approle": { 31 | 
"role_id": "8c69f0d3-ef13-cc48-347f-2fdb3da80a43", 32 | "secret_id": "d131f5a7-c0f5-870c-c224-3584e23c9ee6" 33 | } 34 | }, 35 | "consul": { 36 | "dc": "dc2", 37 | "address": "http://127.0.0.1:7500" 38 | }, 39 | "mounts": ["secret/"], 40 | "syncPath": "vsync/", 41 | "numWorkers": 10, 42 | "tick": "10s", 43 | "timeout": "10s" 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /configs/dest_loop.v2.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "dest_loop_v2", 3 | "log": { 4 | "level": "debug", 5 | "type": "console" 6 | }, 7 | "numBuckets": 19, 8 | "origin": { 9 | "vault": { 10 | "address": "http://127.0.0.1:6200", 11 | "token": "s.MDLmK6gOVLL33bB5TkdnJPOB" 12 | }, 13 | "consul": { 14 | "dc": "dc1", 15 | "address": "http://127.0.0.1:6500" 16 | }, 17 | "mounts": [ 18 | "secret/" 19 | ], 20 | "syncPath": "vsync/", 21 | "numWorkers": 5, 22 | "tick": "10s", 23 | "timeout": "10s", 24 | "renewToken": false 25 | }, 26 | "destination": { 27 | "vault": { 28 | "address": "http://127.0.0.1:6200", 29 | "token": "s.MDLmK6gOVLL33bB5TkdnJPOB" 30 | }, 31 | "consul": { 32 | "dc": "dc2", 33 | "address": "http://127.0.0.1:6500" 34 | }, 35 | "mounts": [ 36 | "secret/" 37 | ], 38 | "syncPath": "vsync/", 39 | "numWorkers": 10, 40 | "tick": "10s", 41 | "timeout": "10s", 42 | "transforms": [ 43 | { 44 | "name": "v1->v2", 45 | "from": "(?Psecret)/(?P((meta)?data))?/(?Pmultipaas)/(?P(dev|test|stage|prod))?/?(?P\\w+)?/?", 46 | "to": "multipaas/meta/env/app/secrets" 47 | } 48 | ] 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /configs/origin.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "origin", 3 | "log": { 4 | "level": "debug", 5 | "type": "console" 6 | }, 7 | "numBuckets": 19, 8 | "origin": { 9 | "vault": { 10 | "address": "http://127.0.0.1:6200", 11 | "token": 
"s.gtxa88U2wgdwWsZ7osxnjztO", 12 | "approle": { 13 | "role_id": "36cd0b35-a87b-24a9-06f6-e19ba6c75588", 14 | "secret_id": "5e48f6de-5a07-e8a2-51b5-2dd5b5097dc7" 15 | } 16 | }, 17 | "consul": { 18 | "dc": "dc1", 19 | "address": "http://127.0.0.1:6500" 20 | }, 21 | "mounts": [ 22 | "secret/" 23 | ], 24 | "syncPath": "vsync/", 25 | "numWorkers": 5, 26 | "tick": "10s", 27 | "timeout": "10s" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /consul/check.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package consul 16 | 17 | import ( 18 | "fmt" 19 | 20 | "github.com/ExpediaGroup/vsync/apperr" 21 | uuid "github.com/gofrs/uuid" 22 | "github.com/hashicorp/consul/api" 23 | "github.com/rs/zerolog/log" 24 | ) 25 | 26 | // TODO: this needs to be modified, like vault checks. 
see pull request #28 https://github.com/ExpediaGroup/vsync/pull/28 27 | const ( 28 | ReadCheck = 1 << iota 29 | WriteCheck 30 | ListCheck 31 | DeleteCheck 32 | StdCheck = ReadCheck | WriteCheck | ListCheck | DeleteCheck 33 | ) 34 | 35 | // SyncPathChecks checks if path not present, else create along with permissions to create, read, list, delete 36 | func (c *Client) SyncPathChecks(path string, checks int) error { 37 | const op = apperr.Op("consul.SyncPathChecks") 38 | 39 | // create 40 | 41 | id, _ := uuid.NewV4() 42 | keyPath := path + "vsyncChecks/" + id.String() 43 | key := &api.KVPair{ 44 | Key: keyPath, 45 | Value: []byte(id.String()), 46 | } 47 | 48 | if checks&(WriteCheck) != 0 { 49 | _, err := c.KV().Put(key, nil) 50 | if err != nil { 51 | log.Debug().Err(err).Str("key", keyPath).Msg("cannot create kv for key") 52 | return apperr.New(fmt.Sprintf("connot create dummy kv in path %q", keyPath), err, op, ErrInvalidToken) 53 | } 54 | log.Debug().Str("path", path).Msg("sync path is writable") 55 | } 56 | 57 | // list 58 | if checks&(WriteCheck|ListCheck) != 0 { 59 | kvs, _, err := c.KV().List(path, nil) 60 | if err != nil { 61 | log.Debug().Err(err).Str("path", path).Msg("cannot check the above created kv in path") 62 | return apperr.New(fmt.Sprintf("connot list kvs from consul path %q", path), err, op, ErrInvalidToken) 63 | } 64 | if len(kvs) > 0 { 65 | log.Debug().Str("path", path).Msg("sync path is listable") 66 | } else { 67 | log.Debug().Str("path", path).Msg("cannot find the above created kv, cannot list from kv") 68 | return apperr.New(fmt.Sprintf("connot find dummy kv in path %q", path), ErrInvalidToken, op) 69 | } 70 | } 71 | 72 | // get 73 | if checks&(WriteCheck|ListCheck|ReadCheck) != 0 { 74 | kv, _, err := c.KV().Get(keyPath, nil) 75 | if err != nil { 76 | log.Debug().Err(err).Str("key", keyPath).Msg("cannot get the above created kv in path") 77 | return apperr.New(fmt.Sprintf("connot find dummy kv in path %q", keyPath), err, op, ErrInvalidToken) 78 
| } 79 | if kv.Key == key.Key && string(kv.Value) == id.String() { 80 | log.Debug().Str("path", path).Msg("sync path is readable") 81 | } else { 82 | log.Debug().Str("path", path).Msg("cannot find the above created kv, cannot read to kv") 83 | return apperr.New(fmt.Sprintf("connot read the created dummy kv in path %q", keyPath), ErrInvalidToken, op) 84 | } 85 | } 86 | 87 | // delete 88 | if checks&(WriteCheck|ListCheck|ReadCheck|DeleteCheck) != 0 { 89 | _, err := c.KV().Delete(keyPath, nil) 90 | if err != nil { 91 | log.Debug().Err(err).Str("key", keyPath).Msg("cannot delete kv in path") 92 | return apperr.New(fmt.Sprintf("connot delete the dummy kv in path %q", keyPath), err, op, ErrInvalidToken) 93 | } 94 | 95 | // read again 96 | kv, _, err := c.KV().Get(keyPath, nil) 97 | if err != nil { 98 | log.Debug().Err(err).Str("key", keyPath).Msg("cannot get the above created kv in path") 99 | return apperr.New(fmt.Sprintf("connot find dummy kv in path %q", keyPath), err, op, ErrInvalidToken) 100 | } 101 | if kv == nil { 102 | log.Debug().Str("path", path).Msg("sync path is deletable") 103 | } else { 104 | log.Debug().Str("path", keyPath).Msg("could find the above deleted kv, cannot delete from kv") 105 | return apperr.New(fmt.Sprintf("connot delete kv in path %q", keyPath), ErrInvalidToken, op) 106 | } 107 | } 108 | 109 | return nil 110 | } 111 | 112 | func (c *Client) IsSyncPathInitialized(path string) (bool, error) { 113 | const op = apperr.Op("consul.IsSyncPathInitialized") 114 | kvs, _, err := c.KV().List(path, nil) 115 | if err != nil { 116 | log.Debug().Err(err).Str("path", path).Msg("cannot check if path is already present in consul") 117 | return false, apperr.New(fmt.Sprintf("connot check if consul kv exists %q", path), err, op, apperr.Fatal, ErrInitialize) 118 | } 119 | 120 | if len(kvs) == 0 { 121 | return false, nil 122 | } 123 | return true, nil 124 | } 125 | -------------------------------------------------------------------------------- 
/consul/check_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package consul 16 | 17 | import ( 18 | "testing" 19 | 20 | "github.com/stretchr/testify/assert" 21 | ) 22 | 23 | func TestDummy(t *testing.T) { 24 | assert.Equal(t, "", "") 25 | } 26 | -------------------------------------------------------------------------------- /consul/consul.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package consul 16 | 17 | import ( 18 | "fmt" 19 | 20 | "github.com/hashicorp/consul/api" 21 | "github.com/rs/zerolog/log" 22 | "github.com/ExpediaGroup/vsync/apperr" 23 | ) 24 | 25 | var ErrInitialize = fmt.Errorf("cannot initialize consul client") 26 | var ErrInvalidToken = fmt.Errorf("check token permission") 27 | var ErrConnection = fmt.Errorf("consul connection refused") 28 | var ErrInvalidPath = fmt.Errorf("invalid consul path") 29 | var ErrCastPathData = fmt.Errorf("type cast errors on data from path") 30 | 31 | type Client struct { 32 | *api.Client 33 | Dc string 34 | Address string 35 | } 36 | 37 | func NewClient(address string, dc string) (*Client, error) { 38 | const op = apperr.Op("consul.NewClient") 39 | config := api.DefaultConfig() 40 | if address != "" { 41 | config.Address = address 42 | } 43 | 44 | client, err := api.NewClient(config) 45 | if err != nil { 46 | return nil, apperr.New(fmt.Sprintf("cannot create consul client, address %q", address), err, op, apperr.Fatal, ErrInitialize) 47 | } 48 | 49 | _, err = client.Agent().Self() 50 | if err != nil { 51 | log.Debug().Interface("config", config).Msg("consul config used for connecting to client") 52 | return nil, apperr.New(fmt.Sprintf("cannot connect to consul %q", address), err, op, apperr.Fatal, ErrConnection) 53 | } 54 | 55 | return &Client{ 56 | client, 57 | dc, 58 | address, 59 | }, nil 60 | } 61 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ExpediaGroup/vsync 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect 7 | github.com/gofrs/uuid v3.2.0+incompatible 8 | github.com/hashicorp/consul/api v1.1.0 9 | github.com/hashicorp/go-msgpack v0.5.5 // indirect 10 | github.com/hashicorp/memberlist v0.1.4 // indirect 11 | github.com/hashicorp/vault/api v1.0.4 12 | github.com/rs/cors 
v1.7.0 // indirect 13 | github.com/rs/xhandler v0.0.0-20170707052532-1eb70cf1520d // indirect 14 | github.com/rs/xstats v0.0.0-20170813190920-c67367528e16 15 | github.com/rs/zerolog v1.14.3 16 | github.com/spf13/cobra v0.0.5 17 | github.com/spf13/viper v1.4.0 18 | github.com/stretchr/testify v1.3.0 19 | golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4 // indirect 20 | golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect 21 | golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb // indirect 22 | golang.org/x/text v0.3.2 // indirect 23 | ) 24 | -------------------------------------------------------------------------------- /goreleaser.yml: -------------------------------------------------------------------------------- 1 | before: 2 | hooks: 3 | - go mod download 4 | - go generate ./... 5 | 6 | builds: 7 | - 8 | main: ./main.go 9 | ldflags: 10 | - "-X github.com/ExpediaGroup/vsync/cmd.buildCommit={{ .Env.BUILD_COMMIT }} -X github.com/ExpediaGroup/vsync/cmd.buildTime={{ .Env.BUILD_TIME }} -X github.com/ExpediaGroup/vsync/cmd.buildVersion={{ .Env.BUILD_VERSION }}" 11 | env: 12 | - CGO_ENABLED=0 13 | goos: 14 | - linux 15 | goarch: 16 | - amd64 17 | 18 | archives: 19 | - 20 | name_template: "{{ .ProjectName }}_{{ .Env.BUILD_VERSION }}_{{ .Os }}" 21 | 22 | snapshot: 23 | name_template: "{{ .Env.BUILD_VERSION }}" 24 | 25 | checksum: 26 | name_template: "{{ .ProjectName }}_{{ .Env.BUILD_VERSION }}_checksums.txt" 27 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package main 16 | 17 | import ( 18 | "os" 19 | "time" 20 | 21 | "github.com/ExpediaGroup/vsync/cmd" 22 | "github.com/rs/zerolog" 23 | "github.com/rs/zerolog/log" 24 | ) 25 | 26 | func main() { 27 | 28 | err := cmd.Execute() 29 | if err != nil { 30 | errLog := log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}).With().Logger() 31 | 32 | errLog.Error().Msg(err.Error()) 33 | 34 | // wait for the telemetry flush interval to timout 35 | // TODO: make it more effecient 36 | time.Sleep(80 * time.Second) 37 | os.Exit(1) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /scripts/destinationV_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "storage": 3 | { 4 | "consul": 5 | { 6 | "address": "destinationC:8500", 7 | "path": "multipaas-vault/" 8 | } 9 | }, 10 | "listener": 11 | { 12 | "tcp": 13 | { 14 | "address": "0.0.0.0:8200", 15 | "tls_disable": 1 16 | } 17 | }, 18 | "ui": true, 19 | "log_level": "DEBUG" 20 | } -------------------------------------------------------------------------------- /scripts/local_bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function vault_unseal() { 4 | sleep 10 5 | INIT_DATA=$(vault operator init --key-shares=1 --key-threshold=1 --format json) 6 | UNSEAL_KEY=$(echo "${INIT_DATA}" | jq -r '.unseal_keys_b64[0]') 7 | ROOT_TOKEN=$(echo "${INIT_DATA}" | jq -r '.root_token') 8 | vault operator unseal 
${UNSEAL_KEY} > /dev/null 9 | echo ${ROOT_TOKEN} 10 | } 11 | 12 | function origin_token() { 13 | echo ' 14 | # TODO: remove create update delete when vsync code is not checking for all capabilities of a token 15 | path "secret/*" { 16 | capabilities = ["read","list"] 17 | } 18 | path "multipaas/*" { 19 | capabilities = ["read","list"] 20 | } 21 | path "sys/mounts" { 22 | capabilities = ["read","list"] 23 | } 24 | ' > /tmp/vsync_origin 25 | vault policy write vsync_origin /tmp/vsync_origin 26 | vault token create --policy vsync_origin --ttl 2h 27 | echo "Copy the token and place in config file" 28 | } 29 | 30 | function destination_token() { 31 | echo ' 32 | # TODO: remove create update delete when vsync code is not checking for all capabilities of a token 33 | path "secret/*" { 34 | capabilities = ["create","update","read","list","delete"] 35 | } 36 | path "multipaas/*" { 37 | capabilities = ["create","update","read","list","delete"] 38 | } 39 | path "sys/mounts" { 40 | capabilities = ["read","list"] 41 | } 42 | ' > /tmp/vsync_destination 43 | vault policy write vsync_destination /tmp/vsync_destination 44 | vault token create --policy vsync_destination --ttl 2h 45 | echo "Copy the token and place in config file" 46 | } 47 | 48 | 49 | # destroy 50 | set +e 51 | docker stop originC && docker rm originC 52 | docker stop agent1 && docker rm agent1 53 | docker stop originV && docker rm originV 54 | 55 | docker stop destinationC && docker rm destinationC 56 | docker stop agent2 && docker rm agent2 57 | docker stop destinationV && docker rm destinationV 58 | docker network rm vsync 59 | set -e 60 | # create 61 | docker network create vsync 62 | 63 | # origin 64 | docker run -d --name originC --network vsync -p 6500:8500 -p 6600:8600 consul agent --node originC --server --ui --bootstrap --client 0.0.0.0 --datacenter dc1 65 | docker run -d --name agent1 --network vsync consul agent --node agent1 --retry-join originC 66 | docker run -d --name originV --cap-add IPC_LOCK 
--volume "${PWD}"/scripts/originV_config.json:/tmp/originV_config.json --network vsync -p 6200:8200 vault server --config /tmp/originV_config.json 67 | export VAULT_ADDR=http://localhost:6200 68 | origin_ROOT_TOKEN=$(vault_unseal) 69 | echo "root token for http://localhost:6200 : ${origin_ROOT_TOKEN}" 70 | vault login ${origin_ROOT_TOKEN} > /dev/null 71 | vault audit enable file file_path=/vault/logs/vault_audit.log 72 | vault secrets enable -path=multipaas --version 2 kv 73 | vault secrets enable -path=secret --version 2 kv 74 | vault auth enable approle 75 | vault write auth/approle/role/origin token_policies=vsync_origin 76 | echo ======= Origin Approle Creds ======== 77 | echo $(vault read auth/approle/role/origin/role-id) 78 | echo $(vault write -f auth/approle/role/origin/secret-id) 79 | echo ===================================== 80 | origin_token 81 | 82 | # destination 83 | docker run -d --name destinationC --network vsync -p 7500:8500 -p 7600:8600 consul agent --node destinationC --server --ui --bootstrap --retry-join-wan originC --client 0.0.0.0 --datacenter dc2 84 | docker run -d --name agent2 --network vsync consul agent --node agent2 --retry-join destinationC 85 | docker run -d --name destinationV --cap-add IPC_LOCK --volume "${PWD}"/scripts/destinationV_config.json:/tmp/destinationV_config.json --network vsync -p 7200:8200 vault server --config /tmp/destinationV_config.json 86 | export VAULT_ADDR=http://localhost:7200 87 | destination_ROOT_TOKEN=$(vault_unseal) 88 | echo "root token for http://localhost:7200 : ${destination_ROOT_TOKEN}" 89 | vault login ${destination_ROOT_TOKEN} > /dev/null 90 | vault audit enable file file_path=/vault/logs/vault_audit.log 91 | vault secrets enable -path=multipaas --version 2 kv 92 | vault secrets enable -path=secret --version 2 kv 93 | vault auth enable approle 94 | vault write auth/approle/role/destination token_policies=vsync_destination 95 | echo ======= Destination Approle Creds ======== 96 | echo $(vault read 
auth/approle/role/destination/role-id) 97 | echo $(vault write -f auth/approle/role/destination/secret-id) 98 | echo ===================================== 99 | destination_token 100 | 101 | # populate data 102 | # install: brew install parallel 103 | # update seq 2 -> seq 10000 for load testing 104 | seq 2 | parallel --eta -j+0 "curl -Ssl -H 'X-Vault-Token: ${origin_ROOT_TOKEN}' -H 'Content-Type: application/json' http://127.0.0.1:6200/v1/secret/data/multipaas/test/{} -X POST -d '{\"data\":{\"chumma\":\"bar\"}}' > /dev/null" -------------------------------------------------------------------------------- /scripts/originV_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "storage": 3 | { 4 | "consul": 5 | { 6 | "address": "originC:8500", 7 | "path": "multipaas-vault/" 8 | } 9 | }, 10 | "listener": 11 | { 12 | "tcp": 13 | { 14 | "address": "0.0.0.0:8200", 15 | "tls_disable": 1 16 | } 17 | }, 18 | "ui": true, 19 | "log_level": "DEBUG" 20 | } -------------------------------------------------------------------------------- /syncer/destination.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package syncer 16 | 17 | import ( 18 | "context" 19 | "fmt" 20 | "sync" 21 | 22 | "github.com/ExpediaGroup/vsync/apperr" 23 | "github.com/ExpediaGroup/vsync/transformer" 24 | "github.com/ExpediaGroup/vsync/vault" 25 | "github.com/rs/zerolog/log" 26 | ) 27 | 28 | func FetchAndSave(ctx context.Context, 29 | wg *sync.WaitGroup, workerId int, 30 | originVault *vault.Client, destinationVault *vault.Client, 31 | info *Info, pack transformer.Pack, 32 | inTaskCh chan Task, errCh chan error) { 33 | const op = apperr.Op("syncer.FetchAndSave") 34 | for { 35 | select { 36 | case <-ctx.Done(): 37 | log.Debug().Str("trigger", "context done").Int("workerId", workerId).Msg("closed fetch and save worker") 38 | wg.Done() 39 | return 40 | case task, ok := <-inTaskCh: 41 | if !ok { 42 | log.Debug().Str("trigger", "nil channel").Int("workerId", workerId).Msg("closed fetch and save worker") 43 | wg.Done() 44 | return 45 | } 46 | log.Debug().Str("path", task.Path).Str("operation", task.Op).Int("workerId", workerId).Msg("task received by fetch and save worker") 47 | 48 | switch task.Op { 49 | case "add", "update": 50 | // fetch from origin 51 | originSecret, err := originVault.Logical().Read(task.Path) 52 | if err != nil { 53 | log.Debug().Err(err).Str("path", task.Path).Str("operation", task.Op).Int("workerId", workerId).Msg("error while fetching a path from origin vault") 54 | errCh <- apperr.New(fmt.Sprintf("worker %q performed %q operation, cannot fetch path %q from origin vault", workerId, task.Op, task.Path), err, op, ErrInvalidPath) 55 | } 56 | 57 | // transform 58 | newPath, ok := pack.Transform(task.Path) 59 | if ok { 60 | log.Info().Str("oldPath", task.Path).Str("newPath", newPath).Msg("transformed secret path to be added or updated") 61 | } else { 62 | log.Error().Str("path", task.Path).Str("operation", task.Op).Int("workerId", workerId).Msg("cannot transforming the path") 63 | errCh <- apperr.New(fmt.Sprintf("worker %q performed %q operation, cannot transform path 
%q", workerId, task.Op, task.Path), err, op, ErrTransform) 64 | } 65 | 66 | // save to destination 67 | _, err = destinationVault.Logical().Write(newPath, originSecret.Data) 68 | if err != nil { 69 | log.Debug().Err(err).Str("path", task.Path).Str("operation", task.Op).Int("workerId", workerId).Msg("error while saving a path to destination vault") 70 | errCh <- apperr.New(fmt.Sprintf("worker %q performed %q operation, cannot save path %q to destination vault", workerId, task.Op, task.Path), err, op, ErrInvalidPath) 71 | } else { 72 | // save info with origin path and not transformed path 73 | id, err := info.Put(task.Path, task.Insight) 74 | if err != nil { 75 | log.Debug().Err(err).Str("path", task.Path).Str("operation", task.Op).Int("bucketId", id).Int("workerId", workerId).Msg("cannot save insight in bucket") 76 | errCh <- apperr.New(fmt.Sprintf("worker %q performed %q operation, cannot save path %q insight to bucket %q", workerId, task.Op, task.Path, id), err, op, ErrInvalidBucket) 77 | } 78 | } 79 | 80 | case "delete": 81 | newPath, ok := pack.Transform(task.Path) 82 | if ok { 83 | log.Info().Str("oldPath", task.Path).Str("newPath", newPath).Msg("transformed secret path to be deleted") 84 | } else { 85 | log.Debug().Str("path", task.Path).Str("operation", task.Op).Int("workerId", workerId).Msg("cannot transforming the path") 86 | errCh <- apperr.New(fmt.Sprintf("worker %q performed %q operation, cannot transform path %q", workerId, task.Op, task.Path), ErrTransform, op) 87 | } 88 | 89 | if IgnoreDeletes { 90 | log.Info().Str("path", newPath).Msg("ignore deletes is true, so not deleting this path. 
Not saving this in destination sync info too") 91 | continue 92 | } 93 | 94 | _, err := destinationVault.Logical().Delete(newPath) 95 | if err != nil { 96 | log.Debug().Err(err).Str("path", task.Path).Str("operation", task.Op).Int("workerId", workerId).Msg("error while saving a path to destination vault") 97 | errCh <- apperr.New(fmt.Sprintf("worker %q performed %q operation, cannot save path %q to destination vault", workerId, task.Op, task.Path), err, op, ErrInvalidPath) 98 | } else { 99 | id, err := info.Delete(task.Path) 100 | if err != nil { 101 | log.Debug().Err(err).Str("path", task.Path).Str("operation", task.Op).Int("bucketId", id).Int("workerId", workerId).Msg("cannot delete insight in bucket") 102 | errCh <- apperr.New(fmt.Sprintf("worker %q performed %q operation, cannot delete path %q insight in bucket %q", workerId, task.Op, task.Path, id), err, op, ErrInvalidBucket) 103 | } 104 | } 105 | default: 106 | log.Debug().Str("path", task.Path).Str("operation", task.Op).Int("workerId", workerId).Msg("unknown operation for fetch and save worker on path") 107 | errCh <- apperr.New(fmt.Sprintf("worker %q performed %q operation, unknown op for fetch and save on path %q", workerId, task.Op, task.Path), ErrUnknownOp, op, apperr.Fatal) 108 | } 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /syncer/info.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package syncer 16 | 17 | import ( 18 | "encoding/binary" 19 | "encoding/hex" 20 | "fmt" 21 | "hash" 22 | "sync" 23 | 24 | "github.com/ExpediaGroup/vsync/apperr" 25 | "github.com/rs/zerolog/log" 26 | ) 27 | 28 | type Info struct { 29 | index []string 30 | buckets map[int]Bucket 31 | rw sync.RWMutex 32 | hasher hash.Hash 33 | } 34 | 35 | type Bucket map[string]Insight 36 | 37 | type Insight struct { 38 | Version int64 `json:"version"` 39 | UpdateTime string `json:"updateTime"` 40 | Type string `json:"type"` 41 | } 42 | 43 | func NewInfo(size int, h hash.Hash) (*Info, error) { 44 | const op = apperr.Op("syncer.NewInfo") 45 | if size < 0 { 46 | return nil, apperr.New(fmt.Sprintf("cannot initialize info with negative number of buckets %q", size), ErrInitialize, op, apperr.Fatal) 47 | } 48 | 49 | i := &Info{ 50 | index: make([]string, 0, size), 51 | buckets: map[int]Bucket{}, 52 | rw: sync.RWMutex{}, 53 | hasher: h, 54 | } 55 | 56 | i.hasher.Reset() 57 | _, err := i.hasher.Write([]byte(fmt.Sprintf("%v", Bucket{}))) 58 | if err != nil { 59 | return i, apperr.New(fmt.Sprintf("cannot hash dummy bucket"), err, op, apperr.Fatal, ErrInitialize) 60 | } 61 | hash := hex.EncodeToString((i.hasher.Sum(nil))) 62 | 63 | for j := 0; j < size; j++ { 64 | i.index = append(i.index, hash) 65 | i.buckets[j] = Bucket{} 66 | } 67 | 68 | return i, nil 69 | } 70 | 71 | func (i *Info) generateBucketId(path string) (int, error) { 72 | i.rw.Lock() 73 | defer i.rw.Unlock() 74 | 75 | i.hasher.Reset() 76 | _, err := i.hasher.Write([]byte(path)) 77 | if err != nil { 78 | return 0, err 79 | } 80 | 81 | pathB := i.hasher.Sum(nil) 82 | pathI := binary.BigEndian.Uint16(pathB[:]) 83 | return int(pathI % uint16(len(i.index))), nil 84 | } 85 | 86 | func (i *Info) Put(path string, insight Insight) (int, error) { 87 | const op = apperr.Op("syncer.Info.Put") 88 | 89 | // bucket id 90 | 
id, err := i.generateBucketId(path) 91 | if err != nil { 92 | return 0, apperr.New(fmt.Sprintf("cannot generate bucket id for path %q", path), err, op, ErrInvalidPath) 93 | } 94 | 95 | i.rw.Lock() 96 | defer i.rw.Unlock() 97 | 98 | // bucket content 99 | bucket, ok := i.buckets[id] 100 | if !ok { 101 | log.Debug().Int("bucketId", id).Int("lenBuckets", len(i.buckets)).Msg("bucket not found") 102 | return 0, apperr.New(fmt.Sprintf("cannot find bucket %q from %q buckets", id, len(i.buckets)), ErrInvalidBucket, op) 103 | } 104 | // } else { 105 | // log.Debug().Int("bucketId", id).Str("path", path).Interface("old", bucket[path]).Interface("new", insight).Msg("old insight replaced with new insight") 106 | // } 107 | 108 | bucket[path] = insight 109 | 110 | return id, nil 111 | } 112 | 113 | func (i *Info) Delete(path string) (int, error) { 114 | const op = apperr.Op("syncer.Delete") 115 | 116 | // bucket id 117 | id, err := i.generateBucketId(path) 118 | if err != nil { 119 | return 0, apperr.New(fmt.Sprintf("cannot generate bucket id for path %q", path), err, op, ErrInvalidPath) 120 | } 121 | 122 | i.rw.Lock() 123 | defer i.rw.Unlock() 124 | 125 | // bucket content 126 | if _, ok := i.buckets[id]; !ok { 127 | log.Debug().Int("bucketId", id).Int("lenBuckets", len(i.buckets)).Msg("bucket not found") 128 | return 0, apperr.New(fmt.Sprintf("cannot find bucket %q from %q buckets", id, len(i.buckets)), ErrInvalidBucket, op) 129 | } 130 | 131 | delete(i.buckets[id], path) 132 | 133 | return id, nil 134 | } 135 | 136 | func (i *Info) Reindex() error { 137 | const op = apperr.Op("syncer.Reindex") 138 | i.rw.Lock() 139 | defer i.rw.Unlock() 140 | 141 | for id := 0; id < len(i.index); id++ { 142 | i.hasher.Reset() 143 | content := fmt.Sprint(i.buckets[id]) 144 | _, err := i.hasher.Write([]byte(content)) 145 | if err != nil { 146 | log.Debug().Int("bucketId", id).Interface("content", content).Msg("cannot hash contents") 147 | return apperr.New(fmt.Sprintf("cannot hash contents for 
bucket %q", id), err, op, ErrInvalidInsight) 148 | } 149 | contentHash := hex.EncodeToString((i.hasher.Sum(nil))) 150 | i.index[id] = contentHash 151 | log.Debug().Str("contentHash", fmt.Sprint(contentHash)).Int("bucketId", id).Msg("index updated") 152 | } 153 | return nil 154 | } 155 | 156 | func (i *Info) GetIndex() ([]string, error) { 157 | const op = apperr.Op("syncer.GetIndex") 158 | i.rw.RLock() 159 | defer i.rw.RUnlock() 160 | 161 | if len(i.index) != len(i.buckets) { 162 | return []string{}, apperr.New(fmt.Sprintf("corrupted sync info %q index with %q buckets", len(i.index), len(i.buckets)), ErrCorrupted, op, apperr.Fatal) 163 | } 164 | 165 | return i.index, nil 166 | } 167 | 168 | func (i *Info) GetBucket(id int) (Bucket, error) { 169 | const op = apperr.Op("syncer.GetBucket") 170 | i.rw.RLock() 171 | defer i.rw.RUnlock() 172 | 173 | if len(i.index) != len(i.buckets) { 174 | return Bucket{}, apperr.New(fmt.Sprintf("corrupted sync info %q index with %q buckets", len(i.index), len(i.buckets)), ErrCorrupted, op, apperr.Fatal) 175 | } 176 | 177 | if id > len(i.buckets) { 178 | return Bucket{}, apperr.New(fmt.Sprintf("cannot find bucket %q in %q buckets", id, len(i.buckets)), ErrInvalidBucket, op) 179 | } 180 | 181 | return i.buckets[id], nil 182 | } 183 | -------------------------------------------------------------------------------- /syncer/info_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package syncer 16 | 17 | import ( 18 | "crypto/sha256" 19 | "fmt" 20 | "math" 21 | "math/rand" 22 | "testing" 23 | "time" 24 | 25 | "github.com/stretchr/testify/assert" 26 | "github.com/stretchr/testify/require" 27 | ) 28 | 29 | func TestGenerateBucketID(t *testing.T) { 30 | buckets := map[int]float64{} 31 | numBuckets := 19 32 | numPaths := 100000 33 | hasher := sha256.New() 34 | info, err := NewInfo(numBuckets, hasher) 35 | require.NoError(t, err) 36 | 37 | rand.Seed(time.Now().UnixNano()) 38 | for i := 0; i < numPaths; i++ { 39 | id, err := info.generateBucketId(fmt.Sprint(i)) 40 | assert.NoError(t, err) 41 | buckets[id] = buckets[id] + 1 42 | } 43 | sum := 0.0 44 | for _, v := range buckets { 45 | sum = sum + v 46 | } 47 | mean := sum / float64(numBuckets) 48 | 49 | // mean 50 | assert.Equal(t, numPaths/numBuckets, int(mean), "average must be equal") 51 | 52 | // variance 53 | sd := 0.0 54 | for j := 0; j < numBuckets; j++ { 55 | sd += math.Pow(buckets[j]-mean, 2) 56 | } 57 | variance := math.Sqrt(sd / float64(numPaths)) 58 | t.Log(buckets) 59 | assert.InDelta(t, 0.7, variance, 0.2, "standard deviation OR spread of filled buckets is not within the limits of delta") 60 | } 61 | -------------------------------------------------------------------------------- /syncer/origin.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package syncer 16 | 17 | import ( 18 | "context" 19 | "encoding/json" 20 | "fmt" 21 | "strings" 22 | "sync" 23 | 24 | "github.com/ExpediaGroup/vsync/apperr" 25 | "github.com/ExpediaGroup/vsync/vault" 26 | "github.com/hashicorp/vault/api" 27 | "github.com/rs/zerolog/log" 28 | ) 29 | 30 | type KVV2Meta struct { 31 | CurrentVersion int64 32 | UpdatedTime string 33 | CurrentDeletionTime string 34 | Destroyed bool 35 | } 36 | 37 | func GenerateInsight(ctx context.Context, 38 | wg *sync.WaitGroup, workerId int, 39 | v *vault.Client, i *Info, 40 | inPathCh chan string, errCh chan error) { 41 | const op = apperr.Op("syncer.GenerateInsight") 42 | 43 | for { 44 | select { 45 | case <-ctx.Done(): 46 | log.Debug().Str("trigger", "context done").Int("workerId", workerId).Msg("closed generate insight") 47 | wg.Done() 48 | return 49 | case path, ok := <-inPathCh: 50 | if !ok { 51 | log.Debug().Str("trigger", "nil channel").Int("workerId", workerId).Msg("closed generate insight") 52 | wg.Done() 53 | return 54 | } 55 | log.Debug().Str("path", path).Int("workerId", workerId).Msg("path received for generating sync info") 56 | 57 | secret, err := v.Logical().Read(path) 58 | if err != nil { 59 | log.Debug().Err(err).Str("path", path).Int("workerId", workerId).Msg("cannot read metadata for path") 60 | errCh <- apperr.New(fmt.Sprintf("cannot read metadata for path %q", path), err, op, ErrInvalidPath) 61 | continue 62 | } 63 | 64 | meta, err := getKVV2Meta(secret) 65 | if err != nil { 66 | log.Debug().Err(err).Str("path", 
path).Int("workerId", workerId).Msg("cannot get insight of metadata for path") 67 | errCh <- apperr.New(fmt.Sprintf("cannot gather meta info for path %q", path), err, op, ErrInvalidMeta) 68 | continue 69 | } 70 | 71 | if meta.CurrentDeletionTime != "" || meta.Destroyed { 72 | // this print will bloat the log because end users will not delete the metadata and we keep track of it that it was deleted 73 | log.Debug().Str("path", path).Int("workerId", workerId).Str("deletionTime", meta.CurrentDeletionTime).Msg("current version of path was deleted") 74 | continue 75 | } 76 | 77 | path = strings.Replace(path, "/metadata", "/data", 1) 78 | 79 | id, err := i.Put(path, Insight{ 80 | Type: "kvV2", 81 | Version: meta.CurrentVersion, 82 | UpdateTime: meta.UpdatedTime, 83 | }) 84 | if err != nil { 85 | log.Debug().Err(err).Str("path", path).Int("workerId", workerId).Msg("cannot save insight in info") 86 | errCh <- apperr.New(fmt.Sprintf("cannot save insight for path %q", path), err, op, ErrInvalidMeta) 87 | } 88 | log.Debug().Str("path", path).Int("workerId", workerId).Int("bucketId", id).Msg("saved path in info under bucket") 89 | } 90 | } 91 | } 92 | 93 | // getMetaInsight will get insights given a secret from vault. 
94 | // It will try to recover from panic because we must not stop the sync for 1 bad secret 95 | // use named return values so that we can recover from panic and convert to error 96 | // 97 | // expected format for secret 98 | // &{be80b194-cc6c-76ca-66a1-4f48915a98ac 0 false 99 | // map[cas_required:false created_time:2019-09-15T00:58:20.680948367Z current_version:3 delete_version_after:0s max_versions:0 old_version:0 updated_time:2019-09-15T01:10:28.275769286Z 100 | // versions:map[ 101 | // 1:map[created_time:2019-09-15T00:58:20.680948367Z deletion_time:2019-09-15T00:58:20.693039991Z destroyed:false] 102 | // 2:map[created_time:2019-09-15T00:58:42.568394811Z deletion_time:2019-09-15T00:58:42.582605115Z destroyed:false] 103 | // 3:map[created_time:2019-09-15T01:10:28.275769286Z deletion_time: destroyed:false] // or 104 | // ]] [] 105 | // } 106 | func getKVV2Meta(secret *api.Secret) (meta KVV2Meta, err error) { 107 | const op = apperr.Op("syncer.getSecretMeta") 108 | 109 | defer func() { 110 | var ok bool 111 | if r := recover(); r != nil { 112 | log.Debug().Interface("secret", secret).Msg("panic while getting meta") 113 | err, ok = r.(error) 114 | if !ok { 115 | err = apperr.New(fmt.Sprintf("panic while gathering meta info (%v)", r), ErrInvalidMeta, op) 116 | } 117 | err = apperr.New(fmt.Sprintf("panic while gathering meta info (%v)", r), err, op, ErrInvalidMeta) 118 | } 119 | }() 120 | 121 | if secret == nil { 122 | return meta, apperr.New(fmt.Sprintf("no secret to gather meta"), ErrInvalidMeta, op) 123 | } 124 | 125 | v, err := secret.Data["current_version"].(json.Number).Int64() 126 | if err != nil { 127 | return meta, apperr.New(fmt.Sprintf("cannot type cast %q %q to %q", secret.Data["current_version"], "json number", "int64"), err, op, ErrInvalidMeta) 128 | } 129 | 130 | if secret.Data["versions"] == nil { 131 | return meta, apperr.New(fmt.Sprintf("cannot get version details if path is deleted"), ErrInvalidMeta, op) 132 | } 133 | versions, ok := 
secret.Data["versions"].(map[string]interface{}) 134 | if !ok { 135 | return meta, apperr.New(fmt.Sprintf("cannot type cast %q %q to %q", secret.Data["versions"], "secret data", "map[string]interface{}"), ErrInvalidMeta, op) 136 | } 137 | 138 | vs := fmt.Sprintf("%d", v) 139 | if versions[vs] == nil { 140 | return meta, apperr.New(fmt.Sprintf("cannot get version details for current version %q", v), ErrInvalidMeta, op) 141 | } 142 | current := versions[vs] 143 | c, ok := current.(map[string]interface{}) 144 | if !ok { 145 | return meta, apperr.New(fmt.Sprintf("cannot type cast %q to %q", "current", "map[string]interface{}"), ErrInvalidMeta, op) 146 | } 147 | 148 | meta.CurrentVersion = v 149 | meta.UpdatedTime = fmt.Sprintf("%s", secret.Data["updated_time"]) 150 | meta.CurrentDeletionTime = fmt.Sprintf("%s", c["deletion_time"]) 151 | meta.Destroyed, ok = c["destroyed"].(bool) 152 | if !ok { 153 | meta.Destroyed = false 154 | } 155 | 156 | return meta, nil 157 | } 158 | -------------------------------------------------------------------------------- /syncer/syncer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package syncer 16 | 17 | import ( 18 | "encoding/json" 19 | "fmt" 20 | "reflect" 21 | "time" 22 | 23 | "github.com/ExpediaGroup/vsync/apperr" 24 | "github.com/ExpediaGroup/vsync/consul" 25 | "github.com/hashicorp/consul/api" 26 | "github.com/rs/zerolog/log" 27 | ) 28 | 29 | var ( 30 | ErrInvalidPath = fmt.Errorf("invalid vault kv path") 31 | ErrTransform = fmt.Errorf("transform error") 32 | ErrInvalidMeta = fmt.Errorf("invalid vault meta") 33 | ErrInvalidInfo = fmt.Errorf("invalid sync info") 34 | ErrInvalidBucket = fmt.Errorf("invalid sync info bucket") 35 | ErrInvalidIndex = fmt.Errorf("invalid sync info index") 36 | ErrInvalidInsight = fmt.Errorf("invalid insight") 37 | ErrUnknownOp = fmt.Errorf("unknown operation") 38 | ErrCorrupted = fmt.Errorf("I got ¡™£¢∞NeuRALyzED§¶•ªº! Sync info in corrupted state") 39 | ErrInitialize = fmt.Errorf("Nope, not gonna work! Sync info not initialized") 40 | ) 41 | 42 | var IgnoreDeletes = false 43 | 44 | type Task struct { 45 | Path string 46 | Op string 47 | Insight Insight 48 | } 49 | 50 | func (origin *Info) Compare(destination *Info) ([]Task, []Task, []Task, []error) { 51 | const op = apperr.Op("syncer.Compare") 52 | 53 | add := []Task{} 54 | update := []Task{} 55 | delete := []Task{} 56 | errs := []error{} 57 | 58 | // get indexes 59 | origindex, err := origin.GetIndex() 60 | if err != nil { 61 | errs = append(errs, apperr.New(fmt.Sprintf("cannot find origin index"), err, op, ErrInvalidIndex)) 62 | return add, update, delete, errs 63 | } 64 | 65 | destinationIndex, err := destination.GetIndex() 66 | if err != nil { 67 | errs = append(errs, apperr.New(fmt.Sprintf("cannot find destination index"), err, op, ErrInvalidIndex)) 68 | return add, update, delete, errs 69 | } 70 | 71 | if len(origindex) != len(destinationIndex) { 72 | errs = append(errs, apperr.New(fmt.Sprintf("non comparable indexes origin & destination %q != %q", len(origindex), len(destinationIndex)), ErrInitialize, op, apperr.Fatal)) 73 | return add, 
update, delete, errs 74 | } 75 | 76 | // compare indexes 77 | for i, hash := range origindex { 78 | if hash != destinationIndex[i] { 79 | log.Debug().Int("bucketId", i).Str("originHash", hash).Str("destinationHash", destinationIndex[i]).Msg("bucket's index different") 80 | 81 | originBucket, err := origin.GetBucket(i) 82 | if err != nil { 83 | errs = append(errs, apperr.New(fmt.Sprintf("cannot find origin bucket %q", i), err, op, ErrInvalidBucket)) 84 | } 85 | 86 | destinationBucket, err := destination.GetBucket(i) 87 | if err != nil { 88 | errs = append(errs, apperr.New(fmt.Sprintf("cannot find destination bucket %q", i), err, op, ErrInvalidBucket)) 89 | } 90 | 91 | newAdd, newUpdate, newDelete, newErrs := CompareBuckets(originBucket, destinationBucket) 92 | add = append(add, newAdd...) 93 | update = append(update, newUpdate...) 94 | delete = append(delete, newDelete...) 95 | errs = append(errs, newErrs...) 96 | } else { 97 | log.Debug().Int("bucketId", i).Str("hash", hash).Msg("bucket's index matched") 98 | } 99 | } 100 | 101 | return add, update, delete, errs 102 | } 103 | 104 | func CompareBuckets(origin Bucket, destination Bucket) ([]Task, []Task, []Task, []error) { 105 | const op = apperr.Op("syncer.CompareBuckets") 106 | 107 | add := []Task{} 108 | update := []Task{} 109 | delete := []Task{} 110 | errs := []error{} 111 | processed := map[string]bool{} 112 | 113 | for key, origInsight := range origin { 114 | 115 | destinationInsight, ok := destination[key] 116 | if !ok { 117 | // new key 118 | add = append(add, Task{ 119 | Path: key, 120 | Op: "add", 121 | Insight: origInsight, 122 | }) 123 | continue 124 | } 125 | 126 | processed[key] = true 127 | 128 | if origInsight.Type != destinationInsight.Type { 129 | // type itself got changed 130 | update = append(update, Task{ 131 | Path: key, 132 | Op: "update", 133 | Insight: origInsight, 134 | }) 135 | continue 136 | } 137 | 138 | if origInsight.Version > destinationInsight.Version { 139 | // origin key updated 
140 | update = append(update, Task{ 141 | Path: key, 142 | Op: "update", 143 | Insight: origInsight, 144 | }) 145 | continue 146 | } else { 147 | // optimize by not equal as whole 148 | if !reflect.DeepEqual(origInsight, destinationInsight) { 149 | 150 | originUpdateTime, err := time.Parse(time.RFC3339Nano, origInsight.UpdateTime) 151 | if err != nil { 152 | log.Debug().Str("key", key).Int64("originVersion", origInsight.Version).Int64("destinationVersion", destinationInsight.Version).Str("updatedTime", origInsight.UpdateTime).Err(err).Msg("cannot parse origin string to time") 153 | errs = append(errs, apperr.New(fmt.Sprint("cannot parse origin updated time, string to time of path for comparison", key), err, op, ErrInvalidInsight)) 154 | } 155 | destinationUpdateTime, err := time.Parse(time.RFC3339Nano, destinationInsight.UpdateTime) 156 | if err != nil { 157 | log.Debug().Str("key", key).Int64("originVersion", origInsight.Version).Int64("destinationVersion", destinationInsight.Version).Str("updatedTime", destinationInsight.UpdateTime).Err(err).Msg("cannot parse destination string to time") 158 | errs = append(errs, apperr.New(fmt.Sprint("cannot parse destination updated time, string to time of path for comparison", key), err, op, ErrInvalidInsight)) 159 | } 160 | 161 | // origin got updated and its time is greater 162 | if originUpdateTime.After(destinationUpdateTime) { 163 | // origin metadata was cleared and key cleverly updated to match versions 164 | update = append(update, Task{ 165 | Path: key, 166 | Op: "update", 167 | Insight: origInsight, 168 | }) 169 | continue 170 | } 171 | } 172 | } 173 | } 174 | 175 | if len(destination) != len(processed) { 176 | // some destination key are stale 177 | 178 | for key := range destination { 179 | _, ok := origin[key] 180 | if !ok { 181 | // delete key 182 | delete = append(delete, Task{ 183 | Path: key, 184 | Op: "delete", 185 | Insight: Insight{}, 186 | }) 187 | continue 188 | } 189 | } 190 | } 191 | 192 | return add, 
update, delete, errs 193 | } 194 | 195 | func InfoToConsul(c *consul.Client, i *Info, syncPath string) error { 196 | const op = apperr.Op("syncer.InfoToConsul") 197 | 198 | index, err := i.GetIndex() 199 | if err != nil { 200 | log.Debug().Err(err).Msg("cannot find index") 201 | return apperr.New(fmt.Sprintf("cannot find index for saving"), err, op, ErrInvalidIndex) 202 | } 203 | 204 | // buckets 205 | // all buckets need to be saved first before index because index will trigger a cycle in destination 206 | for id := range index { 207 | syncBucket := fmt.Sprintf("%s%d", syncPath, id) 208 | bucket, err := i.GetBucket(id) 209 | if err != nil { 210 | log.Debug().Err(err).Int("bucketId", id).Msg("cannot get bucket") 211 | return apperr.New(fmt.Sprintf("cannot find bucket %q for saving", id), err, op, ErrInvalidBucket) 212 | } 213 | value, err := json.Marshal(bucket) 214 | if err != nil { 215 | log.Debug().Err(err).Int("bucketId", id).Msg("cannot marshal bucket") 216 | return apperr.New(fmt.Sprintf("cannot marshal bucket %q for saving", id), err, op, ErrInvalidBucket) 217 | } 218 | 219 | res, err := c.KV().Put(&api.KVPair{ 220 | Key: syncBucket, 221 | Value: value, 222 | }, nil) 223 | if err != nil { 224 | log.Debug().Err(err).Str("path", syncBucket).Msg("cannot save bucket to consul") 225 | return apperr.New(fmt.Sprintf("cannot save bucket %q for saving in consul kv path %q", id, syncBucket), err, op, ErrInvalidBucket) 226 | } 227 | log.Debug().Str("timeTaken", fmt.Sprint(res.RequestTime)).Str("path", syncBucket).Msg("saved bucket in consul") 228 | } 229 | 230 | // index 231 | value, err := json.Marshal(index) 232 | if err != nil { 233 | log.Debug().Err(err).Msg("cannot marshal index") 234 | return apperr.New(fmt.Sprintf("cannot marshal index for saving"), err, op, ErrInvalidIndex) 235 | } 236 | 237 | syncIndex := syncPath + "index" 238 | res, err := c.KV().Put(&api.KVPair{ 239 | Key: syncIndex, 240 | Value: value, 241 | }, nil) 242 | if err != nil { 243 | 
log.Debug().Err(err).Str("path", syncIndex).Msg("cannot save index to consul") 244 | return apperr.New(fmt.Sprintf("cannot save index for saving to consul kv path %q", syncIndex), err, op, ErrInvalidIndex) 245 | } 246 | log.Debug().Str("timeTaken", fmt.Sprint(res.RequestTime)).Str("path", syncIndex).Msg("saved index in consul") 247 | 248 | return nil 249 | } 250 | 251 | func InfoFromConsul(c *consul.Client, i *Info, syncPath string) (err error) { 252 | const op = apperr.Op("syncer.InfoFromConsul") 253 | 254 | defer func() { 255 | if r := recover(); r != nil { 256 | log.Debug().Msg("panic while getting sync info") 257 | var ok bool 258 | err, ok = r.(error) 259 | if !ok { 260 | err = apperr.New(fmt.Sprintf("panic while getting sync info (%v)", r), ErrInvalidInfo, op) 261 | } 262 | err = apperr.New(fmt.Sprintf("panic while getting sync info (%v)", r), err, op, ErrInvalidInfo) 263 | } 264 | }() 265 | 266 | // index 267 | syncIndex := syncPath + "index" 268 | res, _, err := c.KV().Get(syncIndex, nil) 269 | if err != nil { 270 | log.Debug().Err(err).Str("path", syncIndex).Msg("failure on retrieving index from consul") 271 | return apperr.New(fmt.Sprintf("cannot get index from consul kv path %q", syncIndex), err, op, ErrInvalidInfo) 272 | } 273 | if res == nil { 274 | log.Debug().Str("path", syncIndex).Msg("no response for retrieving index from consul") 275 | return apperr.New(fmt.Sprintf("cannot get index from consul kv path %q", syncIndex), ErrInvalidInfo, op) 276 | } 277 | 278 | err = json.Unmarshal(res.Value, &i.index) 279 | if err != nil { 280 | log.Debug().Err(err).Str("path", syncIndex).Msg("cannot unmarshall index from consul") 281 | return apperr.New(fmt.Sprintf("cannot unmarshal index from consul kv path %q", syncIndex), err, op, ErrInvalidIndex) 282 | } 283 | 284 | // buckets 285 | for id := range i.index { 286 | syncBucket := fmt.Sprintf("%s%d", syncPath, id) 287 | res, _, err := c.KV().Get(syncBucket, nil) 288 | if err != nil { 289 | 
log.Debug().Err(err).Int("bucketId", id).Str("path", syncBucket).Msg("failure on retrieving bucket from consul") 290 | return apperr.New(fmt.Sprintf("cannot get bucket %q from consul kv path %q", id, syncBucket), err, op, ErrInvalidInfo) 291 | } 292 | if res == nil { 293 | log.Debug().Int("bucketId", id).Str("path", syncBucket).Msg("no response for retrieving bucket from consul") 294 | return apperr.New(fmt.Sprintf("cannot get bucket %q from consul kv path %q", id, syncBucket), ErrInvalidInfo, op) 295 | } 296 | 297 | bucket := Bucket{} 298 | err = json.Unmarshal(res.Value, &bucket) 299 | if err != nil { 300 | log.Debug().Err(err).Int("bucketId", id).Str("path", syncBucket).Msg("cannot unmarshall bucket from consul") 301 | return apperr.New(fmt.Sprintf("cannot unmarshal bucket %q from consul kv path %q", id, syncBucket), err, op, ErrInvalidInfo) 302 | } 303 | i.buckets[id] = bucket 304 | } 305 | 306 | return nil 307 | } 308 | -------------------------------------------------------------------------------- /transformer/named_regex_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package transformer 16 | 17 | import ( 18 | "testing" 19 | 20 | "github.com/stretchr/testify/assert" 21 | ) 22 | 23 | func TestNamedRegexTransformer(t *testing.T) { 24 | r, err := NewNamedRegexpTransformer("test1", "(?Psecret)/(?P((meta)?data))?/(?Prunner)/(?P(dev|test|stage|prod))?/?(?P\\w+)?/?", "platform/meta/env/app/secrets") 25 | assert.NoError(t, err) 26 | 27 | type testCase struct { 28 | input string 29 | eOk bool 30 | expected string 31 | } 32 | cases := []testCase{ 33 | testCase{ 34 | "secret/metadata/runner/stage/myapp", 35 | true, 36 | "runner/metadata/stage/myapp/secrets", 37 | }, 38 | testCase{ 39 | "/secret/metadata/runner/stage/myapp/", 40 | true, 41 | "runner/metadata/stage/myapp/secrets", 42 | }, 43 | testCase{ 44 | "/secret/metadata/runner/stage/", 45 | false, 46 | "", 47 | }, 48 | } 49 | 50 | for _, c := range cases { 51 | s, ok := r.Transform(c.input) 52 | assert.Equal(t, c.eOk, ok) 53 | assert.Equal(t, c.expected, s) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /transformer/named_regexp.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package transformer 16 | 17 | import ( 18 | "errors" 19 | "fmt" 20 | "regexp" 21 | "strings" 22 | 23 | "github.com/rs/zerolog/log" 24 | "github.com/ExpediaGroup/vsync/apperr" 25 | ) 26 | 27 | var ErrRegexParse = errors.New("regex parse error") 28 | 29 | // NamedRegexp is a regexp type but has extra functions to find the named matching substrings 30 | type NamedRegexp struct { 31 | Plain string 32 | *regexp.Regexp 33 | } 34 | 35 | // FindStringSubmatchMap returns the map{name: foundString} from regexp 36 | func (r *NamedRegexp) FindStringSubmatchMap(s string) map[string]string { 37 | matchMap := map[string]string{} 38 | 39 | matches := r.FindStringSubmatch(s) 40 | matchNames := r.SubexpNames() 41 | 42 | if len(matches) == 0 { 43 | return matchMap 44 | } 45 | 46 | for i, name := range matchNames { 47 | if name == "" { 48 | continue 49 | } 50 | matchMap[name] = matches[i] 51 | } 52 | 53 | return matchMap 54 | } 55 | 56 | type NamedRegexpTransformer struct { 57 | Name string 58 | From NamedRegexp 59 | To string 60 | } 61 | 62 | func NewNamedRegexpTransformer(name string, from string, to string) (NamedRegexpTransformer, error) { 63 | const op = apperr.Op("transformer.NewNamedRegexpTransfomer") 64 | 65 | t := NamedRegexpTransformer{} 66 | 67 | r, err := regexp.Compile(from) 68 | if err != nil { 69 | log.Debug().Err(err).Str("from", from).Msg("cannot parse the regular expression") 70 | return t, apperr.New(fmt.Sprintf("From regular expression %q", from), err, op, ErrRegexParse) 71 | } 72 | 73 | t.Name = name 74 | t.From = NamedRegexp{ 75 | Plain: from, 76 | Regexp: r, 77 | } 78 | t.To = to 79 | 80 | return t, nil 81 | } 82 | 83 | func (t NamedRegexpTransformer) Transform(path string) (string, bool) { 84 | matchMap := t.From.FindStringSubmatchMap(path) 85 | 86 | tStrs := []string{} 87 | toNames := strings.Split(t.To, "/") 88 | 89 | if len(matchMap) == 0 { 90 | return "", false 91 | } 92 | 93 | // matchMap result could be map[app:secrets env:test mount:rockcut 
team:] 94 | // if any there are any unmatched key like team in above example then we return false 95 | for _, v := range matchMap { 96 | if v == "" { 97 | return "", false 98 | } 99 | } 100 | 101 | // example of transformer To string is mount/env/app/team 102 | // but it can have non group names like mount/env/app/team/v1 where v1 is not present in regexp itself as a group name 103 | // then we append v1 as a string instead of taking the actual value from regexp matchMap 104 | for _, toName := range toNames { 105 | v, ok := matchMap[toName] 106 | if ok { 107 | tStrs = append(tStrs, v) 108 | } else { 109 | tStrs = append(tStrs, toName) 110 | } 111 | } 112 | 113 | tStr := strings.Join(tStrs, "/") 114 | tStr = regexp.MustCompile("/+").ReplaceAllString(tStr, "/") 115 | 116 | log.Debug(). 117 | Str("name", t.Name). 118 | Str("before", path). 119 | Str("after", tStr). 120 | Interface("matchMap", matchMap). 121 | Strs("toNames", toNames). 122 | Msg("transformed") 123 | return tStr, true 124 | } 125 | -------------------------------------------------------------------------------- /transformer/nil.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 
14 | 15 | package transformer 16 | 17 | type NilTransformer struct { 18 | } 19 | 20 | func NewNilTransformer() NilTransformer { 21 | t := NilTransformer{} 22 | 23 | return t 24 | } 25 | 26 | func (t NilTransformer) Transform(path string) (string, bool) { 27 | return path, true 28 | } 29 | -------------------------------------------------------------------------------- /transformer/transformer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package transformer 16 | 17 | import ( 18 | "errors" 19 | ) 20 | 21 | var ErrInitialize = errors.New("non initializable") 22 | 23 | type Transformer interface { 24 | Transform(path string) (string, bool) 25 | } 26 | 27 | type Pack []Transformer 28 | 29 | func (p Pack) Transform(path string) (string, bool) { 30 | for _, transformer := range p { 31 | if v, ok := transformer.Transform(path); ok { 32 | return v, true 33 | } 34 | } 35 | 36 | return "", false 37 | } 38 | 39 | func DefaultPack() (Pack, error) { 40 | p := Pack{} 41 | p = append(p, NewNilTransformer()) 42 | return p, nil 43 | } 44 | -------------------------------------------------------------------------------- /transformer/transformer_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package transformer

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestGetDefaultPack verifies that DefaultPack succeeds and contains exactly
// one transformer (the identity NilTransformer).
func TestGetDefaultPack(t *testing.T) {
	p, err := DefaultPack()
	assert.NoError(t, err)
	assert.Equal(t, 1, len(p))
}

// Copyright 2019 Expedia, Inc.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

// http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
14 | 15 | package vault 16 | 17 | import ( 18 | "fmt" 19 | "strings" 20 | 21 | "github.com/ExpediaGroup/vsync/apperr" 22 | "github.com/hashicorp/vault/api" 23 | "github.com/rs/zerolog/log" 24 | ) 25 | 26 | const ( 27 | CheckCreate = 1 << iota 28 | CheckDelete 29 | CheckList 30 | CheckRead 31 | CheckUpdate 32 | 33 | CheckAll = CheckCreate | CheckDelete | CheckList | CheckRead | CheckUpdate 34 | CheckOrigin = CheckRead | CheckList 35 | CheckDestination = CheckCreate | CheckDelete | CheckList | CheckRead | CheckUpdate 36 | CheckDestinationWithoutDelete = CheckCreate | CheckList | CheckRead | CheckUpdate 37 | ) 38 | 39 | func (v *Client) MountChecks(mPath string, checks int, name string) error { 40 | const op = apperr.Op("vault.MountChecks") 41 | if checks == 0 { 42 | checks = CheckAll 43 | } 44 | 45 | m, err := v.GetMount(mPath) 46 | if err != nil { 47 | return apperr.New(fmt.Sprintf("could not get mount in path %q, also check vault token permission for read+list on sys/mounts", mPath), err, op, apperr.Fatal, ErrInitialize) 48 | } 49 | 50 | // Currently we support only kv v2 for replication 51 | if (m.Type == "kv" || m.Type == "generic") && m.Options["version"] == "2" { 52 | log.Debug().Interface("mount", m).Str("path", mPath).Msg("mount is of type kv v2") 53 | } else { 54 | log.Debug().Interface("mount", m).Str("path", mPath).Msg("mount is not of type kv v2") 55 | return apperr.New(fmt.Sprintf("mount %q not a kv_v2", mPath), err, op, apperr.Fatal, ErrInitialize) 56 | } 57 | 58 | // data path token permission checks 59 | p := fmt.Sprintf("%sdata/", mPath) 60 | err = v.CheckTokenPermissions(p, checks) 61 | if err != nil { 62 | return apperr.New(fmt.Sprintf("vault token missing permissions on data path %q", p), err, op, apperr.Fatal, ErrInvalidToken) 63 | } 64 | log.Info().Str("path", p).Str("checks", fmt.Sprintf("%b", checks)).Msg("vault token has required capabilities on path") 65 | 66 | // meta data path token permission checks 67 | metaPath := strings.Replace(p, 
"/data/", "/metadata/", 1) 68 | err = v.CheckTokenPermissions(metaPath, checks) 69 | if err != nil { 70 | return apperr.New(fmt.Sprintf("vault token missing permissions on meta path %q", metaPath), err, op, apperr.Fatal, ErrInvalidToken) 71 | } 72 | log.Info().Str("path", metaPath).Str("checks", fmt.Sprintf("%b", checks)).Msg("vault token has required capabilities on path") 73 | 74 | // parentPath := metaPath[:strings.LastIndex(metaPath, "/")] 75 | return nil 76 | } 77 | 78 | func (v *Client) GetMount(m string) (*api.MountOutput, error) { 79 | const op = apperr.Op("vault.GetMount") 80 | 81 | mounts, err := v.Sys().ListMounts() 82 | if err != nil { 83 | log.Debug().Str("mountPath", m).Msg("cannot get mounts from vault") 84 | return nil, apperr.New(fmt.Sprintf("cannot get mounts from vault"), err, op, ErrConnection) 85 | } 86 | 87 | mount, ok := mounts[m] 88 | if !ok { 89 | log.Debug().Str("mountPath", m).Msg("mount not present in vault") 90 | return nil, apperr.New(fmt.Sprintf("mount %q not present in vault", m), ErrInitialize, op) 91 | } 92 | 93 | return mount, nil 94 | } 95 | 96 | func (v *Client) CheckTokenPermissions(p string, checks int) error { 97 | const op = apperr.Op("vault.CheckTokenPermissions") 98 | var err error 99 | 100 | // vault token self capabilities on a specific path 101 | path := p 102 | caps, err := v.Sys().CapabilitiesSelf(path) 103 | if err != nil { 104 | log.Debug().Err(err).Str("path", path).Msg("unable to get token capabilities on path") 105 | return apperr.New(fmt.Sprintf("unable to get token capabilities on path %q", path), err, op, ErrInvalidToken) 106 | } 107 | log.Debug().Str("path", path).Int("checks", checks).Strs("capabilities", caps).Msg("token capabilities on path") 108 | 109 | if checks&CheckCreate == CheckCreate { 110 | if isStringPresent(caps, "create") == false { 111 | log.Debug().Err(err).Str("path", path).Msg("token does not have create permission on path") 112 | return apperr.New(fmt.Sprintf("token does not have create 
permission on path %q", path), err, op, ErrInvalidToken) 113 | } 114 | } 115 | if checks&CheckDelete == CheckDelete { 116 | if isStringPresent(caps, "delete") == false { 117 | log.Debug().Err(err).Str("path", path).Msg("token does not have delete permission on path") 118 | return apperr.New(fmt.Sprintf("token does not have delete permission on path %q", path), err, op, ErrInvalidToken) 119 | } 120 | } 121 | if checks&CheckList == CheckList { 122 | if isStringPresent(caps, "list") == false { 123 | log.Debug().Err(err).Str("path", path).Msg("token does not have list permission on path") 124 | return apperr.New(fmt.Sprintf("token does not have list permission on path %q", path), err, op, ErrInvalidToken) 125 | } 126 | } 127 | if checks&CheckRead == CheckRead { 128 | if isStringPresent(caps, "read") == false { 129 | log.Debug().Err(err).Str("path", path).Msg("token does not have read permission on path") 130 | return apperr.New(fmt.Sprintf("token does not have read permission on path %q", path), err, op, ErrInvalidToken) 131 | } 132 | } 133 | if checks&CheckUpdate == CheckUpdate { 134 | if isStringPresent(caps, "update") == false { 135 | log.Debug().Err(err).Str("path", path).Msg("token does not have update permission on path") 136 | return apperr.New(fmt.Sprintf("token does not have update permission on path %q", path), err, op, ErrInvalidToken) 137 | } 138 | } 139 | 140 | return nil 141 | } 142 | 143 | func isStringPresent(slice []string, s string) bool { 144 | for _, v := range slice { 145 | if v == s { 146 | return true 147 | } 148 | } 149 | return false 150 | } 151 | -------------------------------------------------------------------------------- /vault/vault.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 Expedia, Inc. 2 | 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package vault 16 | 17 | import ( 18 | "context" 19 | "encoding/json" 20 | "fmt" 21 | "time" 22 | 23 | "github.com/hashicorp/vault/api" 24 | 25 | "github.com/ExpediaGroup/vsync/apperr" 26 | "github.com/rs/zerolog/log" 27 | ) 28 | 29 | var ( 30 | ErrInitialize = fmt.Errorf("cannot initialize vault client") 31 | ErrInvalidToken = fmt.Errorf("check token permission") 32 | ErrConnection = fmt.Errorf("vault connection refused") 33 | ErrInvalidPath = fmt.Errorf("invalid path") 34 | ErrCastPathData = fmt.Errorf("type cast errors on data from path") 35 | ErrInvalidSecretId = fmt.Errorf("invalid secret id") 36 | ErrInvalidRoleId = fmt.Errorf("invalid role id") 37 | ) 38 | 39 | type Client struct { 40 | *api.Client 41 | Address string 42 | Mode string 43 | } 44 | 45 | func NewClient(address string, token string, approlePath string, roleID string, secretID string) (*Client, error) { 46 | const op = apperr.Op("vault.NewClient") 47 | 48 | config := api.DefaultConfig() 49 | if address != "" { 50 | config.Address = address 51 | } 52 | 53 | client, err := api.NewClient(config) 54 | if err != nil { 55 | log.Debug().Err(err).Msg("cannot create vault client") 56 | return nil, apperr.New(fmt.Sprintf("cannot create vault client, address %q", address), err, op, apperr.Fatal, ErrInitialize) 57 | } 58 | 59 | if roleID != "" && secretID != "" && token != "" { 60 | log.Debug().Err(err).Msg("cannot use secret and approle together") 61 | return nil, apperr.New(fmt.Sprintf("cannot use secret and approle together %q", address), err, op, 
apperr.Fatal, ErrInitialize) 62 | } 63 | 64 | // Get approle token 65 | if roleID != "" && secretID != "" { 66 | resp, err := client.Logical().Write(fmt.Sprintf("auth/%s/login", approlePath), map[string]interface{}{ 67 | "role_id": roleID, 68 | "secret_id": secretID, 69 | }) 70 | if err != nil { 71 | return nil, apperr.New(fmt.Sprintf("cannot get correct a token from approle role_id and secret_id"), err, op, apperr.Fatal, ErrInitialize) 72 | } 73 | token = resp.Auth.ClientToken 74 | } 75 | 76 | client.SetToken(token) 77 | 78 | return &Client{ 79 | Client: client, 80 | Address: address, 81 | }, nil 82 | } 83 | 84 | // DeepListPaths returns set of paths and folders 85 | // path is a single path which has key value pairs 86 | // folder is a parent set of individual paths, it can have more folders and paths 87 | func (v *Client) DeepListPaths(path string) ([]string, []string, error) { 88 | const op = apperr.Op("vault.DeepListPaths") 89 | 90 | p := []string{} 91 | f := []string{} 92 | 93 | res, err := v.Logical().List(path) 94 | if err != nil { 95 | log.Debug().Err(err).Str("path", path).Msg("cannot list secrets present in data path") 96 | return p, f, apperr.New(fmt.Sprintf("cannot list secrets in data path %q", path), err, op, apperr.Warn, ErrInvalidPath) 97 | } 98 | 99 | if res == nil || res.Data["keys"] == nil { 100 | log.Debug().Str("path", path).Msg("no keys found list response from data path") 101 | return p, f, nil 102 | } 103 | 104 | data, ok := res.Data["keys"].([]interface{}) 105 | if !ok { 106 | return p, f, apperr.New(fmt.Sprintf("cannot type case from %q to %q in data path %q", "data keys", "[]interface{}", path), err, op, apperr.Warn, ErrCastPathData) 107 | } 108 | for _, v := range data { 109 | str := fmt.Sprint(v) 110 | if str[len(str)-1:] == "/" { 111 | f = append(f, str) 112 | } else { 113 | p = append(p, str) 114 | } 115 | } 116 | 117 | return p, f, nil 118 | } 119 | 120 | // GetAllSecretPaths recursively lists all absolute paths given a root vault 
kv v2 path 121 | // Note: do not convert this into go routines as we dont know how to kill the goroutine 122 | func (v *Client) GetAllPaths(metaPaths []string) ([]string, []error) { 123 | var paths []string 124 | var errs []error 125 | 126 | for _, metaPath := range metaPaths { 127 | p, e := v.getAllPaths(metaPath, []string{}, []error{}) 128 | paths = append(paths, p...) 129 | errs = append(errs, e...) 130 | } 131 | 132 | return paths, errs 133 | } 134 | 135 | // getAllSecretPaths is the actual recursive function 136 | func (v *Client) getAllPaths(metaPath string, paths []string, errs []error) ([]string, []error) { 137 | const op = apperr.Op("vault.getAllPaths") 138 | childFragments, childFolders, childErr := v.DeepListPaths(metaPath) 139 | if childErr != nil { 140 | e := apperr.New(fmt.Sprintf("cannot list secrets in data path %q", metaPath), childErr, op, apperr.Warn, ErrInvalidPath) 141 | errs = append(errs, e) 142 | return paths, errs 143 | } 144 | 145 | for _, folder := range childFolders { 146 | folder = folder[:len(folder)-1] 147 | subPaths, subErrs := v.getAllPaths(metaPath+"/"+folder, []string{}, []error{}) 148 | paths = append(paths, subPaths...) 149 | errs = append(errs, subErrs...) 
150 | } 151 | 152 | for _, fragment := range childFragments { 153 | paths = append(paths, metaPath+"/"+fragment) 154 | } 155 | 156 | return paths, errs 157 | } 158 | 159 | // renews origin token 160 | func (v *Client) TokenRenewer(ctx context.Context, errCh chan error) { 161 | const op = apperr.Op("vault.TokenRenewer") 162 | lookup, err := v.Auth().Token().LookupSelf() 163 | if err != nil { 164 | log.Debug().Str("mode", v.Mode).Err(err).Msg("cannot get info for self token") 165 | errCh <- apperr.New(fmt.Sprintf("cannot get info for self token for %s", v.Mode), err, op, apperr.Fatal, ErrInitialize) 166 | } 167 | 168 | i, ok := lookup.Data["creation_ttl"] 169 | if !ok { 170 | log.Debug().Str("mode", v.Mode).Msg("error while getting creation ttl") 171 | errCh <- apperr.New(fmt.Sprintf("error while getting creation ttl for %s", v.Mode), err, op, apperr.Fatal, ErrInitialize) 172 | } 173 | 174 | ttl, err := i.(json.Number).Int64() 175 | if err != nil { 176 | log.Debug().Str("mode", v.Mode).Err(err).Msg("cannot get convert creation ttl to int") 177 | errCh <- apperr.New(fmt.Sprintf("cannot get convert creation ttl to int for %s", v.Mode), err, op, apperr.Fatal, ErrInitialize) 178 | } 179 | 180 | tick := time.Duration(float64(ttl)*0.85) * time.Second 181 | if tick == 0 { 182 | log.Warn().Str("mode", v.Mode).Err(err).Msg("ttl is 0 for origin token") 183 | errCh <- apperr.New(fmt.Sprintf("ttl is 0 for origin token for %s", v.Mode), err, op, apperr.Warn, ErrInitialize) 184 | return 185 | } 186 | if tick < 0 { 187 | log.Debug().Str("mode", v.Mode).Err(err).Msg("cannot be negative ttl value for origin token") 188 | errCh <- apperr.New(fmt.Sprintf("cannot be negative ttl value for origin token for %s", v.Mode), err, op, apperr.Fatal, ErrInitialize) 189 | return 190 | } 191 | 192 | ticker := time.NewTicker(tick) 193 | 194 | // refresh the token so that our tick calculation will be always valid 195 | _, err = v.Auth().Token().RenewSelf(int(ttl)) 196 | if err != nil { 197 | 
log.Debug().Str("mode", v.Mode).Err(err).Msg("cannot renew self token for the first time") 198 | errCh <- apperr.New(fmt.Sprintf("cannot renew self token for the first time for %s", v.Mode), err, op, apperr.Fatal, ErrInvalidToken) 199 | return 200 | } 201 | 202 | for { 203 | select { 204 | case <-ctx.Done(): 205 | ticker.Stop() 206 | time.Sleep(100 * time.Microsecond) 207 | log.Debug().Str("mode", v.Mode).Str("trigger", "context done").Msg("closed token renewer") 208 | return 209 | case <-ticker.C: 210 | resp, err := v.Auth().Token().RenewSelf(int(ttl)) 211 | if err != nil { 212 | log.Debug().Str("mode", v.Mode).Err(err).Msg("cannot renew self token") 213 | errCh <- apperr.New(fmt.Sprintf("cannot renew self token for %s", v.Mode), err, op, apperr.Fatal, ErrInvalidToken) 214 | return 215 | } 216 | 217 | newToken, err := resp.TokenID() 218 | if err != nil { 219 | log.Debug().Str("mode", v.Mode).Err(err).Msg("cannot get new token") 220 | errCh <- apperr.New(fmt.Sprintf("cannot get new token for %s", v.Mode), err, op, apperr.Fatal, ErrInvalidToken) 221 | return 222 | } 223 | 224 | v.SetToken(newToken) 225 | log.Info().Str("mode", v.Mode).Msg("vault token renewed") 226 | } 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /website/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | /node_modules 3 | 4 | # Production 5 | /build 6 | 7 | # Generated files 8 | .docusaurus 9 | .cache-loader 10 | 11 | # Misc 12 | .DS_Store 13 | .env.local 14 | .env.development.local 15 | .env.test.local 16 | .env.production.local 17 | 18 | npm-debug.log* 19 | yarn-debug.log* 20 | yarn-error.log* 21 | -------------------------------------------------------------------------------- /website/docs/contribution/build.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: build 3 | title: Build 4 | sidebar_label: Build 5 | --- 6 | 7 | ## 
Local 8 | 9 | ### Setup 10 | 11 | Run the script in `scripts/local_bootstrap.sh` which should create a miniature test envrionment using docker. 12 | 13 | It creates 14 | * 2 consuls connect via wan 15 | * http://localhost:6500 16 | * http://localhost:7500 17 | * 2 vaults backed by respective consul 18 | * http://localhost:6200 19 | * http://localhost:7200 20 | * unseals each vault and prints the root token which can be used in vsync config and vault ui 21 | 22 | You can find example working configs in `configs` folder 23 | 24 | More docs about vsync [deployment options](../deploy/options.md) 25 | 26 | To create more secrets for stress test purposes, change the `seq N` in populate data section of script. populate data will use parallel to use all your cpus to create secrets faster. I have tested `N` with 10000. 27 | 28 | ### Run 29 | 30 | ORIGIN: 31 | 32 | ```sh 33 | go run main.go origin --config ./configs/origin.json 34 | ``` 35 | 36 | DESTINATION: 37 | 38 | ```sh 39 | go run main.go destination --config ./configs/dest.v2.json 40 | ``` 41 | 42 | > loop will have destination vault same as origin vault, useful for transforming the secret paths 43 | 44 | ```sh 45 | go run main.go destination --config ./configs/dest_loop.v2.json 46 | ``` 47 | 48 | --- 49 | 50 | ### Versioning 51 | 52 | We use git tags extensively and follow basic semantic versioning strictly with `v` prefix 53 | 54 | `basic` semantic version -> `v{{Major}}.{{Minor}}.{{Patch}}` 55 | 56 | `Snapshot` -> development artifact, eg. `v0.0.0-1-g1feac53`, denotes from how many commits from recent tag on same branch and current commit hash 57 | 58 | `Release` -> public artifact, eg. `v0.0.1` 59 | 60 | All commit messages must follow: 61 | 62 | * Patch -> `pa: attributes: commit message` 63 | * Minor -> `mi: attributes: commit message` 64 | * Major -> `ma: attributes: commit message` 65 | 66 | ## Contributing 67 | Pull requests are welcome. Please refer to our CONTRIBUTING file. 
68 | 69 | ## Legal 70 | This project is available under the [Apache 2.0 License](http://www.apache.org/licenses/LICENSE-2.0.html). 71 | -------------------------------------------------------------------------------- /website/docs/contribution/ci.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: ci 3 | title: CI 4 | sidebar_label: CI 5 | --- 6 | 7 | We are currently using github actions to produce build, test, integration-test ( yet to do ), release. 8 | 9 | 10 | ### Releasing 11 | 12 | You must perform a `git tag $(NEW_VERSION)` then `git push --tags $(NEW_VERSION)` 13 | 14 | Github action gets triggered only for tagged git references. 15 | 16 | ### Changelog 17 | 18 | Currently, we have goreleaser which helps us in creating releases along with git commits that might explain what are the things in each release. We should also have a changelog.md and follow the ritual. -------------------------------------------------------------------------------- /website/docs/contribution/goodparts.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: goodparts 3 | title: Good Parts 4 | sidebar_label: Good Parts 5 | --- 6 | 7 | These are good parts of vsync, probably we should not change accidentally in future 8 | 9 | * Don't change the destination metadata based on destination secrets, it is currently and it should be on origin metadata, because the destination updated time and will be different always. When we compare in next sync cycle the info will be different and we will forever be syncing 10 | 11 | * Destination is halting if syncmap is not present in destination vsync/ in consul, which will need manual restart to re initialize the vsync path 12 | 13 | * For transformer regex, use https://regex101.com/. 
Example: https://regex101.com/r/yelNjd/1 -------------------------------------------------------------------------------- /website/docs/deploy/options.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: options 3 | title: Options 4 | sidebar_label: Options 5 | --- 6 | 7 | We used the `nomad` scheduler with the binary driver to deploy the job in origin and destination regions; it makes it easier to get destination vault tokens because of the nomad vault integration. 8 | 9 | Feel free to deploy in a way that needs minimal manual maintenance. Go Nuts! 10 | 11 | ## Artifact 12 | 13 | There are options, 14 | 15 | * docker image 16 | * binary 17 | 18 | ## Securely transfer origin vault token 19 | 20 | It's not easy to securely transfer the origin vault token to destinations. 21 | 22 | We used destination vault for this, there could be multiple ways. 23 | 24 | #### Step 1 25 | 26 | Create periodic token from origin vault 27 | ``` sh 28 | vault token create --policy vsync_origin --period 24h --orphan -display-name vsync-origin-eu-west-1-test 29 | ``` 30 | 31 | #### Step 2 32 | 33 | Goto destination vault and under any path, say 34 | `secret/vsync/origin` 35 | ``` 36 | vaultToken: 37 | ``` 38 | 39 | #### Step 3 40 | 41 | When you start your destination vsync app make sure you pull the origin vault token from destination vault. 42 | -------------------------------------------------------------------------------- /website/docs/faq/deployment.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: deployment 3 | title: Deployment 4 | sidebar_label: Deployment 5 | --- 6 | 7 | ## Deployment 8 | 9 | ### Why we choose nomad?
10 | 11 | * Good integration between vault and nomad 12 | * Jobs will restart itself if something happens 13 | * Canary deployments 14 | * Isolated Fork/Exec Driver 15 | * Hashicorp quality 16 | 17 | ### Sync index not found 18 | 19 | Vsync Origin should be started and running successfully before Vsync Destination 20 | -------------------------------------------------------------------------------- /website/docs/faq/failures.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: failures 3 | title: Failures 4 | sidebar_label: Failures 5 | --- 6 | 7 | ## Failures 8 | 9 | ### If I delete the sync info in consul 10 | 11 | Vsync will stop with a fatal error, if you restart vsync it should be fine again 12 | 13 | ### If there is no origin sync info yet for destination 14 | 15 | Destination will wait for some time and then throw fatal error that it could not hook the consul watch on sync info 16 | 17 | ### Does not halt / stop syncing 18 | 19 | It is designed not to stop sync because of copying one secret from origin to destination. 20 | 21 | It should stop with fatal error for major error like if it could not start the cycle, missing required vault token permission etc 22 | 23 | ### Sync index not found 24 | 25 | Vsync Origin should be started and running successfully before Vsync Destination 26 | 27 | You will most probably get `cannot get sync info from origin consul` 28 | -------------------------------------------------------------------------------- /website/docs/faq/general.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: general 3 | title: General 4 | sidebar_label: General 5 | --- 6 | 7 | ## Why 8 | 9 | ### Why do we need vsync? 10 | 11 | If you have multiple vault clusters, 1 in every region (may be under same environment). 
12 | 13 | Users need to create / update / delete secrets from each of those vaults manually for their apps (deployed in that region) to get the recent version of secret. 14 | 15 | Instead you can ask users to update in one vault (origin) and we can propagate changes to other vaults (destinations) 16 | 17 | This we call it sync. 18 | 19 | Currently, vsync works only for kv v2 secrets. 20 | 21 | ### Why its named VSYNC? 22 | 23 | Vault SYNC (oh com'on, naming is hard!!!) 24 | 25 | Its short, easy to understand, so why not? 26 | 27 | ### Why not use as vault enterprise replication replacement? 28 | 29 | Vault replication is an enterprise feature with hashicorp quality. 30 | 31 | It primarily uses streaming write ahead log to get changes propagated to other vault, which is blazing fast when compared to vsync. 32 | 33 | ### Requirements? 34 | 35 | 1. vault (atleast 1, we can use vsync in vault that is both origin and destination) 36 | 2. consul (atleast 1) 37 | 3. 50 mb of memory 38 | 4. 500 Mhz of cpu (may be less, like 300 Mhz) 39 | 5. 
300 Mb of disk space (may be less) 40 | -------------------------------------------------------------------------------- /website/docs/getstarted/config.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: config 3 | title: Config 4 | sidebar_label: Config 5 | --- 6 | 7 | ## Hierarchy of parameters 8 | 9 | ``` 10 | "--cli-params" overrides 11 | "VSYNC_ENV_VARS" overrides 12 | "{config.vars}" overrides 13 | "default" 14 | ``` 15 | 16 | ## Config 17 | 18 | `--config` : cli parameter to specify the location of config 19 | 20 | `--version` : to get build version, build commit, build time information of vsync 21 | 22 | `syncPath` : consul kv path where vsync has to store its meta data (default: "vsync/") 23 | 24 | > Deprecated after v0.1.1, replaced by syncPath in origin and destination 25 | 26 | `dataPaths` : array of vault paths / mounts which needs to be synced 27 | 28 | > Deprecated after v0.0.1, replaced by mounts in origin and destination 29 | 30 | `log.level` : level of logs that needs to be printed to output; options: info | debug (default: "info") 31 | 32 | `log.type` : level of logs that needs to be printed to output; options: console | json (default: "console") 33 | 34 | `numBuckets` : sync info in consul kv will have N number of buckets and 1 index, each bucket is a map of path:insight. You will need to increase it as you hit per consul kv size limit. It must be same for origin and destinations. (default: 1) 35 | 36 | `ignoreDeletes` : flag for vsync destination to ignore syncing deletes from origin side. (default: false). 37 | ##### Does not save deletes in destination sync info too so it has to compute the differences every time but useful for seeing changes between origin and destination at any point in time. 38 | 39 | ### Origin 40 | 41 | `origin` : top level key for all origin related config parameters 42 | 43 | `origin.dc` : origin consul datacenter. 
"--origin.dc" cli param 44 | 45 | > Deprecated after v0.1.1, replaced by dc in origin.consul.dc 46 | 47 | `origin.vault` : origin vault top level key 48 | 49 | `origin.vault.address` : origin vault address where we need to get metadata ( vault kv metadata ). "--origin.vault.address" cli param 50 | 51 | `origin.vault.token` : origin vault token which has permissions to read, update, write in vault mounts. "--origin.vault.token" cli param 52 | 53 | `origin.vault.approle.path` : origin vault approle path. "--origin.vault.approle.path" cli param (use token OR approle) (default: approle) 54 | 55 | `origin.vault.approle.role_id` : origin vault role_id from an approle which has permissions to read, update, write in vault mounts. "--origin.vault.approle.role_id" cli param (use token OR approle) 56 | 57 | `origin.vault.approle.secret_id` : origin vault secret_id from an approle which has permissions to read, update, write in vault mounts. "--origin.vault.approle.secret_id" cli param (use token OR approle). ENV variable VSYNC_ORIGIN_VAULT_APPROLE_SECRET_ID 58 | 59 | `origin.mounts` : array of vault paths / mounts which needs to be synced. Each value needs to end with /. Token permissions to read, update, delete are checked for each cycle. 60 | 61 | `origin.consul.address` : origin consul address where we need to store vsync meta data ( sync info ). "--origin.consul.address" cli param 62 | 63 | `origin.consul.dc` : origin consul datacenter. "--origin.consul.dc" cli param 64 | 65 | `origin.numWorkers` : number of get insights worker (default: 1) 66 | 67 | `origin.tick` : interval for timer to start origin sync cycles. String format like 10m, 5s (default: "1m") 68 | 69 | `origin.timout` : time limit trigger of a bomb, killing an existing sync cycle. String format like 10m, 5s (default: "5m") 70 | 71 | `origin.renewToken` : renews origin vault periodic token and making it infinite token (default: true). See securely transfer origin vault token for more info. 
72 | 73 | ### Destination 74 | 75 | `destination` : top level key for all destination related config parameters 76 | 77 | `destination.dc` : destination consul datacenter 78 | 79 | > Deprecated after v0.1.1, replaced by dc in destination.consul.dc 80 | 81 | `destination.vault` : destination vault top level key 82 | 83 | `destination.vault.address` : destination vault address where we need to get metadata ( vault kv metadata ). "--destination.vault.address" cli param 84 | 85 | `destination.vault.token` : destination vault token which has permissions to read, update, write in vault mounts. "--destination.vault.token" cli param 86 | 87 | `destination.vault.approle.path` : destination vault approle path. "--destination.vault.approle.path" cli param (use token OR approle) (default: approle) 88 | 89 | `destination.vault.approle.role_id` : destination vault role_id from an approle which has permissions to read, update, write in vault mounts. "--destination.vault.approle.role_id" cli param (use token OR approle) 90 | 91 | `destination.vault.approle.secret_id` : destination vault secret_id from an approle which has permissions to read, update, write in vault mounts. "--destination.vault.approle.secret_id" cli param (use token OR approle). ENV variable VSYNC_DESTINATION_VAULT_APPROLE_SECRET_ID 92 | 93 | `destination.mounts` : array of vault paths / mounts which needs to be synced. Each value needs to end with /. Token permissions to read, update, delete are checked for each cycle. 94 | 95 | `destination.consul.dc` : destination consul datacenter. "--destination.consul.dc" cli param 96 | 97 | `destination.consul.address` : destination consul address where we need to store vsync meta data ( sync info ). "--destination.consul.address" cli param 98 | 99 | `destination.numWorkers` : number of fetch and save worker (default: 1). 100 | 101 | `destination.tick` : interval for timer to start destination sync cycles. 
String format like 10m, 5s (default: "1m") 102 | 103 | `destination.timout` : time limit trigger of a bomb, killing an existing sync cycle. String format like 10m, 5s (default: "5m") 104 | 105 | ## Env 106 | 107 | Setting `VSYNC_*` envrionment variables will also have effects. eg: "VSYNC_LOGLEVEL=debug" 108 | 109 | ## Config file 110 | 111 | Supported format: json, hcl, yaml through [viper](https://github.com/spf13/viper) 112 | 113 | ## Examples 114 | 115 | ### Origin 116 | 117 | ``` 118 | { 119 | "log": { 120 | "level": "debug", 121 | "type": "console" 122 | }, 123 | "logLevel": "info", 124 | "numBuckets": 19, 125 | "origin": { 126 | "vault": { 127 | "address": "http://127.0.0.1:6200", 128 | "token": "s.MDLmK6gOVLL33bB5TkdnJPOB" 129 | }, 130 | "consul": { 131 | "dc": "dc1", 132 | "address": "http://127.0.0.1:6500" 133 | }, 134 | "mounts": [ 135 | "secret/" 136 | ], 137 | "syncPath": "vsync/", 138 | "numWorkers": 5, 139 | "tick": "10s", 140 | "timeout": "10s" 141 | } 142 | } 143 | ``` 144 | 145 | ### Simple destination 146 | 147 | ``` 148 | { 149 | "log": { 150 | "level": "debug", 151 | "type": "console" 152 | }, 153 | "numBuckets": 19, 154 | "origin": { 155 | "vault": { 156 | "address": "http://127.0.0.1:6200", 157 | "token": "s.8Te1siHQnIoJ4k6el4pioQhz" 158 | }, 159 | "consul": { 160 | "dc": "dc1", 161 | "address": "http://127.0.0.1:6500" 162 | }, 163 | "mounts": [ 164 | "secret/" 165 | ], 166 | "syncPath": "vsync/", 167 | "numWorkers": 5, 168 | "tick": "10s", 169 | "timeout": "10s" 170 | }, 171 | "destination": { 172 | "vault": { 173 | "address": "http://127.0.0.1:7200", 174 | "token": "s.5LvYTJQhwyh2CvrZtUpnHeLb" 175 | }, 176 | "consul": { 177 | "dc": "dc2", 178 | "address": "http://127.0.0.1:7500" 179 | }, 180 | "syncPath": "vsync/", 181 | "numWorkers": 10, 182 | "tick": "10s", 183 | "timeout": "10s" 184 | } 185 | } 186 | ``` 187 | 188 | ### Destination with transformers 189 | 190 | ``` 191 | { 192 | "log": { 193 | "level": "debug", 194 | "type": "console" 195 
| }, 196 | "numBuckets": 19, 197 | "origin": { 198 | "vault": { 199 | "address": "http://127.0.0.1:6200", 200 | "token": "s.8Te1siHQnIoJ4k6el4pioQhz" 201 | }, 202 | "consul": { 203 | "dc": "dc1", 204 | "address": "http://127.0.0.1:6500" 205 | }, 206 | "mounts": [ 207 | "runner/" 208 | ], 209 | "syncPath": "vsync/", 210 | "numWorkers": 5, 211 | "tick": "10s", 212 | "timeout": "10s" 213 | }, 214 | "destination": { 215 | "vault": { 216 | "address": "http://127.0.0.1:7200", 217 | "token": "s.5LvYTJQhwyh2CvrZtUpnHeLb" 218 | }, 219 | "consul": { 220 | "dc": "dc2", 221 | "address": "http://127.0.0.1:7500" 222 | }, 223 | "syncPath": "vsync/", 224 | "numWorkers": 10, 225 | "tick": "10s", 226 | "timeout": "10s", 227 | "transforms": [ 228 | { 229 | "name": "v1->v2", 230 | "from": "(?Psecret)/(?P((meta)?data))?/(?Prunner)/(?P(dev|test|stage|prod))?/?(?P\\w+)?/?", 231 | "to": "runner/meta/env/app/secrets" 232 | } 233 | ] 234 | } 235 | } 236 | ``` 237 | 238 | ### Destination is same as origin 239 | 240 | We are transforming from one mount to another 241 | 242 | ``` 243 | { 244 | "log": { 245 | "level": "debug", 246 | "type": "console" 247 | }, 248 | "numBuckets": 19, 249 | "origin": { 250 | "vault": { 251 | "address": "http://127.0.0.1:6200", 252 | "token": "s.MDLmK6gOVLL33bB5TkdnJPOB" 253 | }, 254 | "consul": { 255 | "dc": "dc1", 256 | "address": "http://127.0.0.1:6500" 257 | }, 258 | "mounts": [ 259 | "runner/" 260 | ], 261 | "syncPath": "vsync/", 262 | "numWorkers": 5, 263 | "tick": "10s", 264 | "timeout": "10s" 265 | }, 266 | "destination": { 267 | "vault": { 268 | "address": "http://127.0.0.1:6200", 269 | "token": "s.MDLmK6gOVLL33bB5TkdnJPOB" 270 | }, 271 | "consul": { 272 | "dc": "dc1", 273 | "address": "http://127.0.0.1:6500" 274 | }, 275 | "syncPath": "vsync/", 276 | "numWorkers": 10, 277 | "tick": "10s", 278 | "timeout": "10s", 279 | "transforms": [ 280 | { 281 | "name": "v1->v2", 282 | "from": 
"(?Psecret)/(?P((meta)?data))?/(?Prunner)/(?P(dev|test|stage|prod))?/?(?P\\w+)?/?", 283 | "to": "runner/meta/env/app/secrets" 284 | } 285 | ] 286 | } 287 | } 288 | ``` 289 | -------------------------------------------------------------------------------- /website/docs/getstarted/install.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: install 3 | title: Install 4 | sidebar_label: Install 5 | --- 6 | 7 | ### Manual download 8 | 9 | 1. Goto [releases](https://github.com/ExpediaGroup/vsync/releases) page 10 | 2. Download the latest binary for your OS 11 | 3. Place the binary somewhere in global path, like `/usr/local/bin` 12 | 13 | ### Docker images 14 | Find the latest release tag from github release page of this project 15 | 16 | ``` 17 | TODO: add docker image 18 | ``` 19 | 20 | ## Usage 21 | 22 | ``` 23 | A tool that sync secrets between different vaults probably within same environment vaults 24 | 25 | Usage: 26 | vsync [flags] 27 | vsync [command] 28 | 29 | Available Commands: 30 | destination Performs comparisons of sync data structures and copies data from origin to destination for nullifying the diffs 31 | help Help about any command 32 | origin Generate sync data structure in consul kv for entities that we need to distribute 33 | 34 | Flags: 35 | -c, --config string load the config file along with path (default is $HOME/.vsync.json) 36 | --destination.consul.address string destination consul address 37 | --destination.consul.dc string destination consul datacenter 38 | --destination.vault.address string destination vault address 39 | --destination.vault.token string destination vault token 40 | -h, --help help for vsync 41 | --log.level string logger level (info|debug) 42 | --log.type string logger type (console|json) 43 | --origin.consul.address string origin consul address 44 | --origin.consul.dc string origin consul datacenter 45 | --origin.vault.address string origin vault address 46 | 
--origin.vault.token string origin vault token 47 | --version version information 48 | 49 | Use "vsync [command] --help" for more information about a command. 50 | ``` -------------------------------------------------------------------------------- /website/docs/getstarted/why.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: why 3 | title: Why 4 | sidebar_label: Why 5 | --- 6 | 7 | **vsync**, an easy, efficient way to sync credentials across from one origin to multiple destinations 8 | 9 | Developers might have their apps in multiple datacenters, each having its own vault. Its difficult for developers to update secrets in each datacenter for their apps to pickup updated secrets like database passwords. Instead we can have single origin vault, where developer will update and we can replicate the secrets to other vaults. This is where vsync fits in. 10 | 11 | * Parallel workers to finish the job faster 12 | * No need of cron jobs to trigger syncing 13 | * Cleanly closes the cycles 14 | * Exposes telemetry data (OpenTelemetry integration in future) 15 | * Clean vault audit log, as it uses only kv metadata for comparison and they are not clogged because of secret distribution 16 | * Transform the path between origin and destination while syncing eg: secret/data/runner/stage/app1 => runnerv2/data/stage/app1/secrets without impacting apps / users 17 | * Loopback to have origin and destination in the same vault 18 | * Meta sync information is stored in consul 19 | 20 | ## Similar products 21 | 22 | ### Shell scripts 23 | Generally people comeup with shell scripts and a cron job that does the job of sequentially copying secrets from one vault to another. 24 | 25 | ### Custom application for copying the secrets 26 | Vsync is one of them. 
One major missing feature is parallel copying, which should not stop the whole job while copying a particular bad secret 27 | 28 | ### Vault Enterprise 29 | Vault Enterprise is a paid version of vault. It uses write ahead log streaming to sync blazingly fast. We all know hashicorp products are more robust. One missing piece is transforming the paths while syncing. It's useful while performing a major platform migration without impacting any application / teams. 30 | 31 | ## Prerequisite 32 | 33 | * All vault kv mount must be of type `KV V2` 34 | * Currently, only works for secrets in KV mount, does not work for policies 35 | * Currently, works only with consul as kv backend 36 | * Single origin where developers will update their secrets 37 | * All secrets are synced, in order to have region / environment specific secrets we may need to use secret paths like `plaform/stage/us-east-1/myapp/secrets` 38 | 39 | ## Legal 40 | This project is available under the [Apache 2.0 License](http://www.apache.org/licenses/LICENSE-2.0.html). -------------------------------------------------------------------------------- /website/docs/internals/destination.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: destination 3 | title: Destination 4 | sidebar_label: Destination 5 | --- 6 | 7 | This is the end of the unidirectional connection for syncing secrets. It should point to secondary vault clusters in different regions from which regional apps can pull out secrets.
8 | 9 | --- 10 | 11 | ## Startup 12 | 13 | #### Step 1 14 | 15 | Get consul and vault clients pointing to origin 16 | 17 | Get consul and vault clients pointing to destinations 18 | 19 | #### Step 2 20 | 21 | Check if we could read, write, update, delete in origin consul kv under origin and destination sync paths 22 | 23 | #### Step 3 24 | 25 | Check if we could read in origin vault under data paths specified in config 26 | 27 | Check if we could read, write, update, delete in destination vault under data paths specified in config 28 | 29 | #### Step 4 30 | 31 | Prepare an error channel through which anyone under sync cycle can contact to throw errors 32 | 33 | We also need to listen to error channel and check if the error at hand is fatal or not. 34 | 35 | If not fatal, log the error with as much context available. 36 | If fatal, stop the current sync cycle cleanly and future cycles. Log the error, inform a human, halt the program. 37 | 38 | #### Step 5 39 | 40 | Prepare an signal channel through which OS can send halt signals. Useful for humans to stop the whole sync program cleanly stop. 41 | 42 | #### Step 7 43 | 44 | Prepare a consul watch on origin sync index so whenever there is a change in consul index change we can run destination cycle 45 | 46 | As a backup to consul watch, we also have a ticker initialized for interval (default: 1m) to run destination cycle 47 | 48 | #### Step 6 49 | 50 | A ticker is initialized for an interval (default: 1m) to start the sync cycle. 51 | The trigger will be starting point for one cycle. 52 | 53 | #### Step 7 54 | 55 | Transformers are initialized as a stack, bottom most are from default pack, top ones are from config file. 56 | 57 | ## Cycle 58 | 59 | #### Step 0 60 | 61 | A timer with timeout (default: 5m) will be created for every sync cycle. If workers get struck inbetween or something happens we do not halt vsync. Instead we wait till the timeout and kill everything created for current sync cycle. 
62 | 63 | #### Step 1 64 | 65 | If triggered either via origin consul watch or ticker, we get origin sync info and destination sync info 66 | 67 | #### Step 2 68 | 69 | Compare the infos from origin and destination. 70 | 71 | Comparison starts with basics like the number of buckets, then moves on to index matching. 72 | 73 | If the index of a specific bucket is the same between origin and destination, then we assume that bucket's contents are not changed. 74 | 75 | If the index of a specific bucket is different between origin and destination, then we check each key value pair in that bucket map. 76 | 77 | Origin is the source of truth and has more priority in solving differences in comparison. 78 | 79 | For every secret we first check version and type then updated time to make sure we update destinations based on changes from origin 80 | 81 | > Edge Case: 82 | User at origin has deleted the meta information, so it resets version numbers. User has also updated a new secret in same path, same number of times to match the version as old secret. In that case, the updated time will differ; hence we should be able to sync new changes 83 | 84 | The output of comparison will be 3 lists [addTask, updateTask, deleteTask] which can be given for workers to fetch from origin and save to destination. 85 | 86 | We have effectively zeroed redundant copying of unaltered secrets. 87 | 88 | #### Step 3 89 | 90 | We create multiple worker go routines (default: 1). Each worker will fetch secret from origin and save in destination. 91 | 92 | Each routine will be given: 93 | * vault client pointing to origin 94 | * vault client pointing to destination 95 | * pack of transformers 96 | * shared sync info 97 | * error channel 98 | * multiple origin absolute paths but one at a time 99 | 100 | sync info needs to be safe for concurrent usage 101 | 102 | Each worker 103 | 1. will call origin vault for secret data 104 | 2. transform the path for destination if necessary 105 | 3. 
save the secret in destination under new path 106 | 4. copy the origin sync info for that secret to destination sync info. The reason for copying origin and not saving destination metainfo as such, is we need to compare origin and destination sync infos in future cycles 107 | 108 | #### Step 4 109 | 110 | Create 1 go routine to handle saving info to consul 111 | * if cycle is successful, save consul sync info 112 | * if cycle has failed, abort saving info because it will corrupt existing sync info 113 | 114 | #### Step 5 115 | 116 | From the list of absolute paths from comparison, send one path to next available worker. Once we have sent all the paths, wait for all worker go routines to complete their work. 117 | 118 | The sender needs to be in separate routine, because we need to stop sending work to worker if we get halt signals. 119 | 120 | #### Step 6 121 | 122 | Reindex the destination sync info, for generating index info for each bucket. 123 | 124 | #### Step 7 125 | 126 | If everything is successful, send save signal for saving info ( index and buckets ) to destination consul. 127 | 128 | If the cycle is aborted by signal, do not send the save signal for saving. 129 | 130 | We need to cleanly close the cycle. Log appropriate cycle messages. 131 | -------------------------------------------------------------------------------- /website/docs/internals/keywords.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: keywords 3 | title: Keywords 4 | sidebar_label: Keywords 5 | --- 6 | 7 | ## Vault 8 | 9 | A hashicorp product to handle secrets. [vault homepage](https://www.vaultproject.io/) 10 | 11 | ### Path 12 | 13 | The location where the values are stored 14 | 15 | *For kv mounts* 16 | ``` 17 | Path = mount/env/app1 [string] 18 | Key [string] : Value [string / json] 19 | ``` 20 | 21 | This can be confusing as `mount/env/app` is also called as `key`. 
So to reduce confusion we call the location a `path` 22 | 23 | *For approle mounts* 24 | ``` 25 | Path = auth/approle/role/role_name [string] 26 | token_policy [string] : value [string array] 27 | . 28 | . 29 | ``` 30 | 31 | ## KV store 32 | 33 | Key value is a mount in vault which can be used to store key value pairs 34 | 35 | ### KV V1 36 | 37 | Vault mount only stores one version of key and value pair. If user wants to update, he/she will change the existing version without backup 38 | 39 | ### KV V2 40 | 41 | Vault mount stores multiple versions *(default: 10)*. If user wants to update, he/she will create a new version from current version, so there exists a backup inherently. 42 | 43 | *Data* 44 | 45 | This path contains actual data for kv. Example `mount/data/env/app1` 46 | 47 | *Metadata* 48 | 49 | This path contains only meta data about each kv path. Example `mount/metadata/env/app1` 50 | 51 | If we access metadata, we could easily get a clean audit log. 52 | 53 | ## Origin 54 | 55 | This is the source of truth, if user updates any kv pair here it should be propagated to other vaults that are in sync 56 | 57 | There could be only `1` origin 58 | 59 | ## Destination 60 | 61 | This is the destination where the sync must reflect the origin kv store 62 | 63 | There could be 0 or more destinations 64 | 65 | ## Mounts 66 | 67 | > Mounts replaced Data Paths 68 | 69 | A list of vault mount paths which needs to be synced. It could be different between origin and destination. Vault token provided for vsync needs to have appropriate permission on these paths. 70 | 71 | ``` 72 | Origin: 73 | "mounts": [ 74 | "secret/" 75 | ], 76 | 77 | Destination: 78 | "mounts": [ 79 | "new_mount/", 80 | "secret/" 81 | ], 82 | ``` 83 | In future: we could have exclude paths regex and can be used to NOT sync 84 | 85 | ## Data paths 86 | 87 | > Deprecated after v0.0.1, renamed as mounts 88 | 89 | ## Sync Info 90 | 91 | Vsync uses consul kv to store meta data about sync. 
92 | 93 | The datastructure is designed to handle any number of entries [keys, policies] for syncing between vaults. 94 | On the positive side it overcomes the size limit of consul kv storage as well as consul event size. 95 | 96 | Sync Info is a collection of number of buckets (default: 20 buckets) along with 1 index. 97 | 98 | This structure needs to be safe for concurrent usage because more workers will update their secrets at the same time. 99 | 100 | ### Insight 101 | 102 | Each secret insight has these meta information 103 | 104 | *struct* 105 | ``` 106 | version -> vault kv version 107 | updateTime -> vault kv update time 108 | type -> kvV1 / kvV2 / policy 109 | ``` 110 | 111 | *eg* 112 | ``` 113 | {"version":1,"updateTime":"2019-05-14T23:41:52.904927369Z","type":"kvV2"} 114 | ``` 115 | 116 | ### Bucket 117 | 118 | Each bucket is a map with absolute path as key and insight datastructure as value 119 | 120 | *eg* 121 | ``` 122 | "mount/data/platform/env/app1":{"version":1,"updateTime":"2019-05-14T23:41:52.904927369Z","type":"kvV2"} 123 | , 124 | "mount/data/platform/env/app2":{"version":1,"updateTime":"2019-05-14T23:41:52.736990492Z","type":"kvV2"} 125 | ``` 126 | 127 | ### Index 128 | 129 | An array of hashes with length as number of buckets. Each hash is constructed from contents in a particular bucket 130 | 131 | ``` 132 | ["6cdb282cb3c9f6d8d3bc1d5eab88d60b728e69249f86e317c3b0d5458993bc80", ... 
19 more sha256 133 | 134 | ``` 135 | 136 | > Some types are not yet implemented like kvV1 and policy 137 | 138 | ## Sync Path 139 | 140 | A consul path to store the meta data used by vsync [sync info] 141 | 142 | ## Task 143 | 144 | Task is a datastructure given to fetch and save worker in destination cycles 145 | 146 | Each task has all the information needed for the worker 147 | 148 | *struct* 149 | ``` 150 | path -> absolute path 151 | op -> operation add/update/delete 152 | insight -> insight 153 | ``` 154 | 155 | ## Transformer 156 | 157 | Each secret path is passed through a set of transformers one by one and at last the origin secret path may be transformed to destination secret path. 158 | 159 | To perform these changes 160 | ``` 161 | secret/data/runner/stage/app1 => runnerv2/data/stage/app1/secrets 162 | ``` 163 | 164 | *struct* 165 | ``` 166 | name (string) -> useful in logs 167 | from (regex) -> checked with secret path for matching 168 | to (string) -> could use group names present in `from` regex 169 | ``` 170 | 171 | *eg* 172 | ``` 173 | { 174 | "name": "v1->v2", 175 | "from": "(?Psecret)/(?P((meta)?data))?/(?Prunner)/(?P(dev|test|stage|prod))?/?(?P\\w+)?/?", 176 | "to": "runner2/meta/env/app/secrets" 177 | } 178 | ``` 179 | 180 | ## Cycle 181 | 182 | A set of actions performed after an interval. Origin Cycle and Destination Cycle are different. 183 | 184 | If there is a fatal failure, we abort the current cycle cleanly and cancel all future cycles then halt. 185 | 186 | If there is a non fatal failure, we surface the error with approriate context but do not kill the cycle and future cycles. -------------------------------------------------------------------------------- /website/docs/internals/origin.md: -------------------------------------------------------------------------------- 1 | --- 2 | id: origin 3 | title: Origin 4 | sidebar_label: Origin 5 | --- 6 | 7 | This is the start of unidirectional connection for syncing secrets. 
It should point to primary vault cluster from which users expect the secrets to be propagated to other vaults in different regions. 8 | 9 | --- 10 | 11 | ## Startup 12 | 13 | #### Step 1 14 | 15 | Get consul and vault clients pointing to origin 16 | 17 | #### Step 2 18 | 19 | Check if we could read, write, update, delete in origin consul kv under sync path 20 | 21 | #### Step 3 22 | 23 | Check if we could read, write, update, delete in origin vault under data paths specified in config 24 | 25 | #### Step 4 26 | 27 | Prepare an error channel through which anyone under sync cycle can contact to throw errors 28 | 29 | We also need to listen to error channel and check if the error at hand is fatal or not. 30 | 31 | If not fatal, log the error with as much context available. 32 | If fatal, stop the current sync cycle cleanly and future cycles. Log the error, inform a human, halt the program. 33 | 34 | #### Step 5 35 | 36 | Prepare a signal channel through which the OS can send halt signals. Useful for humans to stop the whole sync program cleanly. 37 | 38 | #### Step 6 39 | 40 | A ticker is initialized for an interval (default: 1m) to start the sync cycle. 41 | The trigger will be the starting point for one cycle. 42 | 43 | --- 44 | 45 | ## Cycle 46 | 47 | #### Step 0 48 | 49 | A timer with timeout (default: 5m) will be created for every sync cycle. If workers get stuck in between or something happens we do not halt vsync. Instead we wait till the timeout and kill everything created for current sync cycle. 50 | 51 | #### Step 1 52 | 53 | Create a fresh `sync info` to store vsync metadata. It needs to be safe for concurrent usage. 54 | 55 | #### Step 2 56 | 57 | For an interval (default: 1m) we get a list of paths recursively that needs to be synced based on data paths. Example, for mount `secret/` we get absolute paths `[secret/metadata/stage/app1, secret/metadata/stage/app2]` 58 | 59 | #### Step 3 60 | 61 | We create multiple worker go routines (default: 1). 
Each worker will generate insight and save in sync info for a given absolute path. 62 | 63 | Each routine will be given: 64 | * vault client pointing to origin 65 | * shared sync info 66 | * error channel 67 | * multiple absolute paths but one at a time 68 | 69 | sync info needs be safe for concurrent usage 70 | 71 | #### Step 4 72 | 73 | Create 1 go routine to handle saving info to consul 74 | * if cycle is successful, save consul sync info 75 | * if cycle has failed, abort saving info because it will corrupt existing sync info 76 | 77 | #### Step 5 78 | 79 | From the list of absolute paths send one path to next available worker. Once we have sent all the paths, wait for all worker go routines to complete their work. 80 | 81 | The sender needs to be in separate routine, because we need to stop sending work to worker if we get halt signals. 82 | 83 | #### Step 6 84 | 85 | Reindex the sync info, for generating index info for each bucket. 86 | 87 | #### Step 7 88 | 89 | If everything is successful, send save signal for saving info ( index and buckets ) to consul. 90 | 91 | If the cycle is aborted by signal, do not send the save signal for saving. 92 | 93 | We need to cleanly close the cycle. Log appropriate cycle messages. 94 | -------------------------------------------------------------------------------- /website/docusaurus.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | title: 'vsync', 3 | tagline: 'Sync secrets between HashiCorp vaults', 4 | url: 'https://expediagroup.github.io', 5 | baseUrl: '/vsync/', 6 | favicon: 'img/icon.png', 7 | organizationName: 'ExpediaGroup', // Usually your GitHub org/user name. 8 | projectName: 'vsync', // Usually your repo name. 
9 | themeConfig: { 10 | navbar: { 11 | title: 'Vsync', 12 | logo: { 13 | alt: 'vsync logo', 14 | src: 'img/icon.png', 15 | }, 16 | items: [ 17 | {to: 'docs/getstarted/why', label: 'Docs', position: 'left'}, 18 | { 19 | href: 'https://github.com/ExpediaGroup/vsync', 20 | label: 'GitHub', 21 | position: 'right', 22 | }, 23 | ], 24 | }, 25 | footer: { 26 | style: 'dark', 27 | copyright: `Copyright © ${new Date().getFullYear()} Expedia, Inc. Built with Docusaurus.`, 28 | }, 29 | }, 30 | presets: [ 31 | [ 32 | '@docusaurus/preset-classic', 33 | { 34 | docs: { 35 | sidebarPath: require.resolve('./sidebars.js'), 36 | editUrl: 37 | 'https://github.com/ExpediaGroup/vsync/edit/master/website/', 38 | }, 39 | theme: { 40 | customCss: require.resolve('./src/css/custom.css'), 41 | algolia: { 42 | apiKey: 'bc2a3a8a5df178c4e174b77b084f0739', 43 | indexName: 'vsync', 44 | algoliaOptions: {}, // Optional, if provided by Algolia 45 | }, 46 | }, 47 | }, 48 | ], 49 | ], 50 | }; 51 | -------------------------------------------------------------------------------- /website/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "website", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "start": "docusaurus start", 7 | "build": "docusaurus build", 8 | "swizzle": "docusaurus swizzle", 9 | "deploy": "docusaurus deploy" 10 | }, 11 | "dependencies": { 12 | "@docusaurus/core": "^2.0.0-alpha.70", 13 | "@docusaurus/preset-classic": "^2.0.0-alpha.70", 14 | "classnames": "^2.2.6", 15 | "react": "^16.14.0", 16 | "react-dom": "^16.14.0" 17 | }, 18 | "browserslist": { 19 | "production": [ 20 | ">0.2%", 21 | "not dead", 22 | "not op_mini all" 23 | ], 24 | "development": [ 25 | "last 1 chrome version", 26 | "last 1 firefox version", 27 | "last 1 safari version" 28 | ] 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /website/sidebars.js: 
-------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2017-present, Facebook, Inc. 3 | * 4 | * This source code is licensed under the MIT license found in the 5 | * LICENSE file in the root directory of this source tree. 6 | */ 7 | 8 | module.exports = { 9 | someSidebar: { 10 | GetStarted: ['getstarted/why', 'getstarted/install','getstarted/config'], 11 | Deploy: ['deploy/options'], 12 | Internals: ['internals/keywords', 'internals/origin', 'internals/destination'], 13 | Faq: ['faq/general', 'faq/deployment', 'faq/failures'], 14 | Contribution: ['contribution/build', 'contribution/ci', 'contribution/goodparts'], 15 | }, 16 | }; 17 | -------------------------------------------------------------------------------- /website/src/css/custom.css: -------------------------------------------------------------------------------- 1 | /** 2 | * Any CSS included here will be global. The classic template 3 | * bundles Infima by default. Infima is a CSS framework designed to 4 | * work well for content-centric websites. 5 | */ 6 | 7 | /* You can override the default Infima variables here. 
*/ 8 | :root { 9 | --ifm-color-primary: #25c2a0; 10 | --ifm-color-primary-dark: rgb(33, 175, 144); 11 | --ifm-color-primary-darker: rgb(31, 165, 136); 12 | --ifm-color-primary-darkest: rgb(26, 136, 112); 13 | --ifm-color-primary-light: rgb(70, 203, 174); 14 | --ifm-color-primary-lighter: rgb(102, 212, 189); 15 | --ifm-color-primary-lightest: rgb(146, 224, 208); 16 | --ifm-code-font-size: 95%; 17 | } 18 | 19 | .docusaurus-highlight-code-line { 20 | background-color: rgb(72, 77, 91); 21 | display: block; 22 | margin: 0 calc(-1 * var(--ifm-pre-padding)); 23 | padding: 0 var(--ifm-pre-padding); 24 | } 25 | -------------------------------------------------------------------------------- /website/src/pages/index.js: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import classnames from 'classnames'; 3 | import Layout from '@theme/Layout'; 4 | import Link from '@docusaurus/Link'; 5 | import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; 6 | import useBaseUrl from '@docusaurus/useBaseUrl'; 7 | import styles from './styles.module.css'; 8 | 9 | const features = [ 10 | { 11 | title: <>Faster, 12 | // imageUrl: 'img/undraw_docusaurus_mountain.svg', 13 | description: ( 14 | <> 15 | Parallel workers to finish the job faster 16 | 17 | ), 18 | }, 19 | { 20 | title: <>Resilient, 21 | // imageUrl: 'img/undraw_docusaurus_mountain.svg', 22 | description: ( 23 | <> 24 | Does not fail on copying single bad secret

25 | No need of cron jobs to trigger syncing 26 | 27 | ), 28 | }, 29 | { 30 | title: <>Cleaner, 31 | // imageUrl: 'img/undraw_docusaurus_tree.svg', 32 | description: ( 33 | <> 34 | Vault audit logs are cleaner as vsync uses only kv metadata for comparison

35 | Vsync closes all routines in each cycle cleanly on timeout 36 | 37 | ), 38 | }, 39 | { 40 | title: <>Transformers, 41 | // imageUrl: 'img/undraw_docusaurus_react.svg', 42 | description: ( 43 | <> 44 | Migrate paths of secrets from one format to another format and keep in sync without impacting developers and apps 45 | 46 | ), 47 | }, 48 | ]; 49 | 50 | function Feature({ title, description}) { 51 | // const imgUrl = useBaseUrl(imageUrl); 52 | return ( 53 |
54 | {/* {imgUrl && ( 55 |
56 | {title} 57 |
58 | )} */} 59 |

{title}

60 |

{description}

61 |
62 | ); 63 | } 64 | 65 | function Home() { 66 | const context = useDocusaurusContext(); 67 | const {siteConfig = {}} = context; 68 | return ( 69 | 72 |
73 |
74 | 75 |

{siteConfig.title}

76 |

{siteConfig.tagline}

77 |
78 | 84 | Get Started 85 | 86 |
87 |
88 |
89 |
90 | {features && features.length && ( 91 |
92 |
93 |
94 | {features.map((props, idx) => ( 95 | 96 | ))} 97 |
98 |
99 |
100 | )} 101 |
102 |
103 | ); 104 | } 105 | 106 | export default Home; 107 | -------------------------------------------------------------------------------- /website/src/pages/styles.module.css: -------------------------------------------------------------------------------- 1 | /** 2 | * CSS files with the .module.css suffix will be treated as CSS modules 3 | * and scoped locally. 4 | */ 5 | 6 | .heroBanner { 7 | padding: 4rem 0; 8 | text-align: center; 9 | position: relative; 10 | overflow: hidden; 11 | } 12 | 13 | @media screen and (max-width: 966px) { 14 | .heroBanner { 15 | padding: 2rem; 16 | } 17 | } 18 | 19 | .buttons { 20 | display: flex; 21 | align-items: center; 22 | justify-content: center; 23 | } 24 | 25 | .features { 26 | display: flex; 27 | align-items: center; 28 | padding: 2rem 0; 29 | width: 100%; 30 | } 31 | 32 | .featureImage { 33 | height: 200px; 34 | width: 200px; 35 | } 36 | -------------------------------------------------------------------------------- /website/static/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ExpediaGroup/vsync/d6162687e6ab75e2822c7107d2104fde57a02a65/website/static/img/favicon.ico -------------------------------------------------------------------------------- /website/static/img/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ExpediaGroup/vsync/d6162687e6ab75e2822c7107d2104fde57a02a65/website/static/img/icon.png -------------------------------------------------------------------------------- /website/static/img/vsync_text_animation.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ExpediaGroup/vsync/d6162687e6ab75e2822c7107d2104fde57a02a65/website/static/img/vsync_text_animation.gif -------------------------------------------------------------------------------- 
/website/static/img/vsync_text_morelight_lowres.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ExpediaGroup/vsync/d6162687e6ab75e2822c7107d2104fde57a02a65/website/static/img/vsync_text_morelight_lowres.png --------------------------------------------------------------------------------