├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── README.adoc ├── cmd ├── ccloud-schema-exporter │ └── ccloud-schema-exporter.go ├── integrationTests │ ├── exporter-integration_test.go │ └── hardDeletetionTesting │ │ └── hardDeleteCloud_test.go ├── internals │ ├── context.go │ ├── customDestination.go │ ├── customDestination_test.go │ ├── customSource.go │ ├── customSource_test.go │ ├── definitions.go │ ├── exportSchemas.go │ ├── getInfo.go │ ├── helpers.go │ ├── helpers_test.go │ ├── localFSFunctions.go │ ├── localFSFunctions_test.go │ ├── meta.go │ ├── schema-registry-light-client.go │ ├── schema-registry-light-client_test.go │ ├── schemaLoads.go │ └── syncSchemas.go ├── testingUtils │ └── testing_helpers.go └── trustedEntities │ └── LetsEncryptCA.pem ├── go.mod ├── go.sum └── samples ├── Grafana_Sample_Dashboard_JSON.json ├── SampleGrafanaDashboard.png ├── bump_ids.sh └── docker-compose.yml /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | /.env 3 | /ccloud-schema-exporter 4 | /export_env* 5 | /testk8s.yml 6 | /apicurio-compose.yml 7 | /LICENSE 8 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.19.x 5 | - 1.20.x 6 | 7 | services: 8 | - docker 9 | 10 | script: 11 | - docker pull confluentinc/confluent-local:7.4.1 12 | - docker pull confluentinc/cp-schema-registry:7.4.1 13 | - go build cmd/ccloud-schema-exporter/ccloud-schema-exporter.go 14 | - cd cmd/integrationTests 15 | - go test 16 | - cd ../internals 17 | - go test 18 | 19 | addons: 20 | sonarcloud: 21 | organization: "abraham-leal" 22 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.20.5 AS builder 2 | COPY . /app 3 | WORKDIR /app 4 | RUN CGO_ENABLED=0 GOOS=linux go build ./cmd/ccloud-schema-exporter/ccloud-schema-exporter.go 5 | 6 | FROM scratch 7 | COPY --from=builder /app/ccloud-schema-exporter / 8 | ADD cmd/trustedEntities /etc/ssl/certs/ 9 | ENTRYPOINT ["/ccloud-schema-exporter", "-sync", "-syncDeletes", "-syncHardDeletes", "-withMetrics", "-noPrompt"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/README.adoc:
--------------------------------------------------------------------------------
1 | = Schema Exporter for Confluent Schema Registry
2 | 
3 | image:https://travis-ci.com/abraham-leal/ccloud-schema-exporter.svg?branch=master["Build Status", link="https://travis-ci.com/abraham-leal/ccloud-schema-exporter"]
4 | image:https://sonarcloud.io/api/project_badges/measure?project=abraham-leal_ccloud-schema-exporter&metric=alert_status["Quality Gate", link="https://sonarcloud.io/dashboard?id=abraham-leal_ccloud-schema-exporter"]
5 | 
6 | A tool to export schemas from one Confluent Schema Registry to another through the REST API.
7 | This app supports five modes: `batchExport`, `sync`, `getLocalCopy`, `fromLocalCopy`, and `schemaLoad`.
8 | 
9 | - `sync` will continuously sync newly registered schemas into the destination registry.
10 | - `batchExport` will do a one-time migration between schema registries, then reset the destination registry to `READWRITE` mode.
11 | - `getLocalCopy` will fetch and write local copies of a Schema Registry's schemas.
12 | - `fromLocalCopy` will write schemas fetched by `getLocalCopy` to the destination Schema Registry.
13 | - `schemaLoad` will write schemas from your local directory to Schema Registry.
14 | 
15 | This tool supports migrating from self-hosted Schema Registries as well, but if you are looking to migrate schemas
16 | between On-Premise and Confluent Cloud, check out
17 | https://docs.confluent.io/current/connect/kafka-connect-replicator/index.html[Confluent Replicator].
18 | (For non-secured Schema Registries, dummy values can be used for the credential variables below.)
19 | 
20 | The exporter expects the following variables to be set in the environment to make the necessary calls:
21 | (In the case of `-getLocalCopy` and `-customDestination` it does not need the `DST_*` variables; in the case of `-fromLocalCopy`, `-schemaLoad`, and `-customSource` it does not need the `SRC_*` variables.)
22 | 
23 | - `SRC_SR_URL` : The URL for the source Schema Registry
24 | - `SRC_API_KEY` : The API KEY to be used to make calls to the source Schema Registry
25 | - `SRC_API_SECRET` : The API SECRET to be used to make calls to the source Schema Registry
26 | - `DST_SR_URL` : The URL for the destination Schema Registry
27 | - `DST_API_KEY` : The API KEY to be used to make calls to the destination Schema Registry
28 | - `DST_API_SECRET` : The API SECRET to be used to make calls to the destination Schema Registry
29 | 
30 | It is also possible to define the credentials through command flags. If both are defined, the flags take precedence.
31 | 
32 | == Build
33 | [source,bash]
34 | ----
35 | git clone https://github.com/abraham-leal/ccloud-schema-exporter
36 | cd ccloud-schema-exporter
37 | go build ./cmd/ccloud-schema-exporter/ccloud-schema-exporter.go
38 | ----
39 | 
40 | == Docker Run
41 | [source,bash]
42 | ----
43 | docker run \
44 |   -e SRC_SR_URL=$SRC_SR_URL \
45 |   -e SRC_API_KEY=$SRC_API_KEY \
46 |   -e SRC_API_SECRET=$SRC_API_SECRET \
47 |   -e DST_SR_URL=$DST_SR_URL \
48 |   -e DST_API_KEY=$DST_API_KEY \
49 |   -e DST_API_SECRET=$DST_API_SECRET \
50 |   abrahamleal/ccloud-schema-exporter:latest
51 | 
52 | ----
53 | 
54 | A sample docker-compose is also provided under the `samples` folder.
55 | 
56 | The docker image handles `-sync -syncDeletes -syncHardDeletes -withMetrics -noPrompt` continuous sync. For a one-time export, it is recommended to use a release binary, as in the example below.
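As a reference, a one-time export with the release binary might look like the sketch below. It assumes the same credentials as the Docker example above, passed here as flags (flags take precedence over the environment variables when both are set):

[source,bash]
----
./ccloud-schema-exporter -batchExport \
  -src-sr-url $SRC_SR_URL -src-sr-key $SRC_API_KEY -src-sr-secret $SRC_API_SECRET \
  -dest-sr-url $DST_SR_URL -dest-sr-key $DST_API_KEY -dest-sr-secret $DST_API_SECRET
----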
57 | 
58 | If you'd like to pass custom flags, it is recommended to override the entry point (for example with `--entrypoint`), keeping `/ccloud-schema-exporter` at the beginning of the override.
59 | 
60 | For Docker, the `latest` tag builds directly from master. The master branch of this project is kept non-breaking;
61 | however, for stable images use a release tag.
62 | 
63 | == Run
64 | - `./ccloud-schema-exporter -batchExport` : Running the app with this flag will perform a batch export.
65 | Starting v1.1, `-batchExport` can be declared with `-syncDeletes` to perform an export of soft-deleted schemas.
66 | - `./ccloud-schema-exporter -sync` : Running the app with this flag will start a continuous sync
67 | between the source and destination schema registries.
68 | - `./ccloud-schema-exporter -getLocalCopy` : Running the app with this flag will get a snapshot of your Schema Registry
69 | into local files with the naming structure subjectName-version-id-schemaType per schema. The default directory is
70 | {currentPath}/SchemaRegistryBackup/.
71 | - `./ccloud-schema-exporter -fromLocalCopy` : Running the app with this flag will write schemas previously fetched.
72 | It relies on the naming convention of `-getLocalCopy` to obtain the necessary metadata to register the schemas.
73 | The default directory is {currentPath}/SchemaRegistryBackup/. The file lookup is recursive from the specified directory.
74 | - `./ccloud-schema-exporter -schemaLoad` : Running the app with this flag will write schemas from the filesystem.
75 | The schema loader respects references. For more information on behavior, see the Schema Loads section.
76 | 
77 | When multiple flags are applied, precedence is `sync` -> `batchExport` -> `getLocalCopy` -> `fromLocalCopy` -> `schemaLoad`.
78 | 
79 | NOTE: Given that the exporter cannot determine a per-subject compatibility rule, it is recommended to set the destination schema registry compatibility level to `NONE` on first sync and restore it to the source's level afterwards.
80 | 
81 | === Options
82 | 
83 | [source,bash]
84 | ----
85 | Usage of ./ccloud-schema-exporter:
86 | 
87 |   -allowList value
88 |         A comma delimited list of schema subjects to allow. It also accepts paths to a file containing a list of subjects.
89 |   -batchExport
90 |         Perform a one-time export of all schemas
91 |   -customDestination string
92 |         Name of the implementation to be used as a destination (same as mapping)
93 |   -customSource string
94 |         Name of the implementation to be used as a source (same as mapping)
95 |   -deleteAllFromDestination
96 |         Setting this will run a delete on all schemas written to the destination registry. No respect for allow/disallow lists.
97 |   -dest-sr-key string
98 |         API KEY for the Destination Schema Registry Cluster
99 |   -dest-sr-secret string
100 |         API SECRET for the Destination Schema Registry Cluster
101 |   -dest-sr-url string
102 |         Url to the Destination Schema Registry Cluster
103 |   -disallowList value
104 |         A comma delimited list of schema subjects to disallow. It also accepts paths to a file containing a list of subjects.
105 |   -fromLocalCopy
106 |         Registers all local schemas written by getLocalCopy. Defaults to a folder (SchemaRegistryBackup) in the current path of the binaries.
107 |   -getLocalCopy
108 |         Perform a local back-up of all schemas in the source registry. Defaults to a folder (SchemaRegistryBackup) in the current path of the binaries.
109 |   -localPath string
110 |         Optional custom path for local functions. This must be an existing directory structure.
111 |   -noPrompt
112 |         Set this flag to avoid checks while running. Assure you have the destination SR to correct Mode and Compatibility.
113 |   -schemaLoad string
114 |         Schema Type for the load. Currently supported: AVRO
115 |   -scrapeInterval int
116 |         Amount of time ccloud-schema-exporter will delay between schema sync checks in seconds (default 60)
117 |   -src-sr-key string
118 |         API KEY for the Source Schema Registry Cluster
119 |   -src-sr-secret string
120 |         API SECRET for the Source Schema Registry Cluster
121 |   -src-sr-url string
122 |         Url to the Source Schema Registry Cluster
123 |   -sync
124 |         Sync schemas continuously
125 |   -syncDeletes
126 |         Setting this will sync soft deletes from the source cluster to the destination
127 |   -syncHardDeletes
128 |         Setting this will sync hard deletes from the source cluster to the destination
129 |   -timeout int
130 |         Timeout, in seconds, to use for all REST calls with the Schema Registries (default 60)
131 |   -usage
132 |         Print the usage of this tool
133 |   -version
134 |         Print the current version and exit
135 |   -withMetrics
136 |         Exposes metrics for the application in Prometheus format on :9020/metrics
137 | 
138 | ----
139 | 
140 | === Example Usage
141 | [source,bash]
142 | ----
143 | export SRC_SR_URL=XXXX
144 | export SRC_API_KEY=XXXX
145 | export SRC_API_SECRET=XXXX
146 | export DST_SR_URL=XXXX
147 | export DST_API_KEY=XXXX
148 | export DST_API_SECRET=XXXX
149 | ./ccloud-schema-exporter <-sync | -batchExport | -getLocalCopy | -fromLocalCopy>
150 | ----
151 | 
152 | === Filtering the export
153 | 
154 | It is possible to filter the subjects that are synced in all modes (`<-sync | -batchExport | -getLocalCopy | -fromLocalCopy>`).
155 | The `-allowList` and/or `-disallowList` flags accept either a comma-delimited string, or a file containing
156 | comma-delimited entries for subject names (keep in mind these subjects must have their postfixes, such as `-value` or
157 | `-key`, to match the topic schema).
158 | These lists are respected in all run modes.
159 | If specifying a file, make sure it has an extension (such as `.txt`).
160 | A subject specified in both `-disallowList` and `-allowList` will be disallowed by default.
161 | 
162 | NOTE: Lists aren't respected with the `-deleteAllFromDestination` utility.
163 | 
164 | === A note on syncing hard deletions
165 | 
166 | Starting v1.1, `ccloud-schema-exporter` provides an efficient way of syncing hard deletions.
167 | In previous versions, this was done through inefficient lookups.
168 | 
169 | Support for syncing hard deletions applies when the source and destination are both a Confluent Cloud Schema Registry
170 | or Confluent Platform 6.1+.
171 | 
172 | NOTE: With regular `-syncDeletes`, the exporter will attempt to sync previously soft-deleted schemas to the destination.
173 | This functionality also applies only to Confluent Cloud or Confluent Platform 6.1+; however, if it is not able to perform this sync
174 | it will simply keep syncing the soft deletes it detects in the future.
175 | 
176 | === Non-Interactive Run
177 | 
178 | `ccloud-schema-exporter` is meant to be run in a non-interactive way.
179 | However, it does include some checks to ensure things go smoothly in the replication flow.
180 | You can disable these checks by setting the `-noPrompt` flag.
181 | By default, the docker image has this in its entry point.
182 | 
183 | There are three checks made:
184 | 
185 | - The destination schema registry is in `IMPORT` mode. This is a requirement; otherwise, the replication won't work.
186 | - When syncing hard deletions, both clusters are Confluent Cloud Schema Registries. This is a requirement.
187 | - The destination schema registry is in `NONE` global compatibility mode.
188 | 
189 | This last check is not a requirement, but it is suggested since per-subject compatibility rules cannot be determined per version.
190 | Not setting this may result in some versions failing to register because they do not adhere to the global compatibility mode.
191 | (The default compatibility in Confluent Cloud is `BACKWARD`).
192 | 
193 | If you'd like more info on how to change the Schema Registry mode to enable non-interactive runs, see the https://docs.confluent.io/current/schema-registry/develop/api.html#mode[Schema Registry API Documentation].
194 | 
195 | === Extendability: Custom Sources and Destinations
196 | 
197 | `ccloud-schema-exporter` supports custom implementations of sources and destinations.
198 | If you'd like to leverage the already-built back-end, all you have to do is implement the `CustomSource` or `CustomDestination` interface.
199 | A copy of the interface definitions is below for convenience:
200 | 
201 | [source,go]
202 | ----
203 | type CustomSource interface {
204 | 	// Perform any set-up behavior before start of sync/batch export
205 | 	SetUp() error
206 | 	// An implementation should handle the retrieval of a schema from the source.
207 | 	GetSchema(subject string, version int64) (id int64, stype string, schema string, references []SchemaReference, err error)
208 | 	// An implementation should be able to send exactly one map describing the state of the source.
209 | 	// This map should be minimal, describing only the Subject and Versions that exist.
210 | 	GetSourceState() (map[string][]int64, error)
211 | 	// Perform any tear-down behavior before stop of sync/batch export
212 | 	TearDown() error
213 | }
214 | 
215 | type CustomDestination interface {
216 | 	// Perform any set-up behavior before start of sync/batch export
217 | 	SetUp() error
218 | 	// An implementation should handle the registration of a schema in the destination.
219 | 	// The SchemaRecord struct provides all details needed for registration.
220 | 	RegisterSchema(record SchemaRecord) error
221 | 	// An implementation should handle the deletion of a schema in the destination,
222 | 	// identified by its subject and version.
223 | 	DeleteSchema(subject string, version int64) error
224 | 	// An implementation should be able to send exactly one map describing the state of the destination.
225 | 	// This map should be minimal, describing only the Subject and Versions that already exist.
226 | 	GetDestinationState() (map[string][]int64, error)
227 | 	// Perform any tear-down behavior before stop of sync/batch export
228 | 	TearDown() error
229 | }
230 | ----
231 | 
232 | Go does not offer a runtime lookup of interface implementations, so in order to make your implementation available to the tool you must register it.
233 | To register your implementation, go into `cmd/ccloud-schema-exporter/ccloud-schema-exporter.go` and modify the following maps:
234 | 
235 | [source,go]
236 | ----
237 | var sampleDestObject = client.NewSampleCustomDestination()
238 | var customDestFactory = map[string]client.CustomDestination{
239 | 	"sampleCustomDestination": &sampleDestObject,
240 | 	// Add here a mapping of name -> customDestFactory/empty struct for reference at runtime
241 | 	// See sample above for the built-in sample custom destination that is within the client package
242 | }
243 | var apicurioObject = client.NewApicurioSource()
244 | var customSrcFactory = map[string]client.CustomSource{
245 | 	"sampleCustomSourceApicurio": &apicurioObject,
246 | 	// Add here a mapping of name -> customSrcFactory/empty struct for reference at runtime
247 | 	// See sample above for the built-in sample custom source that is within the client package
248 | }
249 | ----
250 | 
251 | You will see that these maps already have one entry each; that is because `ccloud-schema-exporter` comes with sample
252 | implementations of the interfaces under `cmd/internals/customDestination.go` and `cmd/internals/customSource.go`; check them out!
253 | 
254 | For the custom source example, there is an implementation that allows sourcing schemas from Apicurio into Schema Registry.
255 | It defaults to looking for Apicurio at `http://localhost:8081`, but you can override it by providing a mapping
256 | `apicurioUrl=http://yourUrl:yourPort` in the environment variable `APICURIO_OPTIONS`. (If you'd like to pass more headers to the Apicurio calls,
257 | you can do so through the same env variable by separating them with a semicolon, such as `apicurioUrl=http://yourUrl:yourPort;someHeader=someValue`.)
258 | Note: The schemas get exported using record names (all treated as `-value`), so you'll want to use the RecordNameStrategy in Schema Registry clients to use the newly exported schemas!
259 | 
260 | Once added, all you have to do is indicate that you want to run with a custom source/destination via the `-customSource | -customDestination` flag.
261 | The value of this flag must be the name you gave it in the factory mapping.
262 | 
263 | The following options are respected for custom sources / destinations as well:
264 | 
265 | [source,bash]
266 | ----
267 |   -allowList value
268 |         A comma delimited list of schema subjects to allow. It also accepts paths to a file containing a list of subjects.
269 |   -batchExport
270 |         Perform a one-time export of all schemas
271 |   -disallowList value
272 |         A comma delimited list of schema subjects to disallow. It also accepts paths to a file containing a list of subjects.
273 |   -scrapeInterval int
274 |         Amount of time ccloud-schema-exporter will delay between schema sync checks in seconds (default 60)
275 |   -sync
276 |         Sync schemas continuously
277 |   -syncDeletes
278 |         Setting this will sync soft deletes from the source cluster to the destination
279 | ----
280 | 
281 | 
282 | === Schema Loads
283 | 
284 | `ccloud-schema-exporter` supports AVRO schema loads. By defining `-schemaLoad` and `-localPath`,
285 | the tool will register all AVRO schemas it finds recursively in that path, including references.
286 | It will utilize the RecordNameStrategy to name the subjects.
287 | 
288 | Schema Loads support schema versioning. All versions of a schema will be registered. Versions are decided
289 | according to the lexicographical order of the files (for example, a file named `orders_v1` will be registered before `orders_v2`); see the sketch below.
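As an illustration, a schema-load run over a hypothetical directory (the folder and file names below are examples only, not defaults) could look like this; `orders_v1.avsc` registers before `orders_v2.avsc` because of the lexicographical ordering, and referenced records simply need to live somewhere under the same path:

[source,bash]
----
# Hypothetical layout under -localPath (names are examples only)
SchemasToLoad/
├── orders_v1.avsc
├── orders_v2.avsc
└── references/
    └── anotherReference_v1.avsc

# Assumes the DST_* environment variables are already set
./ccloud-schema-exporter -schemaLoad AVRO -localPath ./SchemasToLoad
----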
290 | References are also versioned; however, only the latest version of a reference will be referenced by other schemas.
291 | 
292 | Schema References in AVRO are supported in the following format (in-line references are already supported by default):
293 | 
294 | [source,json]
295 | ----
296 | {
297 |   "type" : "record",
298 |   "namespace" : "io.leal.abraham",
299 |   "name" : "myRecord",
300 |   "fields" : [
301 |     { "name" : "Name" , "type" : ["null", "io.leal.abraham.anotherReference"], "default": null },
302 |     { "name" : "Age" , "type" : "io.leal.abraham.singleReference" }
303 |   ]
304 | }
305 | ----
306 | 
307 | Where `io.leal.abraham.anotherReference` and `io.leal.abraham.singleReference` are both the full names
308 | of referenced records that also live within the path being traversed. `ccloud-schema-exporter` will ensure
309 | those references are registered first in Schema Registry and are correctly set in the ultimate
310 | registration of the referencing schema.
311 | 
312 | This feature also supports allow and disallow lists.
313 | 
314 | === Monitoring
315 | 
316 | When specified with `-withMetrics`, `ccloud-schema-exporter` will export health metrics on `:9020/metrics`.
317 | These metrics are in Prometheus format for ease of parsing. A sample Grafana dashboard is under the `samples` directory.
318 | 
319 | == Feature Requests / Issue Reporting
320 | 
321 | This repo tracks feature requests and issues through GitHub Issues.
322 | If you'd like to see something fixed that was not caught by testing, or you'd like to see a new feature, please feel free
323 | to file a GitHub issue in this repo; I'll review and answer on a best-effort basis.
324 | 
325 | Additionally, if you'd like to contribute a fix/feature, please feel free to open a PR for review.
--------------------------------------------------------------------------------
/cmd/ccloud-schema-exporter/ccloud-schema-exporter.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | //
4 | // ccloud-schema-exporter.go
5 | // Copyright 2020 Abraham Leal
6 | //
7 | 
8 | import (
9 | 	"fmt"
10 | 	"github.com/abraham-leal/ccloud-schema-exporter/cmd/internals"
11 | 	"github.com/prometheus/client_golang/prometheus/promhttp"
12 | 	"log"
13 | 	"net/http"
14 | 	"os"
15 | 	"strings"
16 | )
17 | 
18 | var sampleDestObject = client.NewSampleCustomDestination()
19 | var customDestFactory = map[string]client.CustomDestination{
20 | 	"sampleCustomDestination": &sampleDestObject,
21 | 	// Add here a mapping of name -> customDestFactory/empty struct for reference at runtime
22 | 	// See sample above for the built-in sample custom destination that is within the client package
23 | }
24 | var apicurioObject = client.NewApicurioSource()
25 | var customSrcFactory = map[string]client.CustomSource{
26 | 	"sampleCustomSourceApicurio": &apicurioObject,
27 | 	// Add here a mapping of name -> customSrcFactory/empty struct for reference at runtime
28 | 	// See sample above for the built-in sample custom source that is within the client package
29 | }
30 | 
31 | func main() {
32 | 
33 | 	client.GetFlags()
34 | 
35 | 	if client.WithMetrics {
36 | 		log.Println("Starting exposure of metrics on :9020/metrics")
37 | 		http.Handle("/metrics", promhttp.Handler())
38 | 
39 | 		go func() {
40 | 			err := http.ListenAndServe(":9020", nil)
41 | 			if err != nil {
42 | 				log.Println("Could not start metrics endpoint")
43 | 				log.Println(err)
44 | 				log.Println("Continuing without exposing metrics")
45 | 			}
46 | 		}()
47 | 
48 | 	}
49 | 
50 | 	if client.CustomSourceName != "" {
51 | 
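		// Custom source run: only a destination Schema Registry client is needed here;
		// the source side is provided by the CustomSource implementation looked up in customSrcFactory.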
52 | destClient := client.NewSchemaRegistryClient(client.DestSRUrl, client.DestSRKey, client.DestSRSecret, "dst") 53 | if !client.NoPrompt { 54 | preflightWriteChecks(destClient, false) 55 | } 56 | 57 | if client.ThisRun == client.BATCH { 58 | client.RunCustomSourceBatch(destClient, customSrcFactory[client.CustomSourceName]) 59 | } 60 | if client.ThisRun == client.SYNC { 61 | client.RunCustomSourceSync(destClient, customSrcFactory[client.CustomSourceName]) 62 | } 63 | log.Println("-----------------------------------------------") 64 | log.Println("All Done! Thanks for using ccloud-schema-exporter!") 65 | 66 | os.Exit(0) 67 | } 68 | 69 | if client.ThisRun == client.FROMLOCAL { 70 | workingDir, err := os.Getwd() 71 | if err != nil { 72 | log.Fatalln("Could not get execution path. Possibly a permissions issue.") 73 | } 74 | 75 | destClient := client.NewSchemaRegistryClient(client.DestSRUrl, client.DestSRKey, client.DestSRSecret, "dst") 76 | if !client.NoPrompt { 77 | preflightWriteChecks(destClient, false) 78 | } 79 | 80 | client.WriteFromFS(destClient, client.PathToWrite, workingDir) 81 | 82 | log.Println("-----------------------------------------------") 83 | log.Println("All Done! Thanks for using ccloud-schema-exporter!") 84 | 85 | os.Exit(0) 86 | } 87 | 88 | if client.ThisRun == client.SCHEMALOAD { 89 | workingDir, err := os.Getwd() 90 | if err != nil { 91 | log.Fatalln("Could not get execution path. Possibly a permissions issue.") 92 | } 93 | 94 | destClient := client.NewSchemaRegistryClient(client.DestSRUrl, client.DestSRKey, client.DestSRSecret, "dst") 95 | if !client.NoPrompt { 96 | preflightWriteChecks(destClient, true) 97 | } 98 | 99 | schemaLoader := client.NewSchemaLoader(client.SchemaLoadType, destClient, client.PathToWrite , workingDir) 100 | schemaLoader.Run() 101 | 102 | log.Println("-----------------------------------------------") 103 | log.Println("All Done! Thanks for using ccloud-schema-exporter!") 104 | 105 | os.Exit(0) 106 | } 107 | 108 | srcClient := client.NewSchemaRegistryClient(client.SrcSRUrl, client.SrcSRKey, client.SrcSRSecret, "src") 109 | if !srcClient.IsReachable() { 110 | log.Fatalln("Could not reach source registry. Possible bad credentials?") 111 | } 112 | 113 | if client.CustomDestinationName != "" { 114 | 115 | if client.ThisRun == client.BATCH { 116 | client.RunCustomDestinationBatch(srcClient, customDestFactory[client.CustomDestinationName]) 117 | } 118 | if client.ThisRun == client.SYNC { 119 | client.RunCustomDestinationSync(srcClient, customDestFactory[client.CustomDestinationName]) 120 | } 121 | log.Println("-----------------------------------------------") 122 | log.Println("All Done! Thanks for using ccloud-schema-exporter!") 123 | 124 | os.Exit(0) 125 | } 126 | 127 | if client.ThisRun == client.TOLOCAL { 128 | workingDir, err := os.Getwd() 129 | if err != nil { 130 | log.Fatalln("Could not get execution path. Possibly a permissions issue.") 131 | } 132 | client.WriteToFS(srcClient, client.PathToWrite, workingDir) 133 | 134 | log.Println("-----------------------------------------------") 135 | log.Println("All Done! 
Thanks for using ccloud-schema-exporter!") 136 | 137 | os.Exit(0) 138 | } 139 | 140 | destClient := client.NewSchemaRegistryClient(client.DestSRUrl, client.DestSRKey, client.DestSRSecret, "dst") 141 | if !client.NoPrompt { 142 | preflightWriteChecks(destClient, false) 143 | } 144 | 145 | if (!strings.HasSuffix(srcClient.SRUrl, "confluent.cloud") || 146 | !strings.HasSuffix(destClient.SRUrl, "confluent.cloud")) && 147 | client.ThisRun == client.SYNC && client.SyncHardDeletes && !client.NoPrompt { 148 | 149 | fmt.Println("It looks like you are trying to sync hard deletions between non-Confluent Cloud Schema Registries") 150 | fmt.Println("Starting v1.1, ccloud-schema-exporter only supports hard deletion sync between Confluent Cloud Schema Registries, or Confluent Platform 6.1+") 151 | fmt.Println("------------------------------------------------------") 152 | fmt.Println("Do you wish to continue? (Y/n)") 153 | 154 | var text string 155 | 156 | _, err := fmt.Scanln(&text) 157 | if err != nil { 158 | log.Fatal(err) 159 | } 160 | 161 | if !strings.EqualFold(text, "Y") { 162 | os.Exit(0) 163 | } 164 | } 165 | 166 | if client.ThisRun == client.SYNC { 167 | client.Sync(srcClient, destClient) 168 | } 169 | if client.ThisRun == client.BATCH { 170 | client.BatchExport(srcClient, destClient) 171 | } 172 | 173 | log.Println("-----------------------------------------------") 174 | 175 | if client.ThisRun == client.BATCH { 176 | log.Println("Resetting target to READWRITE") 177 | destClient.SetMode(client.READWRITE) 178 | } 179 | 180 | log.Println("All Done! Thanks for using ccloud-schema-exporter!") 181 | 182 | } 183 | 184 | func preflightWriteChecks(destClient *client.SchemaRegistryClient, noImport bool) { 185 | 186 | if !destClient.IsReachable() { 187 | log.Println("Could not reach destination registry. Possible bad credentials?") 188 | os.Exit(0) 189 | } 190 | 191 | if !noImport { 192 | destSubjects := client.GetCurrentSubjectState(destClient) 193 | if len(destSubjects) != 0 && client.ThisRun != client.SYNC { 194 | log.Println("You have existing subjects registered in the destination registry, exporter cannot write schemas when " + 195 | "previous schemas exist in batch mode.") 196 | os.Exit(0) 197 | } 198 | 199 | if !destClient.IsImportModeReady() { 200 | 201 | fmt.Println("Destination Schema Registry is not set to IMPORT mode!") 202 | fmt.Println("------------------------------------------------------") 203 | fmt.Println("Set to import mode? (Y/n)") 204 | 205 | var text string 206 | 207 | _, err := fmt.Scanln(&text) 208 | if err != nil { 209 | log.Fatal(err) 210 | } 211 | 212 | if strings.EqualFold(text, "Y") { 213 | err := destClient.SetMode(client.IMPORT) 214 | if err == false { 215 | log.Println("Could not set destination registry to IMPORT Mode.") 216 | os.Exit(0) 217 | } 218 | } else { 219 | log.Println("Cannot export schemas if destination is not set to IMPORT Mode") 220 | os.Exit(0) 221 | } 222 | } 223 | } 224 | 225 | if !destClient.IsCompatReady() { 226 | 227 | fmt.Println("Destination Schema Registry is not set to NONE global compatibility level!") 228 | fmt.Println("We assume the source to be maintaining correct compatibility between registrations, per subject compatibility changes are not supported.") 229 | fmt.Println("------------------------------------------------------") 230 | fmt.Println("Set to NONE? 
(Y/n)") 231 | 232 | var text string 233 | 234 | _, err := fmt.Scanln(&text) 235 | if err != nil { 236 | log.Fatal(err) 237 | } 238 | 239 | if strings.EqualFold(text, "Y") { 240 | err := destClient.SetGlobalCompatibility(client.NONE) 241 | if err == false { 242 | log.Fatalln("Could not set destination registry to Global NONE Compatibility Level.") 243 | } 244 | } else { 245 | log.Println("Continuing without NONE Global Compatibility Level. Note this might arise some failures in registration of some schemas.") 246 | } 247 | } 248 | } 249 | -------------------------------------------------------------------------------- /cmd/integrationTests/hardDeletetionTesting/hardDeleteCloud_test.go: -------------------------------------------------------------------------------- 1 | package integration_deletion 2 | 3 | // 4 | // hardDeleteCloud_test.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | 9 | import ( 10 | client "github.com/abraham-leal/ccloud-schema-exporter/cmd/internals" 11 | "github.com/stretchr/testify/assert" 12 | "log" 13 | "os" 14 | "reflect" 15 | "testing" 16 | "time" 17 | ) 18 | 19 | var testClientSrc *client.SchemaRegistryClient 20 | var testClientDst *client.SchemaRegistryClient 21 | var testingSubjectKey = "thisCannotExist-key" 22 | var newSchema = "{\"type\":\"record\",\"name\":\"value_newnew\",\"namespace\":\"com.mycorp.mynamespace\",\"doc\":\"Sample schema to help you get started.\",\"fields\":[{\"name\":\"this\",\"type\":\"int\",\"doc\":\"The int type is a 32-bit signed integer.\"},{\"default\": null,\"name\": \"onefield\",\"type\": [\"null\",\"string\"]}]}" 23 | 24 | func TestMain(m *testing.M) { 25 | setupHardDeletesTest() 26 | code := m.Run() 27 | os.Exit(code) 28 | } 29 | 30 | func setupHardDeletesTest() { 31 | client.ScrapeInterval = 2 32 | client.SyncDeletes = true 33 | client.SyncHardDeletes = true 34 | client.HttpCallTimeout = 60 35 | 36 | testClientSrc = client.NewSchemaRegistryClient(os.Getenv("XX_SRC_CCLOUD_URL"), os.Getenv("XX_SRC_CCLOUD_KEY"), os.Getenv("XX_SRC_CCLOUD_SECRET"), "src") 37 | testClientDst = client.NewSchemaRegistryClient(os.Getenv("XX_DST_CCLOUD_URL"), os.Getenv("XX_DST_CCLOUD_KEY"), os.Getenv("XX_DST_CCLOUD_SECRET"), "dst") 38 | } 39 | 40 | func TestSyncModeHardDeletes(t *testing.T) { 41 | log.Println("Test Sync Hard Delete Mode!") 42 | 43 | setupHardDeletesTest() 44 | client.CancelRun = false 45 | client.AllowList = nil 46 | client.DisallowList = nil 47 | 48 | setImportMode() 49 | 50 | testClientSrc.RegisterSchema(newSchema, testingSubjectKey, "AVRO", nil) 51 | 52 | client.AllowList = map[string]bool{ 53 | testingSubjectKey: true, 54 | } 55 | client.DisallowList = nil 56 | 57 | commonSyncTest(t) 58 | killAsyncRoutine() 59 | cleanup() 60 | 61 | } 62 | 63 | func setImportMode() { 64 | if !testClientDst.IsImportModeReady() { 65 | err := testClientDst.SetMode(client.IMPORT) 66 | if err == false { 67 | log.Fatalln("Could not set destination registry to IMPORT ModeRecord.") 68 | } 69 | } 70 | } 71 | 72 | func commonSyncTest(t *testing.T) { 73 | 74 | startAsyncRoutine() 75 | time.Sleep(time.Duration(10) * time.Second) // Give time for sync 76 | testInitialSync(t, 1) 77 | time.Sleep(time.Duration(10) * time.Second) // Give time for sync 78 | testSoftDelete(t, 0) 79 | time.Sleep(time.Duration(10) * time.Second) // Give time for sync 80 | testHardDeleteSync(t, 0) 81 | time.Sleep(time.Duration(10) * time.Second) // Give time for sync 82 | } 83 | 84 | func startAsyncRoutine() { 85 | // Start sync in another goroutine 86 | go client.Sync(testClientSrc, 
testClientDst) 87 | time.Sleep(time.Duration(10) * time.Second) // Give time for sync 88 | } 89 | 90 | func killAsyncRoutine() { 91 | log.Println("Killing async goroutine") 92 | client.CancelRun = true 93 | time.Sleep(time.Duration(3) * time.Second) // Give thread time to die 94 | } 95 | 96 | func cleanup() { 97 | log.Println("Clean up SR") 98 | testClientDst.DeleteAllSubjectsPermanently() 99 | time.Sleep(time.Duration(3) * time.Second) // Allow time for deletes to complete 100 | } 101 | 102 | func printSubjectTestResult(srcSubjects map[string][]int64, destSubjects map[string][]int64) { 103 | log.Printf("Source subject-version mapping contents: %v", srcSubjects) 104 | log.Printf("Destination subject-version mapping contents: %v", destSubjects) 105 | } 106 | 107 | func printIDTestResult(srcIDs map[int64]map[string][]int64, dstIDs map[int64]map[string][]int64) { 108 | log.Printf("Source IDs contents: %v", srcIDs) 109 | log.Printf("Destination IDs contents: %v", dstIDs) 110 | } 111 | 112 | func testSoftDelete(t *testing.T, lenOfDestSubjects int) { 113 | 114 | // inject a soft delete 115 | testClientSrc.PerformSoftDelete(testingSubjectKey, 1) 116 | time.Sleep(time.Duration(10) * time.Second) // Give time for sync 117 | 118 | srcSubjects, destSubjects := getCurrentState() 119 | printSubjectTestResult(srcSubjects, destSubjects) 120 | 121 | assert.True(t, reflect.DeepEqual(srcSubjects, destSubjects)) 122 | assert.True(t, len(destSubjects) == lenOfDestSubjects) 123 | } 124 | 125 | func testInitialSync(t *testing.T, lenOfDestSubjects int) { 126 | log.Println("Testing initial sync") 127 | // Assert schemas in dest deep equal schemas in src 128 | 129 | srcSubjects, destSubjects := getCurrentState() 130 | 131 | printSubjectTestResult(srcSubjects, destSubjects) 132 | 133 | assert.True(t, reflect.DeepEqual(srcSubjects, destSubjects)) 134 | assert.True(t, len(destSubjects) == lenOfDestSubjects) 135 | } 136 | 137 | func testHardDeleteSync(t *testing.T, lenOfDestIDs int) { 138 | 139 | // inject a hard delete 140 | testClientSrc.PerformHardDelete(testingSubjectKey, 1) 141 | time.Sleep(time.Duration(10) * time.Second) // Give time for sync 142 | 143 | // Assert schemas in dest deep equal schemas in src 144 | srcIDs := testClientSrc.GetSoftDeletedIDs() 145 | dstIDs := testClientDst.GetSoftDeletedIDs() 146 | 147 | printIDTestResult(srcIDs, dstIDs) 148 | 149 | assert.True(t, reflect.DeepEqual(srcIDs, dstIDs)) 150 | assert.True(t, len(dstIDs) == lenOfDestIDs) 151 | 152 | } 153 | 154 | func getCurrentState() (map[string][]int64, map[string][]int64) { 155 | 156 | srcSubjects := make(map[string][]int64) 157 | destSubjects := make(map[string][]int64) 158 | 159 | srcChan := make(chan map[string][]int64) 160 | destChan := make(chan map[string][]int64) 161 | 162 | go testClientSrc.GetSubjectsWithVersions(srcChan, false) 163 | go testClientDst.GetSubjectsWithVersions(destChan, false) 164 | 165 | srcSubjects = <-srcChan 166 | destSubjects = <-destChan 167 | 168 | return srcSubjects, destSubjects 169 | } 170 | -------------------------------------------------------------------------------- /cmd/internals/context.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // context.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "flag" 10 | "fmt" 11 | "log" 12 | "os" 13 | ) 14 | 15 | func GetFlags() { 16 | 17 | flag.StringVar(&SrcSRUrl, "src-sr-url", "", "Url to the Source Schema Registry Cluster") 18 | flag.StringVar(&SrcSRKey, "src-sr-key", "", 
"API KEY for the Source Schema Registry Cluster") 19 | flag.StringVar(&SrcSRSecret, "src-sr-secret", "", "API SECRET for the Source Schema Registry Cluster") 20 | flag.StringVar(&DestSRUrl, "dest-sr-url", "", "Url to the Destination Schema Registry Cluster") 21 | flag.StringVar(&DestSRKey, "dest-sr-key", "", "API KEY for the Destination Schema Registry Cluster") 22 | flag.StringVar(&DestSRSecret, "dest-sr-secret", "", "API SECRET for the Destination Schema Registry Cluster") 23 | flag.StringVar(&CustomDestinationName, "customDestination", "", "Name of the implementation to be used as a destination (same as mapping)") 24 | flag.StringVar(&CustomSourceName, "customSource", "", "Name of the implementation to be used as a source (same as mapping)") 25 | flag.StringVar(&SchemaLoadType, "schemaLoad", "", "Schema Type for the load. Currently supported: AVRO") 26 | flag.IntVar(&HttpCallTimeout, "timeout", 60, "Timeout, in seconds, to use for all REST calls with the Schema Registries") 27 | flag.IntVar(&ScrapeInterval, "scrapeInterval", 60, "Amount of time ccloud-schema-exporter will delay between schema sync checks in seconds") 28 | flag.StringVar(&PathToWrite, "localPath", "", 29 | "Optional custom path for local functions. This must be an existing directory structure.") 30 | flag.BoolVar(&WithMetrics, "withMetrics", false, "Exposes metrics for the application in Prometheus format on :9020/metrics") 31 | flag.Var(&AllowList, "allowList", "A comma delimited list of schema subjects to allow. It also accepts paths to a file containing a list of subjects.") 32 | flag.Var(&DisallowList, "disallowList", "A comma delimited list of schema subjects to disallow. It also accepts paths to a file containing a list of subjects.") 33 | versionFlag := flag.Bool("version", false, "Print the current version and exit") 34 | usageFlag := flag.Bool("usage", false, "Print the usage of this tool") 35 | batchExportFlag := flag.Bool("batchExport", false, "Perform a one-time export of all schemas") 36 | syncFlag := flag.Bool("sync", false, "Sync schemas continuously") 37 | localCopyFlag := flag.Bool("getLocalCopy", false, "Perform a local back-up of all schemas in the source registry. Defaults to a folder (SchemaRegistryBackup) in the current path of the binaries.") 38 | fromLocalCopyFlag := flag.Bool("fromLocalCopy", false, "Registers all local schemas written by getLocalCopy. Defaults to a folder (SchemaRegistryBackup) in the current path of the binaries.") 39 | deleteFlag := flag.Bool("deleteAllFromDestination", false, "Setting this will run a delete on all schemas written to the destination registry. No respect for allow/disallow lists.") 40 | syncDeletesFlag := flag.Bool("syncDeletes", false, "Setting this will sync soft deletes from the source cluster to the destination") 41 | syncHardDeletesFlag := flag.Bool("syncHardDeletes", false, "Setting this will sync hard deletes from the source cluster to the destination") 42 | noPromptFlag := flag.Bool("noPrompt", false, "Set this flag to avoid checks while running. 
Assure you have the destination SR to correct Mode and Compatibility.") 43 | 44 | flag.Parse() 45 | 46 | if *noPromptFlag { 47 | NoPrompt = true 48 | } 49 | 50 | if *syncDeletesFlag { 51 | SyncDeletes = true 52 | } 53 | 54 | if *syncHardDeletesFlag { 55 | SyncHardDeletes = true 56 | } 57 | 58 | if *versionFlag { 59 | printVersion() 60 | os.Exit(0) 61 | } 62 | 63 | if *usageFlag { 64 | flag.PrintDefaults() 65 | os.Exit(0) 66 | } 67 | 68 | if *deleteFlag { 69 | log.Println("Deleting all schemas from DESTINATION registry") 70 | deleteAllFromDestination(DestSRUrl, DestSRKey, DestSRSecret) 71 | os.Exit(0) 72 | } 73 | 74 | if !*syncFlag && !*batchExportFlag && !*localCopyFlag && !*fromLocalCopyFlag && SchemaLoadType == "" { 75 | fmt.Println("You must specify a mode to run on.") 76 | fmt.Println("Usage:") 77 | fmt.Println("") 78 | flag.PrintDefaults() 79 | os.Exit(0) 80 | } 81 | 82 | if SchemaLoadType != "" { 83 | ThisRun = SCHEMALOAD 84 | } 85 | 86 | if *fromLocalCopyFlag { 87 | ThisRun = FROMLOCAL 88 | } 89 | 90 | if *localCopyFlag { 91 | ThisRun = TOLOCAL 92 | } 93 | 94 | if *batchExportFlag { 95 | ThisRun = BATCH 96 | } 97 | 98 | if *syncFlag { 99 | ThisRun = SYNC 100 | } 101 | 102 | } 103 | -------------------------------------------------------------------------------- /cmd/internals/customDestination.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // customDestination.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "log" 10 | "reflect" 11 | "strconv" 12 | "time" 13 | ) 14 | 15 | // Runner for the custom destination sync job 16 | func RunCustomDestinationSync(srcClient *SchemaRegistryClient, customDest CustomDestination) { 17 | err := customDest.SetUp() 18 | if err != nil { 19 | log.Println("Could not perform proper set-up of custom destination") 20 | log.Println(err) 21 | } 22 | 23 | listenForInterruption() 24 | 25 | //Begin sync 26 | for { 27 | if CancelRun == true { 28 | err := customDest.TearDown() 29 | if err != nil { 30 | log.Println("Could not perform proper tear-down of custom destination") 31 | log.Println(err) 32 | } 33 | return 34 | } 35 | beginSync := time.Now() 36 | 37 | srcSubjects := GetCurrentSubjectState(srcClient) 38 | destSubjects, err := customDest.GetDestinationState() 39 | checkDontFail(err) 40 | 41 | if !reflect.DeepEqual(srcSubjects, destSubjects) { 42 | diff := GetSubjectDiff(srcSubjects, destSubjects) 43 | // Perform sync 44 | customDestSync(diff, srcClient, customDest) 45 | //We anticipate that the custom destination will not have the notion of hard or soft deletes 46 | if SyncDeletes { 47 | customDestSyncDeletes(destSubjects, srcSubjects, srcClient, customDest) 48 | } 49 | } 50 | syncDuration := time.Since(beginSync) 51 | log.Printf("Finished sync in %d ms", syncDuration.Milliseconds()) 52 | 53 | time.Sleep(time.Duration(ScrapeInterval) * time.Second) 54 | } 55 | } 56 | 57 | // Runner of the batch job for custom destination 58 | func RunCustomDestinationBatch(srcClient *SchemaRegistryClient, customDest CustomDestination) { 59 | err := customDest.SetUp() 60 | if err != nil { 61 | log.Println("Could not perform proper set-up of custom destination") 62 | log.Println(err) 63 | } 64 | 65 | listenForInterruption() 66 | 67 | srcChan := make(chan map[string][]int64) 68 | go srcClient.GetSubjectsWithVersions(srcChan, false) 69 | srcSubjects := <-srcChan 70 | 71 | log.Println("Registering all schemas from " + srcClient.SRUrl) 72 | for srcSubject, srcVersions := range srcSubjects { 73 | 
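		// Check for cancellation between subjects so an interrupt can tear down the custom destination cleanly before exiting.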
if CancelRun == true { 74 | err := customDest.TearDown() 75 | if err != nil { 76 | log.Println("Could not perform proper tear-down of custom destination") 77 | log.Println(err) 78 | } 79 | return 80 | } 81 | for _, v := range srcVersions { 82 | schema := srcClient.GetSchema(srcSubject, v, false) 83 | log.Printf("Registering schema: %s with version: %d and ID: %d and Type: %s", 84 | schema.Subject, schema.Version, schema.Id, schema.SType) 85 | err := customDest.RegisterSchema(schema) 86 | checkCouldNotRegister(err) 87 | } 88 | } 89 | } 90 | 91 | // Sync function for custom destination 92 | func customDestSync(diff map[string][]int64, srcClient *SchemaRegistryClient, customDest CustomDestination) { 93 | if len(diff) != 0 { 94 | log.Println("Source registry has values that Destination does not, syncing...") 95 | for subject, versions := range diff { 96 | for _, v := range versions { 97 | schema := srcClient.GetSchema(subject, v, false) 98 | log.Println("Registering new schema: " + schema.Subject + 99 | " with version: " + strconv.FormatInt(schema.Version, 10) + 100 | " and ID: " + strconv.FormatInt(schema.Id, 10) + 101 | " and Type: " + schema.SType) 102 | err := customDest.RegisterSchema(schema) 103 | checkCouldNotRegister(err) 104 | } 105 | } 106 | } 107 | } 108 | 109 | // Function to delete the schemas that have been deleted (whether soft or hard deleted) in the source Schema Registry. 110 | func customDestSyncDeletes(destSubjects map[string][]int64, srcSubjects map[string][]int64, srcClient *SchemaRegistryClient, customDest CustomDestination) { 111 | diff := GetSubjectDiff(destSubjects, srcSubjects) 112 | if len(diff) != 0 { 113 | log.Println("Source registry has deletes that Destination does not, syncing...") 114 | for subject, versions := range diff { 115 | for _, v := range versions { 116 | err := customDest.DeleteSchema(subject, v) 117 | checkCouldNotRegister(err) 118 | } 119 | } 120 | } 121 | } 122 | 123 | /* 124 | This is a simple example of implementing the CustomDestination interface. 125 | It holds schemas in memory and performs/reports all necessary calls. 
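It is meant as a reference/template: a real destination would replace the in-memory map with calls to an external system.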
126 | */ 127 | 128 | type SampleCustomDestination struct { 129 | inMemState map[string][]int64 130 | } 131 | 132 | func NewSampleCustomDestination() SampleCustomDestination { 133 | return SampleCustomDestination{inMemState: map[string][]int64{}} 134 | } 135 | 136 | func (cd *SampleCustomDestination) SetUp() error { 137 | // Nothing to set up 138 | return nil 139 | } 140 | 141 | func (cd *SampleCustomDestination) RegisterSchema(record SchemaRecord) error { 142 | currentVersionSlice, exists := cd.inMemState[record.Subject] 143 | if exists { 144 | tempVersionSlice := append(currentVersionSlice, record.Version) 145 | cd.inMemState[record.Subject] = tempVersionSlice 146 | 147 | } else { 148 | tempVersionSlice := []int64{record.Version} 149 | cd.inMemState[record.Subject] = tempVersionSlice 150 | } 151 | return nil 152 | } 153 | 154 | func (cd *SampleCustomDestination) DeleteSchema(subject string, version int64) error { 155 | currentVersionSlice, exists := cd.inMemState[subject] 156 | newSlice := currentVersionSlice 157 | if exists { 158 | for index, v := range currentVersionSlice { 159 | if v == version { 160 | newSlice = removeFromSlice(currentVersionSlice, index) 161 | } 162 | } 163 | cd.inMemState[subject] = newSlice 164 | if len(cd.inMemState[subject]) == 0 { 165 | delete(cd.inMemState, subject) 166 | } 167 | } 168 | return nil 169 | } 170 | 171 | func (cd *SampleCustomDestination) GetDestinationState() (map[string][]int64, error) { 172 | return cd.inMemState, nil 173 | } 174 | 175 | func (cd *SampleCustomDestination) TearDown() error { 176 | // Nothing to tear-down 177 | return nil 178 | } 179 | 180 | func removeFromSlice(s []int64, i int) []int64 { 181 | s[i] = s[len(s)-1] 182 | return s[:len(s)-1] 183 | } 184 | 185 | func checkCouldNotRegister(err error) { 186 | if err != nil { 187 | log.Println("Could not register schema to destination:") 188 | log.Println(err) 189 | } 190 | } 191 | -------------------------------------------------------------------------------- /cmd/internals/customDestination_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // customDestination_test.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "github.com/stretchr/testify/assert" 10 | "log" 11 | "testing" 12 | "time" 13 | ) 14 | 15 | func TestMainStackCustomDestination(t *testing.T) { 16 | setup() 17 | t.Run("TCustomDestinationBatch", func(t *testing.T) { TCustomDestinationBatch(t) }) 18 | t.Run("TCustomDestinationSync", func(t *testing.T) { TCustomDestinationSync(t) }) 19 | tearDown() 20 | } 21 | 22 | func TCustomDestinationBatch(t *testing.T) { 23 | log.Println("Testing Custom Destination in Batch Mode") 24 | testClient.RegisterSchemaBySubjectAndIDAndVersion(mockSchema, testingSubject, 10001, 1, "AVRO", []SchemaReference{}) 25 | myTestCustomDestination := NewSampleCustomDestination() 26 | 27 | RunCustomDestinationBatch(testClient, &myTestCustomDestination) 28 | 29 | _, exists := myTestCustomDestination.inMemState[testingSubject] 30 | 31 | assert.True(t, exists) 32 | } 33 | 34 | func TCustomDestinationSync(t *testing.T) { 35 | log.Println("Testing Custom Destination in Sync Mode") 36 | myTestCustomDestination := NewSampleCustomDestination() 37 | 38 | ScrapeInterval = 3 39 | SyncDeletes = true 40 | go RunCustomDestinationSync(testClient, &myTestCustomDestination) 41 | time.Sleep(time.Duration(4) * time.Second) // Give time for sync 42 | 43 | testClient.RegisterSchemaBySubjectAndIDAndVersion(mockSchema, newSubject, 10001, 1, 
"AVRO", []SchemaReference{}) 44 | time.Sleep(time.Duration(4) * time.Second) // Give time for sync 45 | _, exists1 := myTestCustomDestination.inMemState[testingSubject] 46 | _, exists2 := myTestCustomDestination.inMemState[newSubject] 47 | 48 | assert.True(t, exists1) 49 | assert.True(t, exists2) 50 | 51 | // Test Delete 52 | testClient.PerformSoftDelete(newSubject, 1) 53 | testClient.PerformHardDelete(newSubject, 1) 54 | time.Sleep(time.Duration(4) * time.Second) // Give time for sync 55 | 56 | _, exists3 := myTestCustomDestination.inMemState[newSubject] 57 | assert.False(t, exists3) 58 | 59 | CancelRun = true 60 | time.Sleep(time.Duration(4) * time.Second) // Give time for killing goroutine 61 | } 62 | -------------------------------------------------------------------------------- /cmd/internals/customSource.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // customSource.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "encoding/json" 10 | "fmt" 11 | "io/ioutil" 12 | "log" 13 | "os" 14 | "reflect" 15 | "strconv" 16 | "strings" 17 | "time" 18 | ) 19 | 20 | // Runner for the custom source sync mode 21 | func RunCustomSourceSync(dstClient *SchemaRegistryClient, customSrc CustomSource) { 22 | err := customSrc.SetUp() 23 | if err != nil { 24 | log.Println("Could not perform proper set-up of custom source") 25 | log.Println(err) 26 | } 27 | 28 | listenForInterruption() 29 | 30 | //Begin sync 31 | for { 32 | if CancelRun == true { 33 | err := customSrc.TearDown() 34 | if err != nil { 35 | log.Println("Could not perform proper tear-down of custom source") 36 | log.Println(err) 37 | } 38 | return 39 | } 40 | beginSync := time.Now() 41 | 42 | srcSubjects, err := customSrc.GetSourceState() 43 | destSubjects := GetCurrentSubjectState(dstClient) 44 | checkDontFail(err) 45 | 46 | if !reflect.DeepEqual(srcSubjects, destSubjects) { 47 | diff := GetSubjectDiff(srcSubjects, destSubjects) 48 | // Perform sync 49 | customSrcSync(diff, dstClient, customSrc) 50 | //We anticipate that the custom source will not have the concept of hard or soft deletes 51 | if SyncDeletes { 52 | customSrcSyncDeletes(destSubjects, srcSubjects, dstClient, customSrc) 53 | } 54 | } 55 | syncDuration := time.Since(beginSync) 56 | log.Printf("Finished sync in %d ms", syncDuration.Milliseconds()) 57 | 58 | time.Sleep(time.Duration(ScrapeInterval) * time.Second) 59 | } 60 | 61 | return 62 | } 63 | 64 | // Sync job for custom source 65 | func customSrcSync(diff map[string][]int64, dstClient *SchemaRegistryClient, customSrc CustomSource) { 66 | if len(diff) != 0 { 67 | log.Println("Custom Source has values that Schema Registry does not, syncing...") 68 | for sbj, versions := range diff { 69 | for _, v := range versions { 70 | id, stype, schema, references, err := customSrc.GetSchema(sbj, v) 71 | if err != nil { 72 | log.Println("Could not retrieve schema from custom source") 73 | } 74 | if checkSubjectIsAllowed(sbj) { 75 | thisSchemaRecord := SchemaRecord{ 76 | Subject: sbj, 77 | Schema: schema, 78 | SType: stype, 79 | Version: v, 80 | Id: id, 81 | References: references, 82 | } 83 | // Assure References are registered first 84 | RegisterReferencesWithCustomSource(thisSchemaRecord, customSrc, dstClient) 85 | log.Println("Registering new schema: " + sbj + 86 | " with version: " + strconv.FormatInt(v, 10) + 87 | " and ID: " + strconv.FormatInt(id, 10) + 88 | " and Type: " + stype) 89 | 
dstClient.RegisterSchemaBySubjectAndIDAndVersion(thisSchemaRecord.Schema, 90 | thisSchemaRecord.Subject, thisSchemaRecord.Id, thisSchemaRecord.Version, 91 | thisSchemaRecord.SType, thisSchemaRecord.References) 92 | } 93 | } 94 | } 95 | } 96 | } 97 | 98 | // Sync deletes for custom source, this performs hard deleted in the Schema Registry 99 | func customSrcSyncDeletes(destSubjects map[string][]int64, srcSubjects map[string][]int64, dstClient *SchemaRegistryClient, customSrc CustomSource) { 100 | diff := GetSubjectDiff(destSubjects, srcSubjects) 101 | if len(diff) != 0 { 102 | log.Println("Source registry has deletes that Destination does not, syncing...") 103 | for sbj, versions := range diff { 104 | for _, v := range versions { 105 | if checkSubjectIsAllowed(sbj) { 106 | if dstClient.subjectExists(sbj) { 107 | if dstClient.PerformSoftDelete(sbj, v) { 108 | dstClient.PerformHardDelete(sbj, v) 109 | } 110 | } 111 | } 112 | } 113 | } 114 | } 115 | } 116 | 117 | // Runner for the batch job of custom source 118 | func RunCustomSourceBatch(dstClient *SchemaRegistryClient, customSrc CustomSource) { 119 | err := customSrc.SetUp() 120 | if err != nil { 121 | log.Println("Could not perform proper set-up of custom source") 122 | log.Println(err) 123 | } 124 | 125 | listenForInterruption() 126 | 127 | srcSubjects, err := customSrc.GetSourceState() 128 | checkDontFail(err) 129 | 130 | log.Println("Registering all schemas from custom source") 131 | for sbj, srcVersions := range srcSubjects { 132 | if CancelRun == true { 133 | err := customSrc.TearDown() 134 | if err != nil { 135 | log.Println("Could not perform proper tear-down of custom source") 136 | log.Println(err) 137 | } 138 | return 139 | } 140 | for _, v := range srcVersions { 141 | id, stype, schema, references, err := customSrc.GetSchema(sbj, v) 142 | if err != nil { 143 | log.Println("Could not retrieve schema from custom source") 144 | } else { 145 | if checkSubjectIsAllowed(sbj) { 146 | thisSchemaRecord := SchemaRecord{ 147 | Subject: sbj, 148 | Schema: schema, 149 | SType: stype, 150 | Version: v, 151 | Id: id, 152 | References: references, 153 | } 154 | // Assure references are registered first 155 | RegisterReferencesWithCustomSource(thisSchemaRecord, customSrc, dstClient) 156 | log.Printf("Registering schema: %s with version: %d and ID: %d and Type: %s", 157 | sbj, v, id, stype) 158 | dstClient.RegisterSchemaBySubjectAndIDAndVersion(thisSchemaRecord.Schema, 159 | thisSchemaRecord.Subject, thisSchemaRecord.Id, thisSchemaRecord.Version, 160 | thisSchemaRecord.SType, thisSchemaRecord.References) 161 | } 162 | } 163 | } 164 | } 165 | } 166 | 167 | // Registers the schema references given in the SchemaRecord, recursively, for a custom source 168 | func RegisterReferencesWithCustomSource(wrappingSchema SchemaRecord, customSrc CustomSource, destClient *SchemaRegistryClient) { 169 | if len(wrappingSchema.References) != 0 { 170 | log.Printf("Registering references for subject %s and version %d", wrappingSchema.Subject, wrappingSchema.Version) 171 | for _, schemaReference := range wrappingSchema.References { 172 | schemaId, schemaType, schemaString, schemaReferencesWithin, err := customSrc.GetSchema(schemaReference.Subject, schemaReference.Version) 173 | if err != nil { 174 | log.Println("Could not retrieve schema from custom source") 175 | } 176 | if len(schemaReferencesWithin) != 0 { 177 | thisReferenceSchemaRecord := SchemaRecord{ 178 | Subject: schemaReference.Subject, 179 | Schema: schemaString, 180 | SType: schemaType, 181 | Version: 
schemaReference.Version, 182 | Id: schemaId, 183 | References: schemaReferencesWithin, 184 | } 185 | RegisterReferencesWithCustomSource(thisReferenceSchemaRecord, customSrc, destClient) 186 | } 187 | 188 | schemaAlreadyRegistered := new(SchemaAlreadyRegisteredResponse) 189 | 190 | responseBody := destClient.RegisterSchemaBySubjectAndIDAndVersion(schemaString, 191 | schemaReference.Subject, 192 | schemaId, 193 | schemaReference.Version, 194 | schemaType, 195 | schemaReferencesWithin) 196 | 197 | err = json.Unmarshal(responseBody, &schemaAlreadyRegistered) 198 | 199 | if err == nil { 200 | log.Printf("Reference schema subject %s was already written with version: %d and ID: %d", 201 | schemaReference.Subject, schemaReference.Version, schemaId) 202 | } else { 203 | log.Printf("Registering referenced schema: %s with version: %d and ID: %d and Type: %s", 204 | schemaReference.Subject, schemaReference.Version, schemaId, schemaType) 205 | } 206 | } 207 | } 208 | } 209 | 210 | /* 211 | This is an example of a custom source. 212 | This example uses Apicurio Registry as the source. 213 | */ 214 | func NewApicurioSource() ApicurioSource { 215 | apicurioOptionsVar := os.Getenv("APICURIO_OPTIONS") 216 | apicurioOptionsMap := map[string]string{} 217 | if apicurioOptionsVar != "" { 218 | tempOptionsSlice := strings.Split(apicurioOptionsVar, ";") 219 | for _, option := range tempOptionsSlice { 220 | splitOption := strings.SplitN(option, "=", 2) 221 | apicurioOptionsMap[splitOption[0]] = splitOption[1] 222 | } 223 | log.Printf("Starting Apicurio Source with endpoint: %s", apicurioOptionsMap["apicurioUrl"]) 224 | return ApicurioSource{ 225 | Options: apicurioOptionsMap, 226 | apiCurioUrl: apicurioOptionsMap["apicurioUrl"], 227 | referenceName: map[string]string{}, 228 | } 229 | } 230 | return ApicurioSource{ 231 | Options: apicurioOptionsMap, 232 | apiCurioUrl: "http://localhost:8081/api", 233 | referenceName: map[string]string{}, 234 | } 235 | } 236 | 237 | // In-Mem Custom Source for testing purposes 238 | func NewInMemRegistry(records []SchemaRecord) inMemRegistry { 239 | state := map[int64]SchemaRecord{} 240 | for _, record := range records { 241 | state[record.Id] = SchemaRecord{ 242 | Subject: record.Subject, 243 | Schema: record.Schema, 244 | SType: record.SType, 245 | Version: record.Version, 246 | Id: record.Id, 247 | References: record.References, 248 | } 249 | } 250 | 251 | return inMemRegistry{state} 252 | } 253 | 254 | /* 255 | Implementation of a simple custom source 256 | */ 257 | type inMemRegistry struct { 258 | inMemSchemas map[int64]SchemaRecord 259 | } 260 | 261 | func (iM inMemRegistry) SetUp() error { 262 | return nil 263 | } 264 | 265 | func (iM inMemRegistry) GetSchema(sbj string, version int64) (id int64, stype string, schema string, references []SchemaReference, err error) { 266 | for _, schemaRecord := range iM.inMemSchemas { 267 | if schemaRecord.Subject == sbj && schemaRecord.Version == version { 268 | return schemaRecord.Id, schemaRecord.SType, schemaRecord.Schema, schemaRecord.References, nil 269 | } 270 | } 271 | return 0, "", "", nil, fmt.Errorf("schema not found") 272 | } 273 | 274 | func (iM inMemRegistry) GetSourceState() (map[string][]int64, error) { 275 | currentState := map[string][]int64{} 276 | for _, schemaRecord := range iM.inMemSchemas { 277 | _, haveSeen := currentState[schemaRecord.Subject] 278 | if haveSeen { 279 | currentState[schemaRecord.Subject] = append(currentState[schemaRecord.Subject], schemaRecord.Version) 280 | } else { 281 | 
currentState[schemaRecord.Subject] = []int64{schemaRecord.Version} 282 | } 283 | } 284 | return currentState, nil 285 | } 286 | 287 | func (iM inMemRegistry) TearDown() error { 288 | return nil 289 | } 290 | 291 | // Another example of a custom source 292 | 293 | type SchemaApicurioMeta struct { 294 | Name string `json:"name"` 295 | CreatedOn int64 `json:"createdOn,omitempty"` 296 | ModifiedOn int64 `json:"modifiedOn,omitempty"` 297 | Id string `json:"id,omitempty"` 298 | Version int64 `json:"version"` 299 | Stype string `json:"type"` 300 | GlobalId int64 `json:"globalId"` 301 | State string `json:"state,omitempty"` 302 | Labels []string `json:"labels,omitempty"` 303 | Properties map[string]string `json:"properties,omitempty"` 304 | } 305 | 306 | type ApicurioSource struct { 307 | Options map[string]string 308 | apiCurioUrl string 309 | referenceName map[string]string 310 | } 311 | 312 | func (ap *ApicurioSource) SetUp() error { 313 | url, exists := ap.Options["apicurioUrl"] 314 | if exists && ap.apiCurioUrl != "http://localhost:8081/api" { 315 | ap.apiCurioUrl = url + "/api" 316 | delete(ap.Options, "apicurioUrl") 317 | } else { 318 | log.Println("Options not provided, using local apicurio connection at: http://localhost:8081") 319 | } 320 | return nil 321 | } 322 | 323 | func (ap *ApicurioSource) GetSchema(subject string, version int64) (id int64, stype string, schema string, references []SchemaReference, err error) { 324 | artifactID, isThere := ap.referenceName[subject] 325 | if !isThere { 326 | log.Println("State snapshot does not match new requests. Allow a new run for a better sync.") 327 | } 328 | getSchemaEndpoint := fmt.Sprintf("%s/artifacts/%s/versions/%v", ap.apiCurioUrl, artifactID, version) 329 | log.Println(getSchemaEndpoint) 330 | metaEndpoint := getSchemaEndpoint + "/meta" 331 | metaReq := GetNewRequest("GET", metaEndpoint, "x", "x", ap.Options, nil) 332 | schemaReq := GetNewRequest("GET", getSchemaEndpoint, "x", "x", ap.Options, nil) 333 | 334 | metaResponse, err := httpClient.Do(metaReq) 335 | checkDontFail(err) 336 | if metaResponse.StatusCode != 200 { 337 | log.Println("Could not fetch schema metadata") 338 | } 339 | metaResponseContainer := SchemaApicurioMeta{} 340 | 341 | metaBody, err := ioutil.ReadAll(metaResponse.Body) 342 | checkDontFail(err) 343 | metaResponse.Body.Close() 344 | 345 | err = json.Unmarshal(metaBody, &metaResponseContainer) 346 | checkDontFail(err) 347 | 348 | schemaResponse, err := httpClient.Do(schemaReq) 349 | checkDontFail(err) 350 | if schemaResponse.StatusCode != 200 { 351 | log.Println("Could not fetch schema") 352 | } 353 | 354 | schemaBody, err := ioutil.ReadAll(schemaResponse.Body) 355 | checkDontFail(err) 356 | schemaResponse.Body.Close() 357 | 358 | return metaResponseContainer.GlobalId, metaResponseContainer.Stype, string(schemaBody), nil, nil 359 | } 360 | 361 | func (ap *ApicurioSource) GetSourceState() (map[string][]int64, error) { 362 | ap.referenceName = make(map[string]string) 363 | 364 | // Get All Artifacts 365 | listArtifactsEndpoint := fmt.Sprintf("%s/artifacts", ap.apiCurioUrl) 366 | listReq := GetNewRequest("GET", listArtifactsEndpoint, "x", "x", ap.Options, nil) 367 | listResponse, err := httpClient.Do(listReq) 368 | checkDontFail(err) 369 | if listResponse.StatusCode != 200 { 370 | log.Println("Could not fetch artifact metadata for state assessment") 371 | } 372 | listResponseContainer := []string{} 373 | listBody, err := ioutil.ReadAll(listResponse.Body) 374 | checkDontFail(err) 375 | listResponse.Body.Close() 376 | err 
= json.Unmarshal(listBody, &listResponseContainer) 377 | checkDontFail(err) 378 | 379 | // Get All Versions 380 | ArtifactVersionMap := map[string][]int64{} 381 | for _, artifactID := range listResponseContainer { 382 | listArtifactsVersionsEndpoint := fmt.Sprintf("%s/%s/versions", listArtifactsEndpoint, artifactID) 383 | versionsReq := GetNewRequest("GET", listArtifactsVersionsEndpoint, "x", "x", ap.Options, nil) 384 | listVersionResponse, err := httpClient.Do(versionsReq) 385 | checkDontFail(err) 386 | if listVersionResponse.StatusCode != 200 { 387 | log.Println("Could not fetch version metadata for state assessment") 388 | } 389 | versionsResponseContainer := []int64{} 390 | versionsBody, err := ioutil.ReadAll(listVersionResponse.Body) 391 | checkDontFail(err) 392 | listResponse.Body.Close() 393 | err = json.Unmarshal(versionsBody, &versionsResponseContainer) 394 | checkDontFail(err) 395 | 396 | ArtifactVersionMap[artifactID] = versionsResponseContainer 397 | } 398 | 399 | sourceState := map[string][]int64{} 400 | // Get All necessary metadata 401 | for artifactID, versions := range ArtifactVersionMap { 402 | for _, version := range versions { 403 | listArtifactsVersionsMetaEndpoint := fmt.Sprintf("%s/%s/versions/%v/meta", listArtifactsEndpoint, artifactID, version) 404 | metaReq := GetNewRequest("GET", listArtifactsVersionsMetaEndpoint, "x", "x", ap.Options, nil) 405 | 406 | metaResponse, err := httpClient.Do(metaReq) 407 | checkDontFail(err) 408 | if metaResponse.StatusCode != 200 { 409 | log.Println("Could not fetch schema metadata for state assessment") 410 | } 411 | metaResponseContainer := SchemaApicurioMeta{} 412 | metaBody, err := ioutil.ReadAll(metaResponse.Body) 413 | checkDontFail(err) 414 | metaResponse.Body.Close() 415 | err = json.Unmarshal(metaBody, &metaResponseContainer) 416 | checkDontFail(err) 417 | 418 | if metaResponseContainer.Stype == "AVRO" || metaResponseContainer.Stype == "JSON" || 419 | metaResponseContainer.Stype == "PROTOBUF" { 420 | artifactVersions, haveSeenBefore := sourceState[artifactID] 421 | if !haveSeenBefore { 422 | sourceState[metaResponseContainer.Name] = []int64{metaResponseContainer.Version} 423 | ap.referenceName[metaResponseContainer.Name] = artifactID 424 | } else { 425 | artifactVersions := append(artifactVersions, metaResponseContainer.Version) 426 | sourceState[metaResponseContainer.Name] = artifactVersions 427 | log.Println(sourceState) 428 | } 429 | } 430 | } 431 | } 432 | 433 | return sourceState, nil 434 | } 435 | 436 | func (ap *ApicurioSource) TearDown() error { 437 | return nil 438 | } 439 | -------------------------------------------------------------------------------- /cmd/internals/customSource_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // customSource_test.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "github.com/stretchr/testify/assert" 10 | "log" 11 | "reflect" 12 | "testing" 13 | "time" 14 | ) 15 | 16 | var schemaToReferenceDTwo = "{\"type\":\"record\",\"name\":\"referenceNextLevel\",\"namespace\":\"com.reference\",\"fields\":[{\"name\":\"someField\",\"type\":\"string\"}]}" 17 | 18 | var schemaToReferenceDOne = "{\"type\":\"record\",\"name\":\"reference\",\"namespace\":\"com.reference\",\"fields\":[{\"name\":\"someField\",\"type\":\"com.reference.referenceNextLevel\"}]}" 19 | 20 | var schemaReferencing = 
"{\"type\":\"record\",\"name\":\"sampleRecordreferencing\",\"namespace\":\"com.mycorp.somethinghere\",\"fields\":[{\"name\":\"reference\",\"type\":\"com.reference.reference\"}]}" 21 | 22 | var referenceDepthTwo = SchemaRecord{ 23 | Subject: "referenceNextLevel", 24 | Schema: schemaToReferenceDTwo, 25 | SType: "AVRO", 26 | Version: 1, 27 | Id: 998, 28 | References: nil, 29 | } 30 | 31 | var DTwoReference = SchemaReference{ 32 | Name: "com.reference.referenceNextLevel", 33 | Subject: "referenceNextLevel", 34 | Version: 1, 35 | } 36 | 37 | var referenceDepthOne = SchemaRecord{ 38 | Subject: "reference", 39 | Schema: schemaToReferenceDOne, 40 | SType: "AVRO", 41 | Version: 1, 42 | Id: 999, 43 | References: []SchemaReference{DTwoReference}, 44 | } 45 | var DOneReference = SchemaReference{ 46 | Name: "com.reference.reference", 47 | Subject: "reference", 48 | Version: 1, 49 | } 50 | 51 | var schema1 = SchemaRecord{ 52 | Subject: testingSubject, 53 | Schema: schemaReferencing, 54 | SType: "AVRO", 55 | Version: 1, 56 | Id: 10001, 57 | References: []SchemaReference{DOneReference}, 58 | } 59 | 60 | func TestMainStackCustomSource(t *testing.T) { 61 | setup() 62 | t.Run("TCustomSourceBatch", func(t *testing.T) { TCustomSourceBatch(t) }) 63 | t.Run("TCustomSourceSync", func(t *testing.T) { TCustomSourceSync(t) }) 64 | tearDown() 65 | } 66 | 67 | func TCustomSourceBatch(t *testing.T) { 68 | testClient.DeleteAllSubjectsPermanently() 69 | 70 | CancelRun = false 71 | log.Println("Testing Custom Source in Batch Mode") 72 | 73 | myTestCustomSource := NewInMemRegistry([]SchemaRecord{schema1, referenceDepthOne, referenceDepthTwo}) 74 | 75 | RunCustomSourceBatch(testClient, &myTestCustomSource) 76 | 77 | subjectState := GetCurrentSubjectState(testClient) 78 | 79 | assert.True(t, reflect.DeepEqual(subjectState[myTestCustomSource.inMemSchemas[referenceDepthTwo.Id].Subject], []int64{1})) 80 | assert.True(t, reflect.DeepEqual(subjectState[myTestCustomSource.inMemSchemas[referenceDepthOne.Id].Subject], []int64{1})) 81 | assert.True(t, reflect.DeepEqual(subjectState[myTestCustomSource.inMemSchemas[schema1.Id].Subject], []int64{1})) 82 | 83 | testClient.DeleteAllSubjectsPermanently() 84 | } 85 | 86 | func TCustomSourceSync(t *testing.T) { 87 | testClient.DeleteAllSubjectsPermanently() 88 | 89 | log.Println("Testing Custom Source in Sync Mode") 90 | 91 | myTestCustomSource := NewInMemRegistry([]SchemaRecord{schema1, referenceDepthOne, referenceDepthTwo}) 92 | 93 | ScrapeInterval = 3 94 | CancelRun = false 95 | SyncDeletes = true 96 | go RunCustomSourceSync(testClient, &myTestCustomSource) 97 | time.Sleep(time.Duration(5) * time.Second) // Give time for sync 98 | 99 | subjectState := GetCurrentSubjectState(testClient) 100 | 101 | assert.True(t, reflect.DeepEqual(subjectState[myTestCustomSource.inMemSchemas[referenceDepthTwo.Id].Subject], []int64{1})) 102 | assert.True(t, reflect.DeepEqual(subjectState[myTestCustomSource.inMemSchemas[referenceDepthOne.Id].Subject], []int64{1})) 103 | assert.True(t, reflect.DeepEqual(subjectState[myTestCustomSource.inMemSchemas[schema1.Id].Subject], []int64{1})) 104 | 105 | // Test deletes sync, this should delete the schema specified and any that reference it. 
106 | delete(myTestCustomSource.inMemSchemas, referenceDepthOne.Id) 107 | delete(myTestCustomSource.inMemSchemas, schema1.Id) 108 | time.Sleep(time.Duration(5) * time.Second) // Give time for sync 109 | 110 | assert.Equal(t, 1, len(myTestCustomSource.inMemSchemas)) 111 | assert.Equal(t, len(myTestCustomSource.inMemSchemas), len(testClient.getSchemaList(false))) 112 | 113 | CancelRun = true 114 | time.Sleep(time.Duration(3) * time.Second) // Give time for killing goroutine 115 | 116 | testClient.DeleteAllSubjectsPermanently() 117 | } 118 | -------------------------------------------------------------------------------- /cmd/internals/definitions.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // definitions.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "fmt" 10 | "io/ioutil" 11 | "os" 12 | "strings" 13 | "unicode" 14 | ) 15 | 16 | // A client that can perform actions against a backing Schema Registry 17 | type SchemaRegistryClient struct { 18 | SRUrl string 19 | SRApiKey string 20 | SRApiSecret string 21 | } 22 | 23 | /* 24 | Any struct that implements this interface is able to run an instance of sync and batch exporting. 25 | */ 26 | type CustomDestination interface { 27 | // Perform any set-up behavior before start of sync/batch export 28 | SetUp() error 29 | // An implementation should handle the registration of a schema in the destination. 30 | // The SchemaRecord struct provides all details needed for registration. 31 | RegisterSchema(record SchemaRecord) error 32 | // An implementation should handle the deletion of a schema in the destination. 33 | // The Destination must be able to resolve what to delete from the subject and version provided. 34 | DeleteSchema(subject string, version int64) error 35 | // An implementation should be able to send exactly one map describing the state of the destination 36 | // This map should be minimal. Describing only the Subject and Versions that already exist. 37 | GetDestinationState() (map[string][]int64, error) 38 | // Perform any tear-down behavior before stop of sync/batch export 39 | TearDown() error 40 | } 41 | 42 | /* 43 | Any struct that implements this interface is able to run an instance of sync and batch exporting. 44 | */ 45 | type CustomSource interface { 46 | // Perform any set-up behavior before start of sync/batch export 47 | SetUp() error 48 | // An implementation should handle the retrieval of a schema from the source. 49 | GetSchema(subject string, version int64) (id int64, stype string, schema string, references []SchemaReference, err error) 50 | // An implementation should be able to send exactly one map describing the state of the source 51 | // This map should be minimal. Describing only the Subject and Versions that exist. 
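// For illustration only (hypothetical subjects and versions), a conforming return value could
// look like: map[string][]int64{"payments-value": {1, 2, 3}, "orders-key": {1}}, meaning subject
// "payments-value" currently exists with versions 1-3 and "orders-key" with version 1.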
52 | GetSourceState() (map[string][]int64, error) 53 | // Perform any tear-down behavior before stop of sync/batch export 54 | TearDown() error 55 | } 56 | 57 | // Holding struct that describes a schema record 58 | type SchemaRecord struct { 59 | Subject string `json:"subject"` 60 | Schema string `json:"schema"` 61 | SType string `json:"schemaType"` 62 | Version int64 `json:"version"` 63 | Id int64 `json:"id"` 64 | References []SchemaReference `json:"references"` 65 | } 66 | 67 | type SchemaReference struct { 68 | Name string `json:"name"` 69 | Subject string `json:"subject"` 70 | Version int64 `json:"version"` 71 | } 72 | 73 | type SchemaAlreadyRegisteredResponse struct { 74 | Id int64 `json:"id"` 75 | } 76 | 77 | //Constructor to assure Type-less schemas get registered with Avro 78 | func (srs SchemaRecord) setTypeIfEmpty() SchemaRecord { 79 | if srs.SType == "" { 80 | srs.SType = "AVRO" 81 | } 82 | 83 | return srs 84 | } 85 | 86 | //Constructor to assure Reference-less schemas get registered 87 | func (srs SchemaRecord) setReferenceIfEmpty() SchemaRecord { 88 | if len(srs.References) == 0 { 89 | srs.References = []SchemaReference{} 90 | } 91 | 92 | return srs 93 | } 94 | 95 | // Holding struct for registering a schema in an SR compliant way 96 | type SchemaToRegister struct { 97 | Schema string `json:"schema"` 98 | Id int64 `json:"id,omitempty"` 99 | Version int64 `json:"version,omitempty"` 100 | SType string `json:"schemaType"` 101 | References []SchemaReference `json:"references"` 102 | } 103 | 104 | // Holding struct for retrieving a schema 105 | type SchemaExtraction struct { 106 | Schema string `json:"schema"` 107 | Id int64 `json:"id"` 108 | Version int64 `json:"version"` 109 | Subject string `json:"subject"` 110 | References []SchemaReference `json:"references"` 111 | } 112 | 113 | type ModeRecord struct { 114 | Mode string `json:"mode"` 115 | } 116 | 117 | type CompatRecord struct { 118 | Compatibility string `json:"compatibility"` 119 | } 120 | 121 | type SubjectWithVersions struct { 122 | Subject string 123 | Versions []int64 124 | } 125 | 126 | type SubjectVersion struct { 127 | Subject string `json:"subject"` 128 | Version int64 `json:"version"` 129 | } 130 | 131 | type ErrorMessage struct { 132 | ErrorCode int64 `json:"error_code"` 133 | ErrorMessage string `json:"message"` 134 | } 135 | 136 | type StringArrayFlag map[string]bool 137 | 138 | func (i *StringArrayFlag) String() string { 139 | return fmt.Sprintln(*i) 140 | } 141 | 142 | func (i *StringArrayFlag) Set(value string) error { 143 | currentPath, _ := os.Getwd() 144 | 145 | if fileExists(value) { 146 | f, err := ioutil.ReadFile(CheckPath(value, currentPath)) 147 | if err != nil { 148 | panic(err) 149 | } 150 | value = string(f) 151 | } 152 | 153 | nospaces := i.removeSpaces(value) 154 | 155 | tempMap := map[string]bool{} 156 | 157 | for _, s := range strings.Split(nospaces, ",") { 158 | tempMap[s] = true 159 | } 160 | 161 | *i = tempMap 162 | 163 | return nil 164 | } 165 | 166 | func (i *StringArrayFlag) removeSpaces(str string) string { 167 | return strings.Map(func(r rune) rune { 168 | if unicode.IsSpace(r) { 169 | return -1 170 | } 171 | return r 172 | }, str) 173 | } 174 | -------------------------------------------------------------------------------- /cmd/internals/exportSchemas.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // exportSchemas.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "log" 10 | ) 11 | 12 | func 
BatchExport(srcClient *SchemaRegistryClient, destClient *SchemaRegistryClient) {
13 | listenForInterruption()
14 | 
15 | srcSubjects := GetCurrentSubjectState(srcClient)
16 | 
17 | // Set up soft-deleted IDs in the destination for interpretation by the destination registry
18 | if SyncDeletes {
19 | syncExistingSoftDeletedSubjects(srcClient, destClient)
20 | }
21 | 
22 | log.Println("Registering all schemas from " + srcClient.SRUrl)
23 | for srcSubject, srcVersions := range srcSubjects {
24 | if CancelRun == true {
25 | return
26 | }
27 | for _, v := range srcVersions {
28 | schema := srcClient.GetSchema(srcSubject, v, false)
29 | RegisterReferences(schema, srcClient, destClient, false)
30 | log.Printf("Registering schema: %s with version: %d and ID: %d and Type: %s",
31 | schema.Subject, schema.Version, schema.Id, schema.SType)
32 | destClient.RegisterSchemaBySubjectAndIDAndVersion(schema.Schema,
33 | schema.Subject,
34 | schema.Id,
35 | schema.Version,
36 | schema.SType,
37 | schema.References)
38 | }
39 | }
40 | }
41 | 
--------------------------------------------------------------------------------
/cmd/internals/getInfo.go:
--------------------------------------------------------------------------------
1 | package client
2 | 
3 | //
4 | // getInfo.go
5 | // Copyright 2020 Abraham Leal
6 | //
7 | 
8 | import (
9 | "errors"
10 | "os"
11 | )
12 | 
13 | // SrcGetAPIKey returns the Source API Key from environment variables
14 | // if an API Key cannot be found, it exits the process
15 | func SrcGetAPIKey() string {
16 | key, present := os.LookupEnv("SRC_API_KEY")
17 | if present && key != "" {
18 | return key
19 | }
20 | 
21 | panic(errors.New("SRC_API_KEY environment variable has not been specified"))
22 | }
23 | 
24 | // SrcGetAPISecret returns the Source API Secret from environment variables
25 | // if an API Secret cannot be found, it exits the process
26 | func SrcGetAPISecret() string {
27 | secret, present := os.LookupEnv("SRC_API_SECRET")
28 | if present && secret != "" {
29 | return secret
30 | }
31 | 
32 | panic(errors.New("SRC_API_SECRET environment variable has not been specified"))
33 | }
34 | 
35 | // SrcGetSRUrl returns the Source Schema Registry URL from environment variables
36 | // if a URL cannot be found, it exits the process
37 | func SrcGetSRUrl() string {
38 | url, present := os.LookupEnv("SRC_SR_URL")
39 | if present && url != "" {
40 | return url
41 | }
42 | 
43 | panic(errors.New("SRC_SR_URL environment variable has not been specified"))
44 | }
45 | 
46 | // DestGetAPIKey returns the Destination API Key from environment variables
47 | // if an API Key cannot be found, it exits the process
48 | func DestGetAPIKey() string {
49 | key, present := os.LookupEnv("DST_API_KEY")
50 | if present && key != "" {
51 | return key
52 | }
53 | 
54 | panic(errors.New("DST_API_KEY environment variable has not been specified"))
55 | }
56 | 
57 | // DestGetAPISecret returns the Destination API Secret from environment variables
58 | // if an API Secret cannot be found, it exits the process
59 | func DestGetAPISecret() string {
60 | secret, present := os.LookupEnv("DST_API_SECRET")
61 | if present && secret != "" {
62 | return secret
63 | }
64 | 
65 | panic(errors.New("DST_API_SECRET environment variable has not been specified"))
66 | }
67 | 
68 | // DestGetSRUrl returns the Destination Schema Registry URL from environment variables
69 | // if a URL cannot be found, it exits the process
70 | func DestGetSRUrl() string {
71 | url, present := os.LookupEnv("DST_SR_URL")
72 | if present && url != "" {
73 | return url
74 | }
75 | 
76 | panic(errors.New("DST_SR_URL
environment variable has not been specified")) 77 | } 78 | -------------------------------------------------------------------------------- /cmd/internals/helpers.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // helpers.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "encoding/json" 10 | "fmt" 11 | "io" 12 | "io/ioutil" 13 | "log" 14 | "net/http" 15 | "net/url" 16 | "os" 17 | "os/signal" 18 | "path/filepath" 19 | "strings" 20 | "syscall" 21 | ) 22 | 23 | // Simple check function that will panic if there is an error present 24 | func check(e error) { 25 | if e != nil { 26 | panic(e) 27 | } 28 | } 29 | 30 | // Simple check function that will fail all if there is an error present and allows a custom message to be printed 31 | func checkFail(e error, msg string) { 32 | if e != nil { 33 | log.Println(e) 34 | log.Fatalln(msg) 35 | } 36 | } 37 | 38 | // Simple check function that will not fail and log if there is an error present 39 | func checkDontFail(e error) { 40 | if e != nil { 41 | log.Println(e) 42 | } 43 | } 44 | 45 | // Prints the version of the ccloud-schema-exporter 46 | func printVersion() { 47 | fmt.Printf("ccloud-schema-exporter: %s\n", Version) 48 | } 49 | 50 | // Checks if the described path is a file 51 | // It will return false if the file does not exist or the given is a directory 52 | func fileExists(filename string) bool { 53 | info, err := os.Stat(filename) 54 | if os.IsNotExist(err) { 55 | return false 56 | } 57 | return !info.IsDir() 58 | } 59 | 60 | func isInSlice(i int64, list []int64) bool { 61 | for _, current := range list { 62 | if current == i { 63 | return true 64 | } 65 | } 66 | return false 67 | } 68 | 69 | func referenceIsInSlice(i SchemaReference, list []SchemaReference) bool { 70 | for _, current := range list { 71 | if current == i { 72 | return true 73 | } 74 | } 75 | return false 76 | } 77 | 78 | // Returns an HTTP request with the given information to execute 79 | func GetNewRequest(method string, endpoint string, key string, secret string, headers map[string]string, reader io.Reader) *http.Request { 80 | req, err := http.NewRequest(method, endpoint, reader) 81 | if err != nil { 82 | panic(err) 83 | } 84 | 85 | req.SetBasicAuth(key, secret) 86 | req.Header.Add("Content-Type", "application/json") 87 | req.Header.Add("User-Agent", "ccloud-schema-exporter/"+Version) 88 | req.Header.Add("Correlation-Context", "service.name=ccloud-schema-exporter,service.version="+Version) 89 | 90 | if headers != nil { 91 | for key, val := range headers { 92 | req.Header.Add(key, val) 93 | } 94 | } 95 | 96 | return req 97 | } 98 | 99 | // Checks if the given subject is allowed by the defined Allow and Disallow Lists 100 | // Returns true if the subject is allowed 101 | func checkSubjectIsAllowed(subject string) bool { 102 | if len(AllowList) != 0 { 103 | _, isAllowed := AllowList[subject] 104 | if !isAllowed { 105 | return false 106 | } 107 | } 108 | if len(DisallowList) != 0 { 109 | _, isDisallowed := DisallowList[subject] 110 | if isDisallowed { 111 | return false 112 | } 113 | } 114 | if len(AllowList) != 0 && len(DisallowList) != 00 { 115 | _, isAllowed := AllowList[subject] 116 | _, isDisallowed := DisallowList[subject] 117 | if !isAllowed || isDisallowed { 118 | return false 119 | } 120 | } 121 | return true 122 | } 123 | 124 | // Deletes all registered schemas from the destination SR 125 | func deleteAllFromDestination(sr string, key string, secret string) { 126 | destClient := 
NewSchemaRegistryClient(sr, key, secret, "dst") 127 | if !destClient.IsReachable() { 128 | log.Fatalln("Could not reach source registry. Possible bad credentials?") 129 | } 130 | destClient.DeleteAllSubjectsPermanently() 131 | } 132 | 133 | // Handle the SR response to a delete command for a subject/version 134 | func handleDeletesHTTPResponse(body io.ReadCloser, statusCode int, method string, endpoint string, 135 | reqType string, subject string, version int64) bool { 136 | defer body.Close() 137 | if statusCode != 200 { 138 | body, _ := ioutil.ReadAll(body) 139 | errorMsg := fmt.Sprintf(statusError, statusCode, method, endpoint) 140 | log.Printf("ERROR: %s, HTTP Response: %s", errorMsg, string(body)) 141 | } else { 142 | log.Println(fmt.Sprintf("%s deleted subject: %s, version: %d", reqType, subject, version)) 143 | if WithMetrics { 144 | if reqType == "Soft" { 145 | schemasSoftDeleted.Inc() 146 | } 147 | if reqType == "Hard" { 148 | schemasHardDeleted.Inc() 149 | } 150 | } 151 | return true 152 | } 153 | return false 154 | } 155 | 156 | // Handles the case where an HTTP call was not successful generically 157 | func handleNotSuccess(body io.Reader, statusCode int, method string, endpoint string) { 158 | if statusCode != 200 { 159 | body, _ := ioutil.ReadAll(body) 160 | errorMsg := fmt.Sprintf(statusError, statusCode, method, endpoint) 161 | log.Printf("ERROR: %s, HTTP Response: %s", errorMsg, string(body)) 162 | } 163 | } 164 | 165 | // Filters the provided slice of subjects according to what is provided in AllowList and DisallowList 166 | func filterListedSubjects(response []string) map[string]bool { 167 | // Start allow list work 168 | subjectMap := map[string]bool{} 169 | 170 | for _, s := range response { // Generate a map of subjects for easier manipulation 171 | subjectMap[s] = true 172 | } 173 | 174 | for s, _ := range subjectMap { // Filter out for allow lists 175 | if AllowList != nil { // If allow list is defined 176 | _, allowContains := AllowList[s] 177 | if !allowContains { // If allow list does not contain it, delete it 178 | delete(subjectMap, s) 179 | } 180 | } 181 | if DisallowList != nil { // If disallow list is defined 182 | _, disallowContains := DisallowList[s] 183 | if disallowContains { // If disallow list contains it, delete it 184 | delete(subjectMap, s) 185 | } 186 | } 187 | } 188 | 189 | return subjectMap 190 | } 191 | 192 | // Filters the provided slice of SubjectVersion according to what is provided in AllowList and DisallowList 193 | func filterListedSubjectsVersions(response []SubjectVersion) []SubjectVersion { 194 | subjectMap := map[string]int64{} 195 | 196 | for _, s := range response { // Generate a map of subjects for easier manipulation 197 | subjectMap[s.Subject] = s.Version 198 | } 199 | 200 | for s, _ := range subjectMap { // Filter out for allow lists 201 | if AllowList != nil { // If allow list is defined 202 | _, allowContains := AllowList[s] 203 | if !allowContains { // If allow list does not contain it, delete it 204 | delete(subjectMap, s) 205 | } 206 | } 207 | if DisallowList != nil { // If disallow list is defined 208 | _, disallowContains := DisallowList[s] 209 | if disallowContains { // If disallow list contains it, delete it 210 | delete(subjectMap, s) 211 | } 212 | } 213 | } 214 | 215 | subjectVersionSlice := []SubjectVersion{} 216 | for s, v := range subjectMap { 217 | tempSubjVer := SubjectVersion{ 218 | Subject: s, 219 | Version: v, 220 | } 221 | subjectVersionSlice = append(subjectVersionSlice, tempSubjVer) 222 | } 223 | 224 | 
return subjectVersionSlice 225 | } 226 | 227 | // Filters the provided map of [ID]:[Subject:Version] according to what is provided in AllowList and DisallowList 228 | func filterIDs(candidate map[int64]map[string][]int64) map[int64]map[string][]int64 { 229 | 230 | for id, subjects := range candidate { // Filter out for allow lists 231 | for sbj, _ := range subjects { 232 | if AllowList != nil { // If allow list is defined 233 | _, allowContains := AllowList[sbj] 234 | if !allowContains { // If allow list does not contain it, delete it 235 | delete(candidate[id], sbj) 236 | } 237 | } 238 | if DisallowList != nil { // If disallow list is defined 239 | _, disallowContains := DisallowList[sbj] 240 | if disallowContains { // If disallow list contains it, delete it 241 | delete(candidate[id], sbj) 242 | } 243 | } 244 | } 245 | if len(subjects) == 0 { 246 | delete(candidate, id) 247 | } 248 | } 249 | 250 | return candidate 251 | } 252 | 253 | // Generic handling of queries to the SR instance. Returns back the response as a map of string:string 254 | // Not suitable for endpoint queries that do not conform to this structure 255 | func handleEndpointQuery(end string, src *SchemaRegistryClient) (map[string]string, bool) { 256 | endpoint := fmt.Sprintf("%s/%s", src.SRUrl, end) 257 | req := GetNewRequest("GET", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 258 | 259 | res, err := httpClient.Do(req) 260 | if err != nil { 261 | log.Printf(err.Error()) 262 | return nil, false 263 | } 264 | handleNotSuccess(res.Body, res.StatusCode, req.Method, endpoint) 265 | 266 | response := map[string]string{} 267 | 268 | body, err := ioutil.ReadAll(res.Body) 269 | if err != nil { 270 | log.Printf(err.Error()) 271 | return nil, false 272 | } 273 | 274 | err = json.Unmarshal(body, &response) 275 | if err != nil { 276 | log.Printf(err.Error()) 277 | } 278 | return response, true 279 | } 280 | 281 | // Returns the difference between the provided maps of Subject:Version 282 | // The difference will be what is contained in the left map that is not contained in the right map 283 | func GetSubjectDiff(m1 map[string][]int64, m2 map[string][]int64) map[string][]int64 { 284 | diffMap := map[string][]int64{} 285 | for subject, versions := range m1 { 286 | if m2[subject] != nil { 287 | versionDiff := GetVersionsDiff(m1[subject], m2[subject]) 288 | if len(versionDiff) != 0 { 289 | aDiff := versionDiff 290 | diffMap[subject] = aDiff 291 | } 292 | } else { 293 | diffMap[subject] = versions 294 | } 295 | } 296 | return diffMap 297 | } 298 | 299 | // Returns the difference between the provided slices 300 | // The difference will be what is contained in the left slice that is not contained in the right slice 301 | func GetVersionsDiff(a1 []int64, a2 []int64) []int64 { 302 | m := map[int64]bool{} 303 | diff := []int64{} 304 | 305 | for _, item := range a2 { 306 | m[item] = true 307 | } 308 | 309 | for _, item := range a1 { 310 | if _, ok := m[item]; !ok { 311 | diff = append(diff, item) 312 | } 313 | } 314 | return diff 315 | } 316 | 317 | // Returns the difference between the provided maps of [ID][Subject:Version] 318 | // The difference will be what is contained in the left map that is not contained in the right map 319 | func GetIDDiff(m1 map[int64]map[string][]int64, m2 map[int64]map[string][]int64) map[int64]map[string][]int64 { 320 | diffMap := map[int64]map[string][]int64{} 321 | 322 | for idLeft, subjectVersionsLeftMap := range m1 { // Iterate through the left id -> (subject->version) mapping 323 | subjVersionsRightMap, 
idExistsRight := m2[idLeft] // Check if right has this mapping, if it does, retrieve it 324 | if !idExistsRight { // if the right does NOT have this mapping 325 | diffMap[idLeft] = subjectVersionsLeftMap // This whole mapping gets added to the map of things to be deleted 326 | } else { // if the right DOES have the ID 327 | toDelete := map[int64]map[string][]int64{} // Holder for schema/version references to delete 328 | for subjectLeft, versionsLeft := range subjectVersionsLeftMap { // iterate through subject/versions for current id 329 | subjectRightVersions, subjectExistsRight := subjVersionsRightMap[subjectLeft] 330 | if subjectExistsRight { 331 | for _, singleVersionLeft := range versionsLeft { // Iterate through versions on left 332 | if !isInSlice(singleVersionLeft, subjectRightVersions) { // if not exists on right 333 | _, idInQueue := toDelete[idLeft] 334 | if idInQueue { 335 | _, subjectInQueue := toDelete[idLeft][subjectLeft] 336 | if subjectInQueue { 337 | toDelete[idLeft][subjectLeft] = append(toDelete[idLeft][subjectLeft], singleVersionLeft) // Add to holder for queueing for deletion 338 | } else { 339 | tmpIDContents := toDelete[idLeft] 340 | tmpIDContents[subjectLeft] = []int64{singleVersionLeft} 341 | toDelete[idLeft] = tmpIDContents 342 | } 343 | } else { 344 | tempMap := map[string][]int64{subjectLeft: {singleVersionLeft}} 345 | toDelete[idLeft] = tempMap 346 | } 347 | } 348 | } 349 | } else { 350 | _, idInQueue := toDelete[idLeft] 351 | if idInQueue { 352 | tempMap := toDelete[idLeft] 353 | tempMap[subjectLeft] = versionsLeft 354 | toDelete[idLeft] = tempMap 355 | } else { 356 | toDelete[idLeft] = map[string][]int64{subjectLeft: versionsLeft} 357 | } 358 | } 359 | } 360 | if len(toDelete) != 0 { 361 | diffMap[idLeft] = toDelete[idLeft] // Add deletion queue to diffMap 362 | } 363 | } 364 | } 365 | 366 | return diffMap 367 | } 368 | 369 | // Returns the currently registered subjects for the SR clients provided 370 | func GetCurrentSubjectsStates(srcClient *SchemaRegistryClient, destClient *SchemaRegistryClient) (map[string][]int64, map[string][]int64) { 371 | return GetCurrentSubjectState(srcClient), GetCurrentSubjectState(destClient) 372 | } 373 | 374 | // Returns the currently registered subjects for the single SR provided 375 | func GetCurrentSubjectState(client *SchemaRegistryClient) map[string][]int64 { 376 | subjects := make(map[string][]int64) 377 | aChan := make(chan map[string][]int64) 378 | 379 | go client.GetSubjectsWithVersions(aChan, false) 380 | 381 | subjects = <-aChan 382 | return subjects 383 | } 384 | 385 | // Listens for user-provided controlled exit, and terminated the current process 386 | func listenForInterruption() { 387 | sigs := make(chan os.Signal, 1) 388 | signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) 389 | go func() { 390 | sig := <-sigs 391 | log.Printf("Received %v signal, quitting non-started schema writes...", sig) 392 | CancelRun = true 393 | }() 394 | } 395 | 396 | func RegisterReferences(wrappingSchema SchemaRecord, srcClient *SchemaRegistryClient, destClient *SchemaRegistryClient, deleted bool) { 397 | if len(wrappingSchema.References) != 0 { 398 | log.Printf("Registering references for subject %s and version %d", wrappingSchema.Subject, wrappingSchema.Version) 399 | log.Println(len(wrappingSchema.References)) 400 | for _, schemaReference := range wrappingSchema.References { 401 | 402 | schema := srcClient.GetSchema(schemaReference.Subject, schemaReference.Version, deleted) 403 | 404 | if 
!destClient.schemaIsRegisteredUnderSubject(schema.Subject, schema.SType, schema.Schema, schema.References) { 405 | log.Printf("Registering referenced schema: %s with version: %d and ID: %d and Type: %s", 406 | schema.Subject, schema.Version, schema.Id, schema.SType) 407 | RegisterReferences(schema, srcClient, destClient, deleted) 408 | destClient.RegisterSchemaBySubjectAndIDAndVersion(schema.Schema, 409 | schema.Subject, 410 | schema.Id, 411 | schema.Version, 412 | schema.SType, 413 | schema.References) 414 | continue 415 | } 416 | 417 | log.Printf("Reference schema subject %s was already written with version: %d and ID: %d", 418 | schema.Subject, schema.Version, schema.Id) 419 | 420 | } 421 | } 422 | } 423 | 424 | func RegisterReferencesFromLocalFS(referencesToRegister []SchemaReference, dstClient *SchemaRegistryClient, pathToLookForReferences string) { 425 | 426 | err := filepath.Walk(pathToLookForReferences, 427 | func(path string, info os.FileInfo, err error) error { 428 | check(err) 429 | for _, oneRef := range referencesToRegister { 430 | if !info.IsDir() && strings.Contains(info.Name(), fmt.Sprintf("%s-%d", url.QueryEscape(oneRef.Subject), oneRef.Version)) { 431 | log.Println(fmt.Sprintf("Writing referenced schema with Subject: %s and Version: %d. Filepath: %s", oneRef.Subject, oneRef.Version, path)) 432 | writeSchemaToSR(dstClient, path) 433 | } 434 | } 435 | 436 | return nil 437 | }) 438 | check(err) 439 | } 440 | 441 | func GetAvroSchemaDescriptor(fullReferenceName string) SchemaDescriptor { 442 | 443 | lastDot := strings.LastIndex(fullReferenceName, ".") 444 | 445 | if lastDot == -1 { 446 | return SchemaDescriptor{} 447 | } 448 | 449 | namespace := fullReferenceName[:lastDot] 450 | name := fullReferenceName[lastDot+1:] 451 | 452 | thisDescriptor := SchemaDescriptor{ 453 | namespace: strings.TrimSpace(namespace), 454 | name: strings.TrimSpace(name), 455 | } 456 | 457 | return thisDescriptor 458 | } 459 | 460 | func WriteFile(path string, filename string, contents string) { 461 | 462 | fileNameEscaped := fmt.Sprintf("%s", url.QueryEscape(filename)) 463 | fullPath := filepath.Join(path, fileNameEscaped) 464 | log.Printf("Writing file: %s", fullPath) 465 | 466 | f, err := os.Create(fullPath) 467 | check(err) 468 | defer f.Close() 469 | 470 | _, err = f.WriteString(contents) 471 | check(err) 472 | } 473 | -------------------------------------------------------------------------------- /cmd/internals/helpers_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // helpers_test.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "github.com/stretchr/testify/assert" 10 | "reflect" 11 | "testing" 12 | ) 13 | 14 | func TestMainStackHelpers(t *testing.T) { 15 | t.Run("TisInSlice", func(t *testing.T) { TisInSlice(t) }) 16 | t.Run("TcheckSubjectIsAllowed", func(t *testing.T) { TcheckSubjectIsAllowed(t) }) 17 | t.Run("TfilterListedSubjects", func(t *testing.T) { TfilterListedSubjects(t) }) 18 | t.Run("TfilterListedSubjectsVersions", func(t *testing.T) { TfilterListedSubjectsVersions(t) }) 19 | t.Run("TfilterIDs", func(t *testing.T) { TfilterIDs(t) }) 20 | t.Run("TGetSubjectDiff", func(t *testing.T) { TGetSubjectDiff(t) }) 21 | t.Run("TGetVersionsDiff", func(t *testing.T) { TGetVersionsDiff(t) }) 22 | t.Run("TGetIDDiff", func(t *testing.T) { TGetIDDiff(t) }) 23 | } 24 | 25 | func TisInSlice(t *testing.T) { 26 | sliceOne := []int64{1, 2, 3, 4, 5} 27 | 28 | assert.True(t, isInSlice(3, sliceOne)) 29 | 
assert.True(t, !isInSlice(6, sliceOne)) 30 | } 31 | 32 | func TcheckSubjectIsAllowed(t *testing.T) { 33 | AllowList = map[string]bool{ 34 | "testingSubjectKey": true, 35 | "SomeOtherValue": true, 36 | } 37 | 38 | DisallowList = map[string]bool{ 39 | "someValuePartTwo": true, 40 | "SomeOtherValue": true, 41 | } 42 | 43 | assert.True(t, checkSubjectIsAllowed("testingSubjectKey")) 44 | assert.True(t, !checkSubjectIsAllowed("SomeOtherValue")) 45 | 46 | AllowList = nil 47 | DisallowList = nil 48 | 49 | } 50 | 51 | func TfilterListedSubjects(t *testing.T) { 52 | 53 | AllowList = map[string]bool{ 54 | "testingSubjectKey": true, 55 | "SomeOtherValue": true, 56 | } 57 | 58 | DisallowList = map[string]bool{ 59 | "someValuePartTwo": true, 60 | "SomeOtherValue": true, 61 | } 62 | 63 | toFiler := []string{ 64 | "someValuePartTwo", 65 | "testingSubjectKey", 66 | "SomeOtherValue", 67 | } 68 | 69 | result := filterListedSubjects(toFiler) 70 | 71 | expected := map[string]bool{"testingSubjectKey": true} 72 | 73 | AllowList = nil 74 | DisallowList = nil 75 | 76 | assert.True(t, reflect.DeepEqual(result, expected)) 77 | } 78 | 79 | func TfilterListedSubjectsVersions(t *testing.T) { 80 | AllowList = map[string]bool{ 81 | "testingSubjectKey": true, 82 | "SomeOtherValue": true, 83 | } 84 | 85 | DisallowList = map[string]bool{ 86 | "someValuePartTwo": true, 87 | "SomeOtherValue": true, 88 | } 89 | 90 | toFilter := []SubjectVersion{ 91 | {Subject: "testingSubjectKey", Version: 1}, 92 | {Subject: "someValuePartTwo", Version: 1}, 93 | {Subject: "SomeOtherValue", Version: 1}, 94 | } 95 | 96 | result := filterListedSubjectsVersions(toFilter) 97 | 98 | expected := []SubjectVersion{ 99 | {Subject: "testingSubjectKey", Version: 1}, 100 | } 101 | 102 | AllowList = nil 103 | DisallowList = nil 104 | 105 | assert.True(t, reflect.DeepEqual(result, expected)) 106 | 107 | } 108 | 109 | func TfilterIDs(t *testing.T) { 110 | 111 | AllowList = map[string]bool{ 112 | "testingSubjectKey": true, 113 | "SomeOtherValue": true, 114 | } 115 | 116 | DisallowList = map[string]bool{ 117 | "someValuePartTwo": true, 118 | "SomeOtherValue": true, 119 | } 120 | 121 | toFilter := map[int64]map[string][]int64{ 122 | 1001: {"testingSubjectKey": {1, 2, 3}, "someValuePartTwo": {1, 2}}, 123 | 5000: {"SomeOtherValue": {1, 2}}, 124 | } 125 | 126 | result := filterIDs(toFilter) 127 | 128 | expected := map[int64]map[string][]int64{ 129 | 1001: {"testingSubjectKey": {1, 2, 3}}, 130 | } 131 | 132 | AllowList = nil 133 | DisallowList = nil 134 | 135 | assert.True(t, reflect.DeepEqual(result, expected)) 136 | } 137 | 138 | func TGetSubjectDiff(t *testing.T) { 139 | subjectMapOne := map[string][]int64{ 140 | "someValue": {1, 2, 3}, 141 | "SomeOtherValue": {1, 2}, 142 | } 143 | 144 | subjectMapTwo := map[string][]int64{ 145 | "someValue": {1, 3}, 146 | "SomeOtherValue": {2}, 147 | } 148 | 149 | result := GetSubjectDiff(subjectMapOne, subjectMapTwo) 150 | 151 | expected := map[string][]int64{ 152 | "someValue": {2}, 153 | "SomeOtherValue": {1}, 154 | } 155 | 156 | // Assert values that are contained in left but not right are returned 157 | assert.True(t, reflect.DeepEqual(expected, result)) 158 | } 159 | 160 | func TGetVersionsDiff(t *testing.T) { 161 | versionArrayOne := []int64{1, 2, 3} 162 | versionArrayTwo := []int64{1, 3} 163 | 164 | result := GetVersionsDiff(versionArrayOne, versionArrayTwo) 165 | 166 | expected := []int64{2} 167 | 168 | // Assert values that are contained in left but not right are returned 169 | assert.Equal(t, expected, result) 170 | } 
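// GetIDDiff is expected to return, per schema ID, only the subject/version pairs present in the
// left map but missing from the right map; IDs with nothing missing are omitted entirely.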
171 | 172 | func TGetIDDiff(t *testing.T) { 173 | 174 | idMapOne := map[int64]map[string][]int64{ 175 | 1001: {"someValue": {1, 2, 3}, "someValuePartTwo": {1, 2}}, 176 | 5000: {"SomeOtherValue": {1, 2}}, 177 | } 178 | 179 | idMapTwo := map[int64]map[string][]int64{ 180 | 1001: {"someValue": {1, 3}, "someValuePartTwo": {1}}, 181 | 5000: {"SomeOtherValue": {2}}, 182 | } 183 | 184 | results := GetIDDiff(idMapOne, idMapTwo) 185 | 186 | expected := map[int64]map[string][]int64{ 187 | 1001: {"someValue": {2}, "someValuePartTwo": {2}}, 188 | 5000: {"SomeOtherValue": {1}}, 189 | } 190 | 191 | assert.True(t, reflect.DeepEqual(expected, results)) 192 | } 193 | -------------------------------------------------------------------------------- /cmd/internals/localFSFunctions.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // writeToLocal.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "encoding/json" 10 | "fmt" 11 | "io/ioutil" 12 | "log" 13 | "net/url" 14 | "os" 15 | "path" 16 | "path/filepath" 17 | "strconv" 18 | "strings" 19 | "sync" 20 | "time" 21 | ) 22 | 23 | func WriteToFS(srcClient *SchemaRegistryClient, definedPath string, workingDirectory string) { 24 | listenForInterruption() 25 | 26 | definedPath = CheckPath(definedPath, workingDirectory) 27 | 28 | srcSubjects := GetCurrentSubjectState(srcClient) 29 | var aGroup sync.WaitGroup 30 | 31 | log.Printf("Writing schemas from %s to path %s", srcClient.SRUrl, definedPath) 32 | for srcSubject, srcVersions := range srcSubjects { 33 | for _, v := range srcVersions { 34 | aGroup.Add(1) 35 | go writeSchemaLocally(srcClient, definedPath, srcSubject, v, &aGroup) 36 | time.Sleep(time.Duration(1) * time.Millisecond) 37 | } 38 | } 39 | aGroup.Wait() 40 | } 41 | 42 | func WriteFromFS(dstClient *SchemaRegistryClient, definedPath string, workingDirectory string) { 43 | listenForInterruption() 44 | 45 | definedPath = CheckPath(definedPath, workingDirectory) 46 | 47 | err := filepath.Walk(definedPath, 48 | func(path string, info os.FileInfo, err error) error { 49 | check(err) 50 | if !info.IsDir() { 51 | writeSchemaToSR(dstClient, path) 52 | } 53 | return nil 54 | }) 55 | check(err) 56 | 57 | if CancelRun != true { 58 | log.Println("Destination Schema Registry Restored From Backup") 59 | } else { 60 | log.Println("Destination Schema Registry Partially Restored From Backup") 61 | } 62 | } 63 | 64 | // Writes the provided file to Schema Registry 65 | func writeSchemaToSR(dstClient *SchemaRegistryClient, filepath string) { 66 | if CancelRun == true { 67 | return 68 | } 69 | id, version, subject, stype := parseFileName(filepath) 70 | if checkSubjectIsAllowed(subject) { 71 | rawSchema, err := ioutil.ReadFile(filepath) 72 | fileString := string(rawSchema) 73 | check(err) 74 | 75 | referenceArray := []SchemaReference{} 76 | 77 | if strings.Contains(fileString, ReferenceSeparator) { 78 | referenceStart := strings.LastIndex(fileString, ReferenceSeparator) + len(ReferenceSeparator) 79 | referenceString := strings.TrimSpace(strings.ReplaceAll(fileString[referenceStart:], "\n", "")) 80 | referenceCollection := strings.Split(referenceString, "|") 81 | for _, reference := range referenceCollection { 82 | if len(reference) != 0 { 83 | thisReference := SchemaReference{} 84 | err := json.Unmarshal([]byte(reference), &thisReference) 85 | check(err) 86 | referenceArray = append(referenceArray, thisReference) 87 | } 88 | } 89 | RegisterReferencesFromLocalFS(referenceArray, dstClient, 
path.Dir(filepath)) 90 | fileString = fileString[:strings.LastIndex(fileString, ReferenceSeparator)-1] 91 | } 92 | 93 | unescapedSubject, err := url.QueryUnescape(subject) 94 | checkDontFail(err) 95 | log.Printf("Registering Schema with Subject: %s. Version: %v, and ID: %v", unescapedSubject, version, id) 96 | dstClient.RegisterSchemaBySubjectAndIDAndVersion(fileString, unescapedSubject, id, version, stype, referenceArray) 97 | } 98 | } 99 | 100 | // Returns schema metadata for a file given its path 101 | // Returned metadata in order: SchemaID, SchemaVersion, SchemaSubject, SchemaType 102 | func parseFileName(filepath string) (int64, int64, string, string) { 103 | 104 | startFileName := 0 105 | if strings.LastIndex(filepath, "/") != -1 { 106 | startFileName = strings.LastIndex(filepath, "/") + 1 107 | } 108 | if strings.LastIndex(filepath, "\\") != -1 { 109 | startFileName = strings.LastIndex(filepath, "\\") + 1 110 | } 111 | schemaFileName := filepath[startFileName:] 112 | schemaType := schemaFileName[strings.LastIndex(schemaFileName, "-")+1:] 113 | schemaFileNameNoType := schemaFileName[:strings.LastIndex(schemaFileName, "-")] 114 | schemaId, err := strconv.ParseInt(schemaFileNameNoType[strings.LastIndex(schemaFileNameNoType, "-")+1:], 10, 64) 115 | check(err) 116 | schemaFileNameNoId := schemaFileNameNoType[:strings.LastIndex(schemaFileNameNoType, "-")] 117 | schemaVersion, err := strconv.ParseInt(schemaFileNameNoId[strings.LastIndex(schemaFileNameNoId, "-")+1:], 10, 64) 118 | check(err) 119 | schemaSubject := schemaFileNameNoId[:strings.LastIndex(schemaFileNameNoId, "-")] 120 | 121 | return schemaId, schemaVersion, schemaSubject, schemaType 122 | } 123 | 124 | // Writes the provided schema in the given path 125 | func writeSchemaLocally(srcClient *SchemaRegistryClient, pathToWrite string, subject string, version int64, wg *sync.WaitGroup) { 126 | rawSchema := srcClient.GetSchema(subject, version, false) 127 | defer wg.Done() 128 | if CancelRun == true { 129 | return 130 | } 131 | 132 | log.Printf("Writing schema: %s with version: %d and ID: %d", 133 | rawSchema.Subject, rawSchema.Version, rawSchema.Id) 134 | 135 | filename := fmt.Sprintf("%s-%d-%d-%s", url.QueryEscape(rawSchema.Subject), rawSchema.Version, rawSchema.Id, rawSchema.SType) 136 | f, err := os.Create(filepath.Join(pathToWrite, filename)) 137 | 138 | check(err) 139 | defer f.Close() 140 | 141 | _, err = f.WriteString(rawSchema.Schema) 142 | check(err) 143 | if len(rawSchema.References) != 0 { 144 | _, err = f.WriteString("\n") 145 | check(err) 146 | _, err = f.WriteString(ReferenceSeparator) 147 | check(err) 148 | _, err = f.WriteString("\n") 149 | check(err) 150 | for _, oneRef := range rawSchema.References { 151 | jsonRepresentation, err := json.Marshal(oneRef) 152 | check(err) 153 | _, err = f.Write(jsonRepresentation) 154 | check(err) 155 | _, err = f.WriteString("|\n") 156 | check(err) 157 | } 158 | } 159 | 160 | _ = f.Sync() 161 | } 162 | 163 | // Returns a valid local FS path to write the schemas to 164 | func CheckPath(definedPath string, workingDirectory string) string { 165 | 166 | currentPath := filepath.Clean(workingDirectory) 167 | 168 | if definedPath == "" { 169 | definedPath = filepath.Join(currentPath, "SchemaRegistryBackup") 170 | log.Println("Path not defined, using local folder SchemaRegistryBackup") 171 | _ = os.Mkdir(definedPath, 0755) 172 | return definedPath 173 | } else { 174 | if filepath.IsAbs(definedPath) { 175 | if _, err := os.Stat(definedPath); os.IsNotExist(err) { 176 | log.Println("Path: " + 
definedPath) 177 | log.Fatalln("The directory specified does not exist.") 178 | } 179 | } else { 180 | definedPath = filepath.Join(currentPath, definedPath) 181 | _, err := os.Stat(definedPath) 182 | if os.IsNotExist(err) { 183 | log.Println("Path: " + definedPath) 184 | log.Fatalln("The directory specified does not exist.") 185 | } 186 | } 187 | return definedPath 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /cmd/internals/localFSFunctions_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // localFSFunctions_test.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestParseFileName(t *testing.T) { 15 | regularFilename := "/tmp/xmlToAvro-key-1-10382-AVRO" 16 | trickyFileName := "/tmp/myNew-Topic-value-1-35421-JSON" 17 | currentDirFileName := "someTopic-value-1-65421-PROTO" 18 | windowsFileName := "D:\\Schematest\\SchemaRegistryBackup\\myNew-Topic-value-1-35421-JSON" 19 | 20 | regularID, regularVersion, regularSubject, regularSType := parseFileName(regularFilename) 21 | trickyID, trickyVersion, trickySubject, trickySType := parseFileName(trickyFileName) 22 | currentDirID, currentDirVersion, currentDirSubject, currentDirSType := parseFileName(currentDirFileName) 23 | windowsID, windowsVersion, windowsSubject, windowsSType := parseFileName(windowsFileName) 24 | 25 | assert.True(t, regularID == int64(10382)) 26 | assert.True(t, regularVersion == int64(1)) 27 | assert.True(t, regularSubject == "xmlToAvro-key") 28 | assert.True(t, regularSType == "AVRO") 29 | assert.True(t, trickyID == int64(35421)) 30 | assert.True(t, trickyVersion == int64(1)) 31 | assert.True(t, trickySubject == "myNew-Topic-value") 32 | assert.True(t, trickySType == "JSON") 33 | assert.True(t, currentDirID == int64(65421)) 34 | assert.True(t, currentDirVersion == int64(1)) 35 | assert.True(t, currentDirSubject == "someTopic-value") 36 | assert.True(t, currentDirSType == "PROTO") 37 | assert.True(t, windowsID == int64(35421)) 38 | assert.True(t, windowsVersion == int64(1)) 39 | assert.True(t, windowsSubject == "myNew-Topic-value") 40 | assert.True(t, windowsSType == "JSON") 41 | 42 | } 43 | -------------------------------------------------------------------------------- /cmd/internals/meta.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // meta.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/prometheus/client_golang/prometheus/promauto" 11 | "net/http" 12 | ) 13 | 14 | var HttpCallTimeout int 15 | var ScrapeInterval int 16 | var Version = "1.3-SNAPSHOT" 17 | var httpClient http.Client 18 | 19 | var SrcSRUrl string 20 | var SrcSRKey string 21 | var SrcSRSecret string 22 | var DestSRUrl string 23 | var DestSRKey string 24 | var DestSRSecret string 25 | var CustomDestinationName string 26 | var CustomSourceName string 27 | var NoPrompt bool 28 | var SyncDeletes bool 29 | var SyncHardDeletes bool 30 | var ThisRun RunMode 31 | var PathToWrite string 32 | var CancelRun bool 33 | var WithMetrics bool 34 | var AllowList StringArrayFlag 35 | var DisallowList StringArrayFlag 36 | var ReferenceSeparator = "=====References=====" 37 | var SchemaLoadType string 38 | 39 | // Define RunMode Enum 40 | type RunMode int 41 | 42 | const ( 43 | SYNC RunMode = iota 44 | 
BATCH 45 | TOLOCAL 46 | FROMLOCAL 47 | SCHEMALOAD 48 | ) 49 | 50 | func (r RunMode) String() string { 51 | return [...]string{"SYNC", "BATCH", "TOLOCAL", "FROMLOCAL", "SCHEMALOAD"}[r] 52 | } 53 | 54 | // Define Mode Enum 55 | type Mode int 56 | 57 | const ( 58 | IMPORT Mode = iota 59 | READONLY 60 | READWRITE 61 | ) 62 | 63 | func (m Mode) String() string { 64 | return [...]string{"IMPORT", "READONLY", "READWRITE"}[m] 65 | } 66 | 67 | // Define Compatibility Enum 68 | type Compatibility int 69 | 70 | const ( 71 | BACKWARD Compatibility = iota 72 | BACKWARD_TRANSITIVE 73 | FORWARD 74 | FORWARD_TRANSITIVE 75 | FULL 76 | FULL_TRANSITIVE 77 | NONE 78 | ) 79 | 80 | func (c Compatibility) String() string { 81 | return [...]string{"BACKWARD", "BACKWARD_TRANSITIVE", "FORWARD", "FORWARD_TRANSITIVE", "FULL", "FULL_TRANSITIVE", "NONE"}[c] 82 | } 83 | 84 | // Custom Metrics 85 | var ( 86 | schemasRegistered = promauto.NewCounter(prometheus.CounterOpts{ 87 | Name: "schema_exporter_registered_schemas", 88 | Help: "The total number of registered schemas", 89 | }) 90 | ) 91 | 92 | var ( 93 | schemasSoftDeleted = promauto.NewCounter(prometheus.CounterOpts{ 94 | Name: "schema_exporter_softDeleted_schemas", 95 | Help: "The total number of soft deleted schemas", 96 | }) 97 | ) 98 | 99 | var ( 100 | schemasHardDeleted = promauto.NewCounter(prometheus.CounterOpts{ 101 | Name: "schema_exporter_hardDeleted_schemas", 102 | Help: "The total number of hard deleted schemas", 103 | }) 104 | ) 105 | -------------------------------------------------------------------------------- /cmd/internals/schema-registry-light-client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // schema-registry-light-client.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "bytes" 10 | "encoding/json" 11 | "fmt" 12 | "io/ioutil" 13 | "log" 14 | "net/http" 15 | "net/url" 16 | "sync" 17 | "time" 18 | ) 19 | 20 | /** 21 | Creates a lightweight client to schema registry in order to handle the necessary operations 22 | for syncing schema registries. Comes with helper methods for deletions that are exported. 23 | All functions are methods of SchemaRegistryClient 24 | */ 25 | 26 | var statusError = "Received status code %d instead of 200 for %s, on %s" 27 | 28 | // Base construct of a Schema Registry Client.
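// A minimal, illustrative construction (the URL and credentials are placeholders, not values shipped with this project):
//   srcClient := NewSchemaRegistryClient("https://my-sr.example.com", "myApiKey", "myApiSecret", "src")
//   if !srcClient.IsReachable() {
//       log.Fatalln("Could not reach the source Schema Registry")
//   }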
29 | // The target parameter will decide what checks will be performed in the environment variables 30 | func NewSchemaRegistryClient(SR string, apiKey string, apiSecret string, target string) *SchemaRegistryClient { 31 | client := SchemaRegistryClient{} 32 | 33 | // If the parameters are empty, go fetch from env 34 | if SR == "" || apiKey == "" || apiSecret == "" { 35 | if target == "dst" { 36 | client = SchemaRegistryClient{SRUrl: DestGetSRUrl(), SRApiKey: DestGetAPIKey(), SRApiSecret: DestGetAPISecret()} 37 | } 38 | if target == "src" { 39 | client = SchemaRegistryClient{SRUrl: SrcGetSRUrl(), SRApiKey: SrcGetAPIKey(), SRApiSecret: SrcGetAPISecret()} 40 | } 41 | } else { 42 | // Enables passing in the vars through flags 43 | client = SchemaRegistryClient{SRUrl: SR, SRApiKey: apiKey, SRApiSecret: apiSecret} 44 | } 45 | 46 | httpClient = http.Client{ 47 | Timeout: time.Second * time.Duration(HttpCallTimeout), 48 | } 49 | 50 | return &client 51 | } 52 | 53 | // Returns whether a proper connection could be made to the Schema Registry by the client 54 | func (src *SchemaRegistryClient) IsReachable() bool { 55 | endpoint := src.SRUrl 56 | req := GetNewRequest("GET", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 57 | 58 | res, err := httpClient.Do(req) 59 | if err != nil { 60 | return false 61 | } 62 | defer res.Body.Close() 63 | 64 | if res.StatusCode == 200 { 65 | return true 66 | } else { 67 | return false 68 | } 69 | } 70 | 71 | // Returns all non-deleted (soft or hard deletions) subjects with their versions in the form of a map. 72 | func (src *SchemaRegistryClient) GetSubjectsWithVersions(chanY chan<- map[string][]int64, deleted bool) { 73 | endpoint := "" 74 | if deleted { 75 | endpoint = fmt.Sprintf("%s/subjects?deleted=true", src.SRUrl) 76 | } else { 77 | endpoint = fmt.Sprintf("%s/subjects", src.SRUrl) 78 | } 79 | 80 | req := GetNewRequest("GET", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 81 | 82 | res, err := httpClient.Do(req) 83 | check(err) 84 | defer res.Body.Close() 85 | handleNotSuccess(res.Body, res.StatusCode, req.Method, endpoint) 86 | 87 | response := []string{} 88 | 89 | body, err := ioutil.ReadAll(res.Body) 90 | checkDontFail(err) 91 | 92 | err = json.Unmarshal(body, &response) 93 | checkDontFail(err) 94 | 95 | // Convert back to slice for speed of iteration 96 | filteredSubjects := []string{} 97 | for key, _ := range filterListedSubjects(response) { 98 | filteredSubjects = append(filteredSubjects, key) 99 | } 100 | 101 | response = filteredSubjects 102 | 103 | // Start async fetching 104 | var aGroup sync.WaitGroup 105 | aChan := make(chan SubjectWithVersions) 106 | 107 | for _, s := range response { 108 | aGroup.Add(1) 109 | /* 110 | Rate limit: In order not to saturate the SR instances, we limit the rate at which we send schema discovery requests. 111 | The idea being that if one SR instance can handle the load, a cluster should be able to handle it 112 | even more easily. During testing, it was found that 2ms is an ideal delay for best sync performance. 
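As a rough, illustrative calculation (the subject count is hypothetical): with a 2ms spacing, fanning out version lookups for 1,000 subjects adds about 2 seconds of pacing per discovery pass, on top of the actual response times.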
113 | */ 114 | time.Sleep(time.Duration(2) * time.Millisecond) 115 | go src.GetVersions(s, aChan, &aGroup, deleted) 116 | } 117 | go func() { 118 | aGroup.Wait() 119 | close(aChan) 120 | }() 121 | 122 | //Collect SubjectWithVersions 123 | tmpSchemaMap := make(map[string][]int64) 124 | for item := range aChan { 125 | tmpSchemaMap[item.Subject] = item.Versions 126 | } 127 | 128 | // Send back to main thread 129 | chanY <- tmpSchemaMap 130 | } 131 | 132 | // Returns all non-deleted versions (soft or hard deleted) that exist for a given subject. 133 | func (src *SchemaRegistryClient) GetVersions(subject string, chanX chan<- SubjectWithVersions, wg *sync.WaitGroup, deleted bool) { 134 | endpoint := "" 135 | if deleted { 136 | endpoint = fmt.Sprintf("%s/subjects/%s/versions?deleted=true", src.SRUrl, url.QueryEscape(subject)) 137 | } else { 138 | endpoint = fmt.Sprintf("%s/subjects/%s/versions", src.SRUrl, url.QueryEscape(subject)) 139 | } 140 | 141 | defer wg.Done() 142 | req := GetNewRequest("GET", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 143 | 144 | res, err := httpClient.Do(req) 145 | if err != nil { 146 | log.Printf(err.Error()) 147 | return 148 | } 149 | defer res.Body.Close() 150 | handleNotSuccess(res.Body, res.StatusCode, req.Method, endpoint) 151 | 152 | response := []int64{} 153 | 154 | body, err := ioutil.ReadAll(res.Body) 155 | checkDontFail(err) 156 | 157 | err = json.Unmarshal(body, &response) 158 | checkDontFail(err) 159 | 160 | pkgSbj := SubjectWithVersions{ 161 | Subject: subject, 162 | Versions: response, 163 | } 164 | 165 | // Send back to retrieving thread 166 | chanX <- pkgSbj 167 | } 168 | 169 | // Performs a check to see if the compatibility level for the backing Schema Registry is set to NONE 170 | func (src *SchemaRegistryClient) IsCompatReady() bool { 171 | response, ok := handleEndpointQuery("config", src) 172 | if !ok { 173 | return false 174 | } 175 | 176 | if response["compatibilityLevel"] == NONE.String() { 177 | return true 178 | } else { 179 | return false 180 | } 181 | } 182 | 183 | // Allows to set a compatibility level for the backing Schema Registry, returns true if successful 184 | func (src *SchemaRegistryClient) SetGlobalCompatibility(comptToSet Compatibility) bool { 185 | endpoint := fmt.Sprintf("%s/config", src.SRUrl) 186 | 187 | compat := CompatRecord{Compatibility: comptToSet.String()} 188 | toSend, err := json.Marshal(compat) 189 | if err != nil { 190 | log.Printf(err.Error()) 191 | return false 192 | } 193 | 194 | req := GetNewRequest("PUT", endpoint, src.SRApiKey, src.SRApiSecret, nil, bytes.NewReader(toSend)) 195 | 196 | res, err := httpClient.Do(req) 197 | if err != nil { 198 | log.Printf(err.Error()) 199 | return false 200 | } 201 | defer res.Body.Close() 202 | 203 | handleNotSuccess(res.Body, res.StatusCode, req.Method, endpoint) 204 | 205 | if res.StatusCode == 200 { 206 | return true 207 | } else { 208 | return false 209 | } 210 | 211 | } 212 | 213 | // Performs a check on the backing Schema Registry to see if it is in global IMPORT mode. 
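// For reference, this check reads the registry's /mode endpoint, which answers with a small JSON object, e.g. (illustrative):
//   GET {schema-registry-url}/mode  ->  {"mode": "IMPORT"}
// Any other reported mode (such as "READWRITE" or "READONLY") makes this method return false.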
214 | func (src *SchemaRegistryClient) IsImportModeReady() bool { 215 | response, ok := handleEndpointQuery("mode", src) 216 | if !ok { 217 | return false 218 | } 219 | 220 | if response["mode"] == IMPORT.String() { 221 | return true 222 | } else { 223 | return false 224 | } 225 | } 226 | 227 | // Allows to set a global mode for the backing Schema Registry 228 | func (src *SchemaRegistryClient) SetMode(modeToSet Mode) bool { 229 | endpoint := fmt.Sprintf("%s/mode", src.SRUrl) 230 | 231 | mode := ModeRecord{Mode: modeToSet.String()} 232 | modeToSend, err := json.Marshal(mode) 233 | if err != nil { 234 | log.Printf(err.Error()) 235 | return false 236 | } 237 | 238 | req := GetNewRequest("PUT", endpoint, src.SRApiKey, src.SRApiSecret, nil, bytes.NewReader(modeToSend)) 239 | 240 | res, err := httpClient.Do(req) 241 | if err != nil { 242 | log.Printf(err.Error()) 243 | return false 244 | } 245 | defer res.Body.Close() 246 | 247 | handleNotSuccess(res.Body, res.StatusCode, req.Method, endpoint) 248 | 249 | if res.StatusCode == 200 { 250 | return true 251 | } else { 252 | return false 253 | } 254 | 255 | } 256 | 257 | // Returns a SchemaRecord for the given subject and version by querying the backing Schema Registry 258 | func (src *SchemaRegistryClient) GetSchema(subject string, version int64, deleted bool) SchemaRecord { 259 | endpoint := fmt.Sprintf("%s/subjects/%s/versions/%d", src.SRUrl, url.QueryEscape(subject), version) 260 | if deleted { 261 | endpoint = fmt.Sprintf("%s/subjects/%s/versions/%d?deleted=true", src.SRUrl, url.QueryEscape(subject), version) 262 | } 263 | req := GetNewRequest("GET", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 264 | 265 | res, err := httpClient.Do(req) 266 | if err != nil { 267 | log.Printf(err.Error()) 268 | log.Println("Could not retrieve schema!") 269 | return SchemaRecord{} 270 | } 271 | defer res.Body.Close() 272 | 273 | handleNotSuccess(res.Body, res.StatusCode, req.Method, endpoint) 274 | 275 | schemaResponse := new(SchemaRecord) 276 | 277 | body, err := ioutil.ReadAll(res.Body) 278 | if err != nil { 279 | log.Printf(err.Error()) 280 | return SchemaRecord{} 281 | } 282 | 283 | err = json.Unmarshal(body, &schemaResponse) 284 | checkDontFail(err) 285 | 286 | return SchemaRecord{Subject: schemaResponse.Subject, Schema: schemaResponse.Schema, Version: schemaResponse.Version, Id: schemaResponse.Id, SType: schemaResponse.SType, References: schemaResponse.References}.setTypeIfEmpty().setReferenceIfEmpty() 287 | } 288 | 289 | // Registers a schema 290 | func (src *SchemaRegistryClient) RegisterSchema(schema string, subject string, SType string, references []SchemaReference) []byte { 291 | return src.RegisterSchemaBySubjectAndIDAndVersion(schema, subject, 0, 0, SType, references) 292 | } 293 | 294 | // Registers a schema with the given SchemaID and SchemaVersion 295 | func (src *SchemaRegistryClient) RegisterSchemaBySubjectAndIDAndVersion(schema string, subject string, id int64, version int64, SType string, references []SchemaReference) []byte { 296 | endpoint := fmt.Sprintf("%s/subjects/%s/versions", src.SRUrl, url.QueryEscape(subject)) 297 | 298 | schemaRequest := SchemaToRegister{} 299 | if id == 0 && version == 0 { 300 | schemaRequest = SchemaToRegister{Schema: schema, SType: SType, References: references} 301 | } else { 302 | schemaRequest = SchemaToRegister{Schema: schema, Id: id, Version: version, SType: SType, References: references} 303 | } 304 | schemaJSON, err := json.Marshal(schemaRequest) 305 | if err != nil { 306 | log.Printf(err.Error()) 307 
| } 308 | 309 | req := GetNewRequest("POST", endpoint, src.SRApiKey, src.SRApiSecret, nil, bytes.NewReader(schemaJSON)) 310 | 311 | res, err := httpClient.Do(req) 312 | if err != nil { 313 | log.Printf(err.Error()) 314 | log.Println("Could not register schema!") 315 | return nil 316 | } 317 | defer res.Body.Close() 318 | 319 | body, err := ioutil.ReadAll(res.Body) 320 | checkDontFail(err) 321 | 322 | handleNotSuccess(res.Body, res.StatusCode, req.Method, endpoint) 323 | 324 | if WithMetrics && res.StatusCode == 200 { 325 | schemasRegistered.Inc() 326 | } 327 | 328 | return body 329 | } 330 | 331 | // Deletes all schemas in the backing Schema Registry, including previously soft deleted subjects 332 | // This method does not respect AllowList or DisallowList 333 | func (src *SchemaRegistryClient) DeleteAllSubjectsPermanently() { 334 | destSubjects := make(map[string][]int64) 335 | destChan := make(chan map[string][]int64) 336 | 337 | // Account for allow/disallow lists, since this 338 | // method is expected to delete ALL Schemas, 339 | // The lists are not respected 340 | holderAllow := AllowList 341 | holderDisallow := DisallowList 342 | 343 | AllowList = nil 344 | DisallowList = nil 345 | go src.GetSubjectsWithVersions(destChan, true) 346 | destSubjects = <-destChan 347 | 348 | AllowList = holderAllow 349 | DisallowList = holderDisallow 350 | 351 | //Must perform soft delete before hard delete 352 | for subject, versions := range destSubjects { 353 | for _, version := range versions { 354 | if src.subjectExists(subject) { 355 | if src.PerformSoftDelete(subject, version) { 356 | src.PerformHardDelete(subject, version) 357 | } 358 | } 359 | } 360 | } 361 | } 362 | 363 | // Performs a Soft Delete on the given subject and version on the backing Schema Registry 364 | func (src *SchemaRegistryClient) PerformSoftDelete(subject string, version int64) bool { 365 | endpoint := fmt.Sprintf("%s/subjects/%s/versions/%d", src.SRUrl, url.QueryEscape(subject), version) 366 | req := GetNewRequest("DELETE", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 367 | res, err := httpClient.Do(req) 368 | if err != nil { 369 | log.Printf(err.Error()) 370 | return false 371 | } 372 | 373 | // We've confirmed this subject does not exist 374 | if res.StatusCode == 404 { 375 | return true 376 | } 377 | 378 | // Handle referenced subjects 379 | if res.StatusCode == 422 { 380 | 381 | regBody, err := ioutil.ReadAll(res.Body) 382 | check(err) 383 | 384 | errMsg := ErrorMessage{} 385 | 386 | err = json.Unmarshal(regBody, &errMsg) 387 | check(err) 388 | 389 | if errMsg.ErrorCode == 42206 { 390 | referencesEndpoint := fmt.Sprintf("%s/subjects/%s/versions/%d/referencedby", src.SRUrl, url.QueryEscape(subject), version) 391 | findReferencingSchemas := GetNewRequest("GET", referencesEndpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 392 | 393 | refRes, err := httpClient.Do(findReferencingSchemas) 394 | checkDontFail(err) 395 | defer refRes.Body.Close() 396 | refBody, err := ioutil.ReadAll(refRes.Body) 397 | checkDontFail(err) 398 | 399 | idsReferencing := []int64{} 400 | 401 | err = json.Unmarshal(refBody, &idsReferencing) 402 | checkDontFail(err) 403 | 404 | for _, thisId := range idsReferencing { 405 | correlatedSubjectVersionsEndpoint := fmt.Sprintf("%s/schemas/ids/%d/versions", src.SRUrl, thisId) 406 | 407 | findReferencingSubjectVersions := GetNewRequest("GET", correlatedSubjectVersionsEndpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 408 | 409 | refSVRes, err := httpClient.Do(findReferencingSubjectVersions) 410 | 
checkDontFail(err) 411 | defer refSVRes.Body.Close() 412 | refSVBody, err := ioutil.ReadAll(refSVRes.Body) 413 | checkDontFail(err) 414 | 415 | schemaVersionsReferencing := []SubjectVersion{} 416 | 417 | err = json.Unmarshal(refSVBody, &schemaVersionsReferencing) 418 | checkDontFail(err) 419 | 420 | for _, subjectVersion := range schemaVersionsReferencing { 421 | if src.PerformSoftDelete(subjectVersion.Subject, subjectVersion.Version) { 422 | src.PerformHardDelete(subjectVersion.Subject, subjectVersion.Version) 423 | } 424 | } 425 | } 426 | 427 | // Attempt to delete original schema now that it isn't being referenced anymore. 428 | return src.PerformSoftDelete(subject, version) 429 | 430 | } else { 431 | return false 432 | } 433 | } 434 | 435 | return handleDeletesHTTPResponse(res.Body, res.StatusCode, req.Method, endpoint, "Soft", subject, version) 436 | } 437 | 438 | // Performs a Hard Delete on the given subject and version on the backing Schema Registry 439 | // NOTE: A Hard Delete should only be performed after a soft delete 440 | func (src *SchemaRegistryClient) PerformHardDelete(subject string, version int64) bool { 441 | endpoint := fmt.Sprintf("%s/subjects/%s/versions/%d?permanent=true", src.SRUrl, url.QueryEscape(subject), version) 442 | req := GetNewRequest("DELETE", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 443 | res, err := httpClient.Do(req) 444 | if err != nil { 445 | log.Printf(err.Error()) 446 | return false 447 | } 448 | 449 | return handleDeletesHTTPResponse(res.Body, res.StatusCode, req.Method, endpoint, "Hard", subject, version) 450 | } 451 | 452 | // Returns a map with the [ID][Subject:Versions] state of the backing Schema Registry for only the soft deleted SubjectVersions 453 | func (src *SchemaRegistryClient) GetSoftDeletedIDs() map[int64]map[string][]int64 { 454 | 455 | responseWithDeletes := src.getSchemaList(true) 456 | responseWithOutDeletes := src.getSchemaList(false) 457 | 458 | diff := GetIDDiff(responseWithDeletes, responseWithOutDeletes) 459 | return filterIDs(diff) 460 | } 461 | 462 | // Returns a dump of all Schemas mapped to their IDs from the backing Schema Registry 463 | // The parameter specifies whether to show soft deleted schemas as well 464 | func (src *SchemaRegistryClient) getSchemaList(deleted bool) map[int64]map[string][]int64 { 465 | endpoint := fmt.Sprintf("%s/schemas?deleted=%v", src.SRUrl, deleted) 466 | if !deleted { 467 | endpoint = fmt.Sprintf("%s/schemas", src.SRUrl) 468 | } 469 | 470 | req := GetNewRequest("GET", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 471 | res, err := httpClient.Do(req) 472 | check(err) 473 | defer res.Body.Close() 474 | 475 | response := []SchemaExtraction{} 476 | 477 | body, err := ioutil.ReadAll(res.Body) 478 | checkDontFail(err) 479 | 480 | err = json.Unmarshal(body, &response) 481 | checkDontFail(err) 482 | 483 | responseMap := make(map[int64]map[string][]int64) 484 | 485 | for _, schema := range response { 486 | currentStateOfID, haveSeenIDBefore := responseMap[schema.Id] 487 | if haveSeenIDBefore { 488 | _, haveSeenSubject := currentStateOfID[schema.Subject] 489 | if haveSeenSubject { 490 | responseMap[schema.Id][schema.Subject] = append(responseMap[schema.Id][schema.Subject], schema.Version) 491 | } else { 492 | tempMap := responseMap[schema.Id] 493 | tempMap[schema.Subject] = []int64{schema.Version} 494 | responseMap[schema.Id] = tempMap 495 | } 496 | } else { 497 | responseMap[schema.Id] = map[string][]int64{schema.Subject: {schema.Version}} 498 | } 499 | 500 | } 501 | 502 | return 
responseMap 503 | } 504 | 505 | // Checks if the subject is in the backing schema registry, regardless of whether it is soft deleted or not. 506 | func (src *SchemaRegistryClient) subjectExists(subject string) bool { 507 | endpoint := fmt.Sprintf("%s/subjects/%s/versions?deleted=true", src.SRUrl, url.QueryEscape(subject)) 508 | sbjReq := GetNewRequest("GET", endpoint, src.SRApiKey, src.SRApiSecret, nil, nil) 509 | response, err := httpClient.Do(sbjReq) 510 | check(err) 511 | defer response.Body.Close() 512 | 513 | if response.StatusCode == 200 { 514 | return true 515 | } else { 516 | return false 517 | } 518 | } 519 | 520 | func (src *SchemaRegistryClient) schemaIsRegisteredUnderSubject(subject string, schemaType string, schema string, references []SchemaReference) bool { 521 | endpoint := fmt.Sprintf("%s/subjects/%s", src.SRUrl, url.QueryEscape(subject)) 522 | 523 | schemaRequest := SchemaToRegister{Schema: schema, SType: schemaType, References: references} 524 | 525 | schemaJSON, err := json.Marshal(schemaRequest) 526 | if err != nil { 527 | log.Printf(err.Error()) 528 | } 529 | 530 | sbjReq := GetNewRequest("POST", endpoint, src.SRApiKey, src.SRApiSecret, nil, bytes.NewReader(schemaJSON)) 531 | response, err := httpClient.Do(sbjReq) 532 | if err != nil { 533 | log.Printf(err.Error()) 534 | return false 535 | } 536 | defer response.Body.Close() 537 | 538 | if response.StatusCode > 400 { 539 | return false 540 | } 541 | 542 | return true 543 | } 544 | -------------------------------------------------------------------------------- /cmd/internals/schema-registry-light-client_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // schema-registry-light-client_test.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "encoding/json" 10 | "fmt" 11 | testingUtils "github.com/abraham-leal/ccloud-schema-exporter/cmd/testingUtils" 12 | "github.com/stretchr/testify/assert" 13 | "github.com/testcontainers/testcontainers-go" 14 | "io/ioutil" 15 | "log" 16 | "net/http" 17 | "sync" 18 | "testing" 19 | ) 20 | 21 | var testClient *SchemaRegistryClient 22 | var mockSchema = "{\"type\":\"record\",\"name\":\"value_newnew\",\"namespace\":\"com.mycorp.mynamespace\",\"doc\":\"Sample schema to help you get started.\",\"fields\":[{\"name\":\"this\",\"type\":\"int\",\"doc\":\"The int type is a 32-bit signed integer.\"},{\"name\":\"onefield\",\"type\":[\"null\",\"string\"],\"default\":null}]}" 23 | var testingSubject = "test-key" 24 | var newSubject = "newSubject-key" 25 | var SRUrl = "http://localhost:8081" 26 | var localKafkaContainer testcontainers.Container 27 | var localSchemaRegistrySrcContainer testcontainers.Container 28 | 29 | func TestMainStack(t *testing.T) { 30 | setup() 31 | t.Run("TIsCompatReady", func(t *testing.T) { TIsCompatReady(t) }) 32 | t.Run("TSetCompat", func(t *testing.T) { TSetCompat(t) }) 33 | t.Run("TIsReachable", func(t *testing.T) { TIsReachable(t) }) 34 | t.Run("TSetMode", func(t *testing.T) { TSetMode(t) }) 35 | t.Run("TIsImportModeReady", func(t *testing.T) { TIsImportModeReady(t) }) 36 | t.Run("TGetSubjectWithVersions", func(t *testing.T) { TGetSubjectWithVersions(t) }) 37 | t.Run("TGetVersions", func(t *testing.T) { TGetVersions(t) }) 38 | t.Run("TGetSchema", func(t *testing.T) { TGetSchema(t) }) 39 | t.Run("TRegisterSchemaBySubjectAndIDAndVersion", func(t *testing.T) { TRegisterSchemaBySubjectAndIDAndVersion(t) }) 40 | t.Run("TFilterIDs", func(t *testing.T) { TFilterIDs(t) }) 41 | t.Run("TFilterListedSubjects", func(t *testing.T) { TFilterListedSubjects(t) }) 42 |
t.Run("TFilterListedSubjectsVersions", func(t *testing.T) { TFilterListedSubjectsVersions(t) }) 43 | t.Run("TPerformSoftDelete", func(t *testing.T) { TPerformSoftDelete(t) }) 44 | t.Run("TPerformHardDelete", func(t *testing.T) { TPerformHardDelete(t) }) 45 | t.Run("TGetSoftDeletedIds", func(t *testing.T) { TGetSoftDeletedIDs(t) }) 46 | t.Run("TDeleteAllSubjectsPermanently", func(t *testing.T) { TDeleteAllSubjectsPermanently(t) }) 47 | tearDown() 48 | } 49 | 50 | func setup() { 51 | 52 | localKafkaContainer, localSchemaRegistrySrcContainer = testingUtils.GetBaseInfra("clients") 53 | 54 | srcSRPort, err := localSchemaRegistrySrcContainer.MappedPort(testingUtils.Ctx, "8081") 55 | checkFail(err, "Not Able to get SRC SR Port") 56 | 57 | SRUrl = "http://localhost:" + srcSRPort.Port() 58 | 59 | testClient = NewSchemaRegistryClient(SRUrl, "testUser", "testPass", "src") 60 | 61 | //Initial schema 62 | setImportMode() 63 | } 64 | 65 | func tearDown() { 66 | err := localSchemaRegistrySrcContainer.Terminate(testingUtils.Ctx) 67 | checkFail(err, "Could not terminate source sr") 68 | err = localKafkaContainer.Terminate(testingUtils.Ctx) 69 | checkFail(err, "Could not terminate kafka") 70 | } 71 | 72 | func TIsReachable(t *testing.T) { 73 | falseTestClient := NewSchemaRegistryClient("http://localhost:8083", "testUser", "testPass", "src") 74 | 75 | assert.True(t, testClient.IsReachable()) 76 | assert.False(t, falseTestClient.IsReachable()) 77 | } 78 | 79 | func TSetCompat(t *testing.T) { 80 | endpoint := fmt.Sprintf("%s/config", SRUrl) 81 | req := GetNewRequest("GET", endpoint, "testUser", "testPass", nil, nil) 82 | // Test Set READWRITE 83 | testClient.SetGlobalCompatibility(FULL) 84 | assert.True(t, performQuery(req)["compatibilityLevel"] == FULL.String()) 85 | // Test Set IMPORT 86 | testClient.SetGlobalCompatibility(BACKWARD) 87 | assert.True(t, performQuery(req)["compatibilityLevel"] == BACKWARD.String()) 88 | // Test Set READWRITE 89 | testClient.SetGlobalCompatibility(FORWARD) 90 | assert.True(t, performQuery(req)["compatibilityLevel"] == FORWARD.String()) 91 | // Test Set NONE 92 | testClient.SetGlobalCompatibility(NONE) 93 | assert.True(t, performQuery(req)["compatibilityLevel"] == NONE.String()) 94 | } 95 | 96 | func TIsCompatReady(t *testing.T) { 97 | testClient.SetGlobalCompatibility(FULL) 98 | assert.False(t, testClient.IsCompatReady()) 99 | testClient.SetGlobalCompatibility(NONE) 100 | assert.True(t, testClient.IsCompatReady()) 101 | } 102 | 103 | func TSetMode(t *testing.T) { 104 | endpoint := fmt.Sprintf("%s/mode", SRUrl) 105 | req := GetNewRequest("GET", endpoint, "testUser", "testPass", nil, nil) 106 | // Test Set READWRITE 107 | testClient.SetMode(READWRITE) 108 | assert.True(t, performQuery(req)["mode"] == READWRITE.String()) 109 | // Test Set IMPORT 110 | testClient.SetMode(IMPORT) 111 | assert.True(t, performQuery(req)["mode"] == IMPORT.String()) 112 | // Test Set READWRITE 113 | testClient.SetMode(READONLY) 114 | assert.True(t, performQuery(req)["mode"] == READONLY.String()) 115 | 116 | testClient.SetMode(IMPORT) 117 | } 118 | 119 | func TIsImportModeReady(t *testing.T) { 120 | testClient.SetMode(IMPORT) 121 | assert.True(t, testClient.IsImportModeReady()) 122 | testClient.SetMode(READWRITE) 123 | assert.False(t, testClient.IsImportModeReady()) 124 | testClient.SetMode(IMPORT) 125 | } 126 | 127 | func TGetSubjectWithVersions(t *testing.T) { 128 | testClient.RegisterSchemaBySubjectAndIDAndVersion(mockSchema, testingSubject, 10001, 1, "AVRO", []SchemaReference{}) 129 | 130 | aChan := 
make(chan map[string][]int64) 131 | go testClient.GetSubjectsWithVersions(aChan, false) 132 | result := <-aChan 133 | 134 | assert.NotNil(t, result[testingSubject]) 135 | assert.Equal(t, []int64{1}, result[testingSubject]) 136 | } 137 | 138 | func TGetVersions(t *testing.T) { 139 | var aGroup sync.WaitGroup 140 | 141 | aChan := make(chan SubjectWithVersions) 142 | go testClient.GetVersions(testingSubject, aChan, &aGroup, false) 143 | aGroup.Add(1) 144 | result := <-aChan 145 | aGroup.Wait() 146 | 147 | correctResult := SubjectWithVersions{ 148 | Subject: testingSubject, 149 | Versions: []int64{1}, 150 | } 151 | 152 | assert.Equal(t, correctResult, result) 153 | } 154 | 155 | func TGetSchema(t *testing.T) { 156 | record := testClient.GetSchema(testingSubject, 1, false) 157 | assert.Equal(t, mockSchema, record.Schema) 158 | } 159 | 160 | func TRegisterSchemaBySubjectAndIDAndVersion(t *testing.T) { 161 | testClient.RegisterSchemaBySubjectAndIDAndVersion(mockSchema, newSubject, 10001, 1, "AVRO", []SchemaReference{}) 162 | record := testClient.GetSchema(newSubject, 1, false) 163 | assert.Equal(t, mockSchema, record.Schema) 164 | 165 | testClient.PerformSoftDelete(newSubject, 1) 166 | testClient.PerformHardDelete(newSubject, 1) 167 | } 168 | 169 | func TGetSoftDeletedIDs(t *testing.T) { 170 | testClient.RegisterSchemaBySubjectAndIDAndVersion(mockSchema, newSubject, 10001, 1, "AVRO", []SchemaReference{}) 171 | testClient.PerformSoftDelete(newSubject, 1) 172 | result := testClient.GetSoftDeletedIDs() 173 | expected := map[int64]map[string][]int64{ 174 | 10001: {newSubject: {1}}} 175 | 176 | testClient.PerformHardDelete(newSubject, 1) 177 | assert.Equal(t, expected, result) 178 | } 179 | 180 | func TFilterIDs(t *testing.T) { 181 | 182 | myIDs := map[int64]map[string][]int64{ 183 | 10001: {testingSubject: []int64{1}}, 184 | 10002: {newSubject: []int64{1}}, 185 | } 186 | 187 | // Test Allow lists 188 | AllowList = StringArrayFlag{ 189 | newSubject: true, 190 | } 191 | DisallowList = nil 192 | 193 | expected := map[int64]map[string][]int64{ 194 | 10002: {newSubject: []int64{1}}, 195 | } 196 | 197 | filtered := filterIDs(myIDs) 198 | 199 | assert.Equal(t, expected, filtered) 200 | 201 | // Test DisAllow lists 202 | AllowList = nil 203 | DisallowList = StringArrayFlag{ 204 | newSubject: true, 205 | } 206 | 207 | myIDs = map[int64]map[string][]int64{ 208 | 10001: {testingSubject: []int64{1}}, 209 | 10002: {newSubject: []int64{1}}, 210 | } 211 | 212 | expected = map[int64]map[string][]int64{ 213 | 10001: {testingSubject: []int64{1}}, 214 | } 215 | 216 | filtered = filterIDs(myIDs) 217 | 218 | assert.Equal(t, expected, filtered) 219 | 220 | // Test Both 221 | myIDs = map[int64]map[string][]int64{ 222 | 10001: {testingSubject: []int64{1}}, 223 | 10002: {newSubject: []int64{1}}, 224 | 10003: {"hello": []int64{1}}, 225 | 10004: {"IAmSubject": []int64{1}}, 226 | } 227 | AllowList = StringArrayFlag{ 228 | newSubject: true, 229 | testingSubject: true, 230 | "hello": true, 231 | } 232 | DisallowList = StringArrayFlag{ 233 | "hello": true, 234 | } 235 | 236 | // Expect hello to be disallowed 237 | expected = map[int64]map[string][]int64{ 238 | 10001: {testingSubject: []int64{1}}, 239 | 10002: {newSubject: []int64{1}}, 240 | } 241 | 242 | filtered = filterIDs(myIDs) 243 | 244 | assert.Equal(t, expected, filtered) 245 | 246 | AllowList = nil 247 | DisallowList = nil 248 | } 249 | 250 | func TFilterListedSubjects(t *testing.T) { 251 | mySubjects := []string{testingSubject, newSubject} 252 | 253 | // Test Allow lists 
254 | AllowList = StringArrayFlag{ 255 | newSubject: true, 256 | } 257 | DisallowList = nil 258 | 259 | expected := map[string]bool{ 260 | newSubject: true, 261 | } 262 | assert.Equal(t, expected, filterListedSubjects(mySubjects)) 263 | 264 | // Test DisAllow lists 265 | AllowList = nil 266 | DisallowList = StringArrayFlag{ 267 | newSubject: true, 268 | } 269 | 270 | expected = map[string]bool{ 271 | testingSubject: true, 272 | } 273 | assert.Equal(t, expected, filterListedSubjects(mySubjects)) 274 | 275 | // Test Both 276 | mySubjects = []string{testingSubject, newSubject, "hello", "ImASubject"} 277 | AllowList = StringArrayFlag{ 278 | newSubject: true, 279 | testingSubject: true, 280 | "hello": true, 281 | } 282 | DisallowList = StringArrayFlag{ 283 | "hello": true, 284 | } 285 | 286 | // Expect hello to be disallowed 287 | expected = map[string]bool{ 288 | newSubject: true, 289 | testingSubject: true, 290 | } 291 | assert.Equal(t, expected, filterListedSubjects(mySubjects)) 292 | 293 | AllowList = nil 294 | DisallowList = nil 295 | 296 | } 297 | 298 | func TFilterListedSubjectsVersions(t *testing.T) { 299 | mySubjects := []SubjectVersion{ 300 | {newSubject, 1}, 301 | {testingSubject, 1}, 302 | } 303 | 304 | // Test Allow lists 305 | AllowList = StringArrayFlag{ 306 | newSubject: true, 307 | } 308 | DisallowList = nil 309 | 310 | expected := []SubjectVersion{{Subject: newSubject, Version: 1}} 311 | assert.Equal(t, expected, filterListedSubjectsVersions(mySubjects)) 312 | 313 | // Test DisAllow lists 314 | AllowList = nil 315 | DisallowList = StringArrayFlag{ 316 | newSubject: true, 317 | } 318 | 319 | expected = []SubjectVersion{{Subject: testingSubject, Version: 1}} 320 | assert.Equal(t, expected, filterListedSubjectsVersions(mySubjects)) 321 | 322 | // Test Both 323 | mySubjects = []SubjectVersion{ 324 | {newSubject, 1}, 325 | {testingSubject, 1}, 326 | {"hello", 1}, 327 | {"ImASubject", 1}, 328 | } 329 | 330 | AllowList = StringArrayFlag{ 331 | newSubject: true, 332 | testingSubject: true, 333 | "hello": true, 334 | } 335 | DisallowList = StringArrayFlag{ 336 | "hello": true, 337 | } 338 | 339 | // Expect hello to be disallowed, expect ImASubject to not be included regardless 340 | expected = []SubjectVersion{{Subject: testingSubject, Version: 1}, {Subject: newSubject, Version: 1}} 341 | areEqual := compareSlices(expected, filterListedSubjectsVersions(mySubjects)) 342 | assert.True(t, areEqual) 343 | 344 | AllowList = nil 345 | DisallowList = nil 346 | 347 | } 348 | 349 | func TPerformSoftDelete(t *testing.T) { 350 | //Soft delete it 351 | testClient.PerformSoftDelete(testingSubject, 1) 352 | //Check for it 353 | checkIfSchemaRegistered := testClient.GetSchema(testingSubject, 1, false) 354 | assert.Equal(t, "", checkIfSchemaRegistered.Schema) 355 | } 356 | 357 | func TPerformHardDelete(t *testing.T) { 358 | //Hard delete it 359 | testClient.PerformHardDelete(testingSubject, 1) 360 | 361 | //Check if it is still an ID 362 | checkIfIDRegistered := testClient.GetSoftDeletedIDs() 363 | assert.Nil(t, checkIfIDRegistered[10001]) 364 | } 365 | 366 | func TDeleteAllSubjectsPermanently(t *testing.T) { 367 | /* 368 | By testing: 369 | GetSubjectsWithVersions 370 | PerformSoftDelete 371 | and PerformHardDelete 372 | We inherently test this method. 
373 | */ 374 | assert.True(t, true) 375 | } 376 | 377 | func setImportMode() { 378 | if !testClient.IsImportModeReady() { 379 | err := testClient.SetMode(IMPORT) 380 | if err == false { 381 | log.Fatalln("Could not set registry to IMPORT ModeRecord.") 382 | } 383 | } 384 | } 385 | 386 | func performQuery(req *http.Request) map[string]string { 387 | response := map[string]string{} 388 | 389 | res, err := httpClient.Do(req) 390 | if err != nil { 391 | log.Println(err.Error()) 392 | return nil 393 | } 394 | body, err := ioutil.ReadAll(res.Body) 395 | if err != nil { 396 | log.Printf(err.Error()) 397 | } 398 | err = json.Unmarshal(body, &response) 399 | if err != nil { 400 | log.Printf(err.Error()) 401 | } 402 | 403 | return response 404 | 405 | } 406 | 407 | func compareSlices(a, b []SubjectVersion) bool { 408 | if len(a) != len(b) { 409 | return false 410 | } 411 | 412 | sliceAMap := map[SubjectVersion]bool{} 413 | sliceBMap := map[SubjectVersion]bool{} 414 | 415 | for _, val := range a { 416 | sliceAMap[val] = false 417 | } 418 | for _, val := range b { 419 | sliceBMap[val] = false 420 | } 421 | 422 | for val, _ := range sliceAMap { 423 | _, exists := sliceBMap[val] 424 | if !exists { 425 | return false 426 | } 427 | } 428 | return true 429 | } 430 | -------------------------------------------------------------------------------- /cmd/internals/schemaLoads.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "log" 8 | "os" 9 | "path/filepath" 10 | "sort" 11 | "strings" 12 | ) 13 | 14 | type SchemaLoader struct { 15 | dstClient *SchemaRegistryClient 16 | schemasType SchemaType // Define the Loader Type 17 | schemaRecords map[SchemaDescriptor]map[int64]map[string]interface{} // Internal map of SchemaDescriptor -> version -> unstructured schema 18 | path string 19 | } 20 | 21 | type SchemaDescriptor struct { 22 | namespace string 23 | name string 24 | } 25 | 26 | // Define SchemaType enum 27 | type SchemaType int 28 | 29 | const ( 30 | AVRO SchemaType = iota 31 | PROTOBUF 32 | JSON 33 | ) 34 | 35 | func (st SchemaType) String() string { 36 | return [...]string{"AVRO", "PROTOBUF", "JSON"}[st] 37 | } 38 | 39 | // Types that do not need further resolution 40 | var nativeTypes = map[string]struct{}{ 41 | "null": {}, "boolean": {}, "int": {}, "long": {}, "float": {}, "double": {}, "bytes": {}, 42 | "string": {}, "record": {}, "enum": {}, "array": {}, "map": {}, "decimal": {}, "fixed": {}, "uuid": {}, 43 | "date": {}, "time-millis": {}, "time-micros": {}, "timestamp-millis": {}, "timestamp-micros": {}, 44 | "local-timestamp-millis": {}, "local-timestamp-micros": {}, "duration": {}, 45 | } 46 | 47 | /* 48 | A SchemaLoader takes in a SchemaType and a path and will load the schemas in the path onto Schema Registry. 
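For instance (an illustrative schema, not one shipped with this project), an AVRO load pointed at a directory containing a file with {"type":"record","name":"Click","namespace":"com.example","fields":[{"name":"url","type":"string"}]} would register that schema under the subject com.example.Click-value, resolving any non-native field types from the other loaded files and registering them first as references.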
49 | Schema Loaders take in the schemas in their natural form, and registers them, this means that if the underlying 50 | schema 51 | */ 52 | func NewSchemaLoader(schemaType string, dstClient *SchemaRegistryClient, givenPath string, workingDirectory string) *SchemaLoader { 53 | if strings.EqualFold(schemaType, AVRO.String()) { 54 | return &SchemaLoader{ 55 | dstClient: dstClient, 56 | schemasType: AVRO, 57 | schemaRecords: map[SchemaDescriptor]map[int64]map[string]interface{}{}, 58 | path: CheckPath(givenPath, workingDirectory), 59 | } 60 | } else if strings.EqualFold(schemaType, PROTOBUF.String()) { 61 | log.Fatalln("The Protobuf schema load is not supported yet.") 62 | } else if strings.EqualFold(schemaType, JSON.String()) { 63 | log.Fatalln("The Json schema load is not supported yet.") 64 | } 65 | 66 | log.Fatalln("This type of schema load is not supported, and there are no plans for support.") 67 | return nil 68 | } 69 | 70 | func (sl *SchemaLoader) Run() { 71 | listenForInterruption() 72 | sl.loadFromPath() 73 | 74 | if sl.schemasType == AVRO { 75 | for schemaDesc, schemaVersions := range sl.schemaRecords { 76 | if CancelRun == true { 77 | return 78 | } 79 | versions := make([]int64, 0) 80 | for versionNumber, _ := range schemaVersions { 81 | versions = append(versions, versionNumber) 82 | } 83 | sort.Slice(versions, func(i, j int) bool { return versions[i] < versions[j] }) 84 | for _, sortedVersion := range versions { 85 | sl.maybeRegisterAvroSchema(schemaDesc, sortedVersion, schemaVersions[sortedVersion]) 86 | } 87 | } 88 | } 89 | } 90 | 91 | func (sl *SchemaLoader) loadFromPath() { 92 | 93 | if sl.schemasType == AVRO { 94 | err := filepath.Walk(sl.path, sl.loadAvroFiles) 95 | check(err) 96 | } 97 | 98 | if CancelRun != true { 99 | log.Println("Successfully read schemas") 100 | } else { 101 | log.Println("Interrupted while reading schemas") 102 | } 103 | 104 | } 105 | 106 | func (sl *SchemaLoader) maybeRegisterAvroSchema(desc SchemaDescriptor, version int64, fullSchema map[string]interface{}) bool { 107 | 108 | thisSchemaName := fmt.Sprintf("%s.%s", desc.namespace, desc.name) 109 | thisSchemaReferences := []SchemaReference{} 110 | 111 | // Check there are fields to the schema 112 | if fullSchema["fields"] != nil { 113 | thisSchemaReferences = sl.getReferencesForAvroSchema(thisSchemaName, fullSchema["fields"]) 114 | } 115 | 116 | mapAsJsonBytes, err := json.Marshal(fullSchema) 117 | check(err) 118 | mapAsJsonString := string(mapAsJsonBytes) 119 | 120 | thisSchemaSubject := thisSchemaName + "-value" 121 | 122 | if checkSubjectIsAllowed(thisSchemaName) && !sl.dstClient.schemaIsRegisteredUnderSubject(thisSchemaSubject, 123 | "AVRO", mapAsJsonString, thisSchemaReferences) { 124 | log.Println(fmt.Sprintf("Registering schema not previously registered: %s with version: %d", thisSchemaSubject, version)) 125 | sl.dstClient.RegisterSchema( 126 | mapAsJsonString, 127 | thisSchemaSubject, 128 | "AVRO", 129 | thisSchemaReferences) 130 | return true 131 | } 132 | return false 133 | } 134 | 135 | func (sl *SchemaLoader) loadAvroFiles(path string, info os.FileInfo, err error) error { 136 | check(err) 137 | 138 | if !info.IsDir() { 139 | currentSchema, err := os.Open(path) 140 | checkDontFail(err) 141 | defer currentSchema.Close() 142 | 143 | jsonBytes, _ := ioutil.ReadAll(currentSchema) 144 | 145 | var schemaStruct map[string]interface{} 146 | json.Unmarshal(jsonBytes, &schemaStruct) 147 | 148 | thisSchemaDescription := SchemaDescriptor{ 149 | namespace: fmt.Sprintf("%v", schemaStruct["namespace"]), 
150 | name: fmt.Sprintf("%v", schemaStruct["name"]), 151 | } 152 | 153 | thisSchemaVersion := int64(len(sl.schemaRecords[thisSchemaDescription])) 154 | 155 | if thisSchemaVersion == 0 { 156 | sl.schemaRecords[thisSchemaDescription] = map[int64]map[string]interface{}{ 157 | thisSchemaVersion: schemaStruct, 158 | } 159 | } else { 160 | newVersion := sl.schemaRecords[thisSchemaDescription] 161 | newVersion[thisSchemaVersion] = schemaStruct 162 | sl.schemaRecords[thisSchemaDescription] = newVersion 163 | } 164 | 165 | } 166 | return nil 167 | } 168 | 169 | func (sl *SchemaLoader) getReferencesForAvroSchema(thisSchemaName string, fields interface{}) []SchemaReference { 170 | references := []SchemaReference{} 171 | 172 | switch theseFields := fields.(type) { 173 | case []interface{}: 174 | for _, oneField := range theseFields { 175 | switch typecastedField := oneField.(type) { 176 | case map[string]interface{}: 177 | switch typeOfTypeField := typecastedField["type"].(type) { 178 | case string: 179 | sl.resolveReferenceAndRegister(typeOfTypeField, &references) 180 | case []interface{}: 181 | for _, singleTypeInArray := range typeOfTypeField { 182 | switch thisFieldType := singleTypeInArray.(type) { 183 | case string: 184 | sl.resolveReferenceAndRegister(thisFieldType, &references) 185 | default: 186 | log.Println("Could not cast a type to string: " + fmt.Sprintf("%v", thisFieldType)) 187 | } 188 | } 189 | case map[string]interface{}: 190 | values, valuesExist := typeOfTypeField["values"] 191 | items, itemsExist := typeOfTypeField["items"] 192 | if valuesExist && !itemsExist { 193 | switch valuesFields := values.(type) { 194 | case string: 195 | sl.resolveReferenceAndRegister(valuesFields, &references) 196 | case []interface{}: 197 | for _, singleTypeInArray := range valuesFields { 198 | switch thisFieldType := singleTypeInArray.(type) { 199 | case string: 200 | sl.resolveReferenceAndRegister(thisFieldType, &references) 201 | default: 202 | log.Println("Could not cast a type to string: " + fmt.Sprintf("%v", thisFieldType)) 203 | } 204 | } 205 | default: 206 | log.Println("Could not parse avro array: " + fmt.Sprintf("%v", valuesFields)) 207 | } 208 | } 209 | if itemsExist && !valuesExist { 210 | switch itemsTypes := items.(type) { 211 | case string: 212 | sl.resolveReferenceAndRegister(itemsTypes, &references) 213 | case []interface{}: 214 | for _, singleTypeInArray := range itemsTypes { 215 | switch thisFieldType := singleTypeInArray.(type) { 216 | case string: 217 | sl.resolveReferenceAndRegister(thisFieldType, &references) 218 | default: 219 | log.Println("Could not cast a type to string: " + fmt.Sprintf("%v", thisFieldType)) 220 | } 221 | } 222 | default: 223 | log.Println("Could not parse avro map: " + fmt.Sprintf("%v", itemsTypes)) 224 | } 225 | } 226 | default: 227 | log.Println("Could not get types from schema") 228 | log.Println(fmt.Sprintf("%v", typeOfTypeField)) 229 | } 230 | default: 231 | log.Println("Could not typecast input") 232 | } 233 | } 234 | default: 235 | log.Println("Could not define the field array in this schema") 236 | log.Println(fmt.Sprintf("%v", theseFields)) 237 | } 238 | return references 239 | } 240 | 241 | func (sl *SchemaLoader) resolveReferenceAndRegister(referenceName string, references *[]SchemaReference) { 242 | // If this is a reference, then fill array and register that first 243 | //Build SchemaDescriptor for the reference 244 | _, exists := nativeTypes[referenceName] 245 | if !exists { 246 | thisReferenceDescriptor := GetAvroSchemaDescriptor(referenceName) 
247 | schemaFullName := fmt.Sprintf("%s.%s", thisReferenceDescriptor.namespace, thisReferenceDescriptor.name) 248 | 249 | versions, refExists := sl.schemaRecords[thisReferenceDescriptor] 250 | if !refExists { 251 | log.Fatalln("Reference doesn't exist: " + fmt.Sprintf("%v", thisReferenceDescriptor)) 252 | } 253 | latestVersionForReference := int64(len(versions)) 254 | 255 | sl.registerReferenceSet(versions, thisReferenceDescriptor) 256 | 257 | thisReference := SchemaReference{ 258 | Name: schemaFullName, // The type referenced 259 | Subject: schemaFullName + "-value", // The SchemaDescriptor 260 | Version: latestVersionForReference, // Latest version of schema descriptor 261 | } 262 | 263 | if !referenceIsInSlice(thisReference, *references) { 264 | *references = append(*references, thisReference) 265 | } 266 | } 267 | } 268 | 269 | func (sl *SchemaLoader) registerReferenceSet(versionsMap map[int64]map[string]interface{}, descriptor SchemaDescriptor) { 270 | versionsSlice := make([]int64, 0) 271 | for versionNumber, _ := range versionsMap { 272 | versionsSlice = append(versionsSlice, versionNumber) 273 | } 274 | sort.Slice(versionsSlice, func(i, j int) bool { return versionsSlice[i] < versionsSlice[j] }) 275 | 276 | for _, sortedVersion := range versionsSlice { 277 | if sl.maybeRegisterAvroSchema(descriptor, sortedVersion, sl.schemaRecords[descriptor][sortedVersion]) { 278 | log.Println(fmt.Sprintf("Registered schema reference not previously registered: %s.%s with version: %d", 279 | descriptor.namespace, descriptor.name, sortedVersion)) 280 | } 281 | } 282 | } 283 | -------------------------------------------------------------------------------- /cmd/internals/syncSchemas.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | // 4 | // syncSchemas.go 5 | // Copyright 2020 Abraham Leal 6 | // 7 | 8 | import ( 9 | "log" 10 | "reflect" 11 | "strconv" 12 | "time" 13 | ) 14 | 15 | func Sync(srcClient *SchemaRegistryClient, destClient *SchemaRegistryClient) { 16 | 17 | listenForInterruption() 18 | 19 | // Set up soft Deleted IDs in destination for interpretation by the destination registry 20 | if SyncDeletes { 21 | syncExistingSoftDeletedSubjects(srcClient, destClient) 22 | } 23 | 24 | //Begin sync 25 | for { 26 | if CancelRun == true { 27 | return 28 | } 29 | beginSync := time.Now() 30 | 31 | srcSubjects, destSubjects := GetCurrentSubjectsStates(srcClient, destClient) 32 | 33 | if !reflect.DeepEqual(srcSubjects, destSubjects) { 34 | diff := GetSubjectDiff(srcSubjects, destSubjects) 35 | // Perform sync 36 | initialSync(diff, srcClient, destClient) 37 | //Perform soft delete check 38 | if SyncDeletes { 39 | syncSoftDeletes(destSubjects, srcSubjects, destClient) 40 | } 41 | } 42 | 43 | // Perform hard delete check 44 | if SyncHardDeletes { 45 | syncHardDeletes(srcClient, destClient) 46 | } 47 | 48 | syncDuration := time.Since(beginSync) 49 | log.Printf("Finished sync in %d ms", syncDuration.Milliseconds()) 50 | 51 | time.Sleep(time.Duration(ScrapeInterval) * time.Second) 52 | } 53 | 54 | } 55 | 56 | func initialSync(diff map[string][]int64, srcClient *SchemaRegistryClient, destClient *SchemaRegistryClient) { 57 | if len(diff) != 0 { 58 | log.Println("Source registry has values that Destination does not, syncing...") 59 | for subject, versions := range diff { 60 | for _, v := range versions { 61 | schema := srcClient.GetSchema(subject, v, false) 62 | RegisterReferences(schema, srcClient, destClient, false) 63 | log.Println("Registering new 
schema: " + schema.Subject + 64 | " with version: " + strconv.FormatInt(schema.Version, 10) + 65 | " and ID: " + strconv.FormatInt(schema.Id, 10) + 66 | " and Type: " + schema.SType) 67 | destClient.RegisterSchemaBySubjectAndIDAndVersion(schema.Schema, 68 | schema.Subject, 69 | schema.Id, 70 | schema.Version, 71 | schema.SType, 72 | schema.References) 73 | } 74 | } 75 | } 76 | } 77 | 78 | func syncSoftDeletes(destSubjects map[string][]int64, srcSubjects map[string][]int64, destClient *SchemaRegistryClient) { 79 | diff := GetSubjectDiff(destSubjects, srcSubjects) 80 | if len(diff) != 0 { 81 | log.Println("Source registry has deletes that Destination does not, syncing...") 82 | for subject, versions := range diff { 83 | for _, v := range versions { 84 | destClient.PerformSoftDelete(subject, v) 85 | } 86 | } 87 | } 88 | } 89 | 90 | func syncHardDeletes(srcClient *SchemaRegistryClient, destClient *SchemaRegistryClient) { 91 | permDel := GetIDDiff(destClient.GetSoftDeletedIDs(), srcClient.GetSoftDeletedIDs()) 92 | if len(permDel) != 0 { 93 | for id, subjectVersionsMap := range permDel { 94 | for subject, versions := range subjectVersionsMap { 95 | for _, version := range versions { 96 | log.Printf("Discovered Hard Deleted Schema with ID %d, Subject %s, and Version: %d", 97 | id, subject, version) 98 | destClient.PerformHardDelete(subject, version) 99 | } 100 | } 101 | } 102 | } 103 | } 104 | 105 | func syncExistingSoftDeletedSubjects(srcClient *SchemaRegistryClient, destClient *SchemaRegistryClient) { 106 | softDel := GetIDDiff(srcClient.GetSoftDeletedIDs(), destClient.GetSoftDeletedIDs()) 107 | if len(softDel) != 0 { 108 | log.Println("There are soft Deleted IDs in the source. Sinking to the destination at startup...") 109 | for _, meta := range softDel { 110 | for sbj, versions := range meta { 111 | for _, version := range versions { 112 | softDeletedSchema := srcClient.GetSchema(sbj, version, true) 113 | destClient.RegisterSchemaBySubjectAndIDAndVersion(softDeletedSchema.Schema, 114 | softDeletedSchema.Subject, softDeletedSchema.Id, softDeletedSchema.Version, softDeletedSchema.SType, softDeletedSchema.References) 115 | destClient.PerformSoftDelete(softDeletedSchema.Subject, softDeletedSchema.Version) 116 | } 117 | } 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /cmd/testingUtils/testing_helpers.go: -------------------------------------------------------------------------------- 1 | package testingUtils 2 | 3 | import ( 4 | "context" 5 | "github.com/docker/go-connections/nat" 6 | "github.com/testcontainers/testcontainers-go" 7 | "github.com/testcontainers/testcontainers-go/wait" 8 | "log" 9 | "net/http" 10 | "time" 11 | ) 12 | 13 | var cpTestVersion = "7.4.1" 14 | var Ctx = context.Background() 15 | 16 | func GetBaseInfra(networkName string) (kafkaContainer testcontainers.Container, schemaRegistryContainer testcontainers.Container) { 17 | var network = testcontainers.NetworkRequest{ 18 | Name: networkName, 19 | Driver: "bridge", 20 | } 21 | 22 | provider, err := testcontainers.NewDockerProvider() 23 | if err != nil { 24 | log.Fatal(err) 25 | } 26 | 27 | if _, err := provider.GetNetwork(Ctx, network); err != nil { 28 | if _, err := provider.CreateNetwork(Ctx, network); err != nil { 29 | log.Fatal(err) 30 | } 31 | } 32 | 33 | kafkaContainer, err = testcontainers.GenericContainer(Ctx, 34 | testcontainers.GenericContainerRequest{ 35 | ContainerRequest: testcontainers.ContainerRequest{ 36 | Image: "confluentinc/confluent-local:" + 
cpTestVersion, 37 | ExposedPorts: []string{"29092/tcp", "29093/tcp", "9092/tcp"}, 38 | WaitingFor: wait.ForListeningPort("29092/tcp"), 39 | Name: "broker" + networkName, 40 | Env: map[string]string{ 41 | "CLUSTER_ID": "E__VgOY5Tna5qbyDTtFbTg", 42 | "KAFKA_PROCESS_ROLES": "broker,controller", 43 | "KAFKA_NODE_ID": "1", 44 | "KAFKA_CONTROLLER_QUORUM_VOTERS": "1@broker" + networkName + ":29093", 45 | "KAFKA_CONTROLLER_LISTENER_NAMES": "CONTROLLER", 46 | "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP": "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT", 47 | "KAFKA_LISTENERS": "PLAINTEXT://broker" + networkName + ":29092,CONTROLLER://broker" + networkName + ":29093,PLAINTEXT_HOST://0.0.0.0:9092", 48 | "KAFKA_ADVERTISED_LISTENERS": "PLAINTEXT://broker" + networkName + ":29092, PLAINTEXT_HOST://localhost:9092", 49 | "KAFKA_INTER_BROKER_LISTENER_NAME": "PLAINTEXT", 50 | "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR": "1", 51 | "KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS": "0", 52 | "KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR": "1", 53 | "KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR": "1", 54 | "KAFKA_TRANSACTION_STATE_LOG_MIN_ISR": "1", 55 | "KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR": "1", 56 | }, 57 | Networks: []string{networkName}, 58 | }, 59 | Started: true, 60 | }) 61 | CheckFail(err, "Kafka was not able to start") 62 | 63 | schemaRegistryContainer, err = testcontainers.GenericContainer(Ctx, 64 | testcontainers.GenericContainerRequest{ 65 | ContainerRequest: testcontainers.ContainerRequest{ 66 | Image: "confluentinc/cp-schema-registry:" + cpTestVersion, 67 | ExposedPorts: []string{"8081/tcp"}, 68 | WaitingFor: GetSRWaitStrategy("8081"), 69 | Name: "schema-registry-src-" + networkName, 70 | Env: map[string]string{ 71 | "SCHEMA_REGISTRY_HOST_NAME": "schema-registry-src", 72 | "SCHEMA_REGISTRY_SCHEMA_REGISTRY_GROUP_ID": "schema-src", 73 | "SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS": "broker" + networkName + ":29092", 74 | "SCHEMA_REGISTRY_KAFKASTORE_TOPIC": "_schemas", 75 | "SCHEMA_REGISTRY_KAFKASTORE_TOPIC_REPLICATION_FACTOR": "1", 76 | "SCHEMA_REGISTRY_MODE_MUTABILITY": "true", 77 | }, 78 | Networks: []string{networkName}, 79 | }, 80 | Started: true, 81 | }) 82 | CheckFail(err, "Source SR was not able to start") 83 | 84 | return kafkaContainer, schemaRegistryContainer 85 | } 86 | 87 | // Pass in: Port to wait for 200 return 88 | func GetSRWaitStrategy(port string) wait.Strategy { 89 | var i int 90 | return wait.ForHTTP("/"). 91 | WithPort(nat.Port(port + "/tcp")). 92 | WithStartupTimeout(time.Second * 30). 93 | WithMethod(http.MethodGet). 
94 | WithStatusCodeMatcher(func(status int) bool { i++; return i > 1 && status == 200 }) 95 | } 96 | 97 | // Simple check function that will fail all if there is an error present and allows a custom message to be printed 98 | func CheckFail(e error, msg string) { 99 | if e != nil { 100 | log.Println(e) 101 | log.Fatalln(msg) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /cmd/trustedEntities/LetsEncryptCA.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFYDCCBEigAwIBAgIQQAF3ITfU6UK47naqPGQKtzANBgkqhkiG9w0BAQsFADA/ 3 | MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT 4 | DkRTVCBSb290IENBIFgzMB4XDTIxMDEyMDE5MTQwM1oXDTI0MDkzMDE4MTQwM1ow 5 | TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh 6 | cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwggIiMA0GCSqGSIb3DQEB 7 | AQUAA4ICDwAwggIKAoICAQCt6CRz9BQ385ueK1coHIe+3LffOJCMbjzmV6B493XC 8 | ov71am72AE8o295ohmxEk7axY/0UEmu/H9LqMZshftEzPLpI9d1537O4/xLxIZpL 9 | wYqGcWlKZmZsj348cL+tKSIG8+TA5oCu4kuPt5l+lAOf00eXfJlII1PoOK5PCm+D 10 | LtFJV4yAdLbaL9A4jXsDcCEbdfIwPPqPrt3aY6vrFk/CjhFLfs8L6P+1dy70sntK 11 | 4EwSJQxwjQMpoOFTJOwT2e4ZvxCzSow/iaNhUd6shweU9GNx7C7ib1uYgeGJXDR5 12 | bHbvO5BieebbpJovJsXQEOEO3tkQjhb7t/eo98flAgeYjzYIlefiN5YNNnWe+w5y 13 | sR2bvAP5SQXYgd0FtCrWQemsAXaVCg/Y39W9Eh81LygXbNKYwagJZHduRze6zqxZ 14 | Xmidf3LWicUGQSk+WT7dJvUkyRGnWqNMQB9GoZm1pzpRboY7nn1ypxIFeFntPlF4 15 | FQsDj43QLwWyPntKHEtzBRL8xurgUBN8Q5N0s8p0544fAQjQMNRbcTa0B7rBMDBc 16 | SLeCO5imfWCKoqMpgsy6vYMEG6KDA0Gh1gXxG8K28Kh8hjtGqEgqiNx2mna/H2ql 17 | PRmP6zjzZN7IKw0KKP/32+IVQtQi0Cdd4Xn+GOdwiK1O5tmLOsbdJ1Fu/7xk9TND 18 | TwIDAQABo4IBRjCCAUIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw 19 | SwYIKwYBBQUHAQEEPzA9MDsGCCsGAQUFBzAChi9odHRwOi8vYXBwcy5pZGVudHJ1 20 | c3QuY29tL3Jvb3RzL2RzdHJvb3RjYXgzLnA3YzAfBgNVHSMEGDAWgBTEp7Gkeyxx 21 | +tvhS5B1/8QVYIWJEDBUBgNVHSAETTBLMAgGBmeBDAECATA/BgsrBgEEAYLfEwEB 22 | ATAwMC4GCCsGAQUFBwIBFiJodHRwOi8vY3BzLnJvb3QteDEubGV0c2VuY3J5cHQu 23 | b3JnMDwGA1UdHwQ1MDMwMaAvoC2GK2h0dHA6Ly9jcmwuaWRlbnRydXN0LmNvbS9E 24 | U1RST09UQ0FYM0NSTC5jcmwwHQYDVR0OBBYEFHm0WeZ7tuXkAXOACIjIGlj26Ztu 25 | MA0GCSqGSIb3DQEBCwUAA4IBAQAKcwBslm7/DlLQrt2M51oGrS+o44+/yQoDFVDC 26 | 5WxCu2+b9LRPwkSICHXM6webFGJueN7sJ7o5XPWioW5WlHAQU7G75K/QosMrAdSW 27 | 9MUgNTP52GE24HGNtLi1qoJFlcDyqSMo59ahy2cI2qBDLKobkx/J3vWraV0T9VuG 28 | WCLKTVXkcGdtwlfFRjlBz4pYg1htmf5X6DYO8A4jqv2Il9DjXA6USbW1FzXSLr9O 29 | he8Y4IWS6wY7bCkjCWDcRQJMEhg76fsO3txE+FiYruq9RUWhiF1myv4Q6W+CyBFC 30 | Dfvp7OOGAN6dEOM4+qR9sdjoSYKEBpsr6GtPAQw4dy753ec5 31 | -----END CERTIFICATE----- 32 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/abraham-leal/ccloud-schema-exporter 2 | 3 | go 1.20 4 | 5 | require ( 6 | github.com/prometheus/client_golang v1.16.0 7 | github.com/stretchr/testify v1.8.4 8 | github.com/testcontainers/testcontainers-go v0.22.0 9 | ) 10 | 11 | require ( 12 | dario.cat/mergo v1.0.0 // indirect 13 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect 14 | github.com/Microsoft/go-winio v0.6.1 // indirect 15 | github.com/beorn7/perks v1.0.1 // indirect 16 | github.com/cenkalti/backoff/v4 v4.2.0 // indirect 17 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 18 | github.com/containerd/containerd v1.7.3 // indirect 19 | github.com/cpuguy83/dockercfg v0.3.1 // indirect 20 | github.com/davecgh/go-spew v1.1.1 // indirect 21 | 
github.com/docker/distribution v2.8.2+incompatible // indirect 22 | github.com/docker/docker v24.0.5+incompatible // indirect 23 | github.com/docker/go-connections v0.4.0 // indirect 24 | github.com/docker/go-units v0.5.0 // indirect 25 | github.com/gogo/protobuf v1.3.2 // indirect 26 | github.com/golang/protobuf v1.5.3 // indirect 27 | github.com/google/uuid v1.3.0 // indirect 28 | github.com/klauspost/compress v1.16.0 // indirect 29 | github.com/magiconair/properties v1.8.7 // indirect 30 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 31 | github.com/moby/patternmatcher v0.5.0 // indirect 32 | github.com/moby/sys/sequential v0.5.0 // indirect 33 | github.com/moby/term v0.5.0 // indirect 34 | github.com/morikuni/aec v1.0.0 // indirect 35 | github.com/opencontainers/go-digest v1.0.0 // indirect 36 | github.com/opencontainers/image-spec v1.1.0-rc4 // indirect 37 | github.com/opencontainers/runc v1.1.5 // indirect 38 | github.com/pkg/errors v0.9.1 // indirect 39 | github.com/pmezard/go-difflib v1.0.0 // indirect 40 | github.com/prometheus/client_model v0.3.0 // indirect 41 | github.com/prometheus/common v0.42.0 // indirect 42 | github.com/prometheus/procfs v0.10.1 // indirect 43 | github.com/sirupsen/logrus v1.9.0 // indirect 44 | golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect 45 | golang.org/x/mod v0.9.0 // indirect 46 | golang.org/x/net v0.9.0 // indirect 47 | golang.org/x/sys v0.8.0 // indirect 48 | golang.org/x/tools v0.7.0 // indirect 49 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect 50 | google.golang.org/grpc v1.57.0 // indirect 51 | google.golang.org/protobuf v1.30.0 // indirect 52 | gopkg.in/yaml.v3 v3.0.1 // indirect 53 | ) 54 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= 2 | dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= 3 | github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= 4 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= 5 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= 6 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 7 | github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= 8 | github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= 9 | github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek= 10 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 11 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 12 | github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= 13 | github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 14 | github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 15 | github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 16 | github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= 17 | github.com/cilium/ebpf v0.7.0/go.mod 
h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= 18 | github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= 19 | github.com/containerd/containerd v1.7.3 h1:cKwYKkP1eTj54bP3wCdXXBymmKRQMrWjkLSWZZJDa8o= 20 | github.com/containerd/containerd v1.7.3/go.mod h1:32FOM4/O0RkNg7AjQj3hDzN9cUGtu+HMvaKUNiqCZB8= 21 | github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 22 | github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= 23 | github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= 24 | github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 25 | github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= 26 | github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= 27 | github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= 28 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 29 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 30 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 31 | github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= 32 | github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= 33 | github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY= 34 | github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 35 | github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= 36 | github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= 37 | github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 38 | github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= 39 | github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 40 | github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= 41 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 42 | github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 43 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 44 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 45 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 46 | github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= 47 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 48 | github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 49 | github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 50 | github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 51 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 52 | github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 53 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 54 | github.com/google/uuid v1.3.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 55 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 56 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 57 | github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= 58 | github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= 59 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 60 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 61 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 62 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 63 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 64 | github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= 65 | github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= 66 | github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= 67 | github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 68 | github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= 69 | github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= 70 | github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= 71 | github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= 72 | github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= 73 | github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= 74 | github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= 75 | github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= 76 | github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 77 | github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= 78 | github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= 79 | github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= 80 | github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= 81 | github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= 82 | github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= 83 | github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= 84 | github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 85 | github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= 86 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 87 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 88 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 89 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 90 | github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= 91 | github.com/prometheus/client_golang v1.16.0/go.mod 
h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= 92 | github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= 93 | github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= 94 | github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= 95 | github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= 96 | github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= 97 | github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= 98 | github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= 99 | github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 100 | github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= 101 | github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 102 | github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 103 | github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= 104 | github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 105 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 106 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 107 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 108 | github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 109 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 110 | github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= 111 | github.com/testcontainers/testcontainers-go v0.22.0 h1:hOK4NzNu82VZcKEB1aP9LO1xYssVFMvlfeuDW9JMmV0= 112 | github.com/testcontainers/testcontainers-go v0.22.0/go.mod h1:k0YiPa26xJCRUbUkYqy5rY6NGvSbVCeUBXCvucscBR4= 113 | github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= 114 | github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= 115 | github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= 116 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 117 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 118 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 119 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 120 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 121 | golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= 122 | golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= 123 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 124 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 125 | golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= 126 | golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 
127 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 128 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 129 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 130 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 131 | golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 132 | golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= 133 | golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= 134 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 135 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 136 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 137 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 138 | golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= 139 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 140 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 141 | golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 142 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 143 | golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 144 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 145 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 146 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 147 | golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 148 | golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 149 | golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 150 | golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 151 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 152 | golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= 153 | golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 154 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 155 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 156 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 157 | golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= 158 | golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= 159 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 160 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 161 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 162 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 163 | golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= 164 | golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= 165 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 166 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 167 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 168 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 169 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= 170 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= 171 | google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= 172 | google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= 173 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 174 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 175 | google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 176 | google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= 177 | google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 178 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 179 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 180 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 181 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 182 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 183 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 184 | gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= 185 | -------------------------------------------------------------------------------- /samples/Grafana_Sample_Dashboard_JSON.json: -------------------------------------------------------------------------------- 1 | { 2 | "__inputs": [ 3 | { 4 | "name": "DS_PROMETHEUS", 5 | "label": "Prometheus", 6 | "description": "", 7 | "type": "datasource", 8 | "pluginId": "prometheus", 9 | "pluginName": "Prometheus" 10 | } 11 | ], 12 | "__requires": [ 13 | { 14 | "type": "grafana", 15 | "id": "grafana", 16 | "name": "Grafana", 17 | "version": "7.1.3" 18 | }, 19 | { 20 | "type": "panel", 21 | "id": "graph", 22 | "name": "Graph", 23 | "version": "" 24 | }, 25 | { 26 | "type": "datasource", 27 | "id": "prometheus", 28 | "name": "Prometheus", 29 | "version": "1.0.0" 30 | } 31 | ], 32 | "annotations": { 33 | "list": [ 34 | { 35 | "builtIn": 1, 36 | "datasource": "-- Grafana --", 37 | "enable": true, 38 | "hide": true, 39 | "iconColor": "rgba(0, 211, 255, 1)", 40 | "name": "Annotations & Alerts", 41 | "type": "dashboard" 42 | } 43 | ] 44 | }, 45 | "editable": 
true, 46 | "gnetId": null, 47 | "graphTooltip": 0, 48 | "id": null, 49 | "links": [], 50 | "panels": [ 51 | { 52 | "aliasColors": {}, 53 | "bars": false, 54 | "dashLength": 10, 55 | "dashes": false, 56 | "datasource": "${DS_PROMETHEUS}", 57 | "fieldConfig": { 58 | "defaults": { 59 | "custom": {} 60 | }, 61 | "overrides": [] 62 | }, 63 | "fill": 1, 64 | "fillGradient": 0, 65 | "gridPos": { 66 | "h": 6, 67 | "w": 8, 68 | "x": 0, 69 | "y": 0 70 | }, 71 | "hiddenSeries": false, 72 | "id": 10, 73 | "legend": { 74 | "avg": false, 75 | "current": false, 76 | "max": false, 77 | "min": false, 78 | "show": true, 79 | "total": false, 80 | "values": false 81 | }, 82 | "lines": true, 83 | "linewidth": 1, 84 | "nullPointMode": "null", 85 | "percentage": false, 86 | "pluginVersion": "7.1.3", 87 | "pointradius": 2, 88 | "points": false, 89 | "renderer": "flot", 90 | "seriesOverrides": [], 91 | "spaceLength": 10, 92 | "stack": false, 93 | "steppedLine": false, 94 | "targets": [ 95 | { 96 | "expr": "schema_exporter_registered_schemas", 97 | "interval": "", 98 | "legendFormat": "Registered Schemas", 99 | "refId": "A" 100 | } 101 | ], 102 | "thresholds": [], 103 | "timeFrom": null, 104 | "timeRegions": [], 105 | "timeShift": null, 106 | "title": "# of Registered Schemas", 107 | "tooltip": { 108 | "shared": true, 109 | "sort": 0, 110 | "value_type": "individual" 111 | }, 112 | "type": "graph", 113 | "xaxis": { 114 | "buckets": null, 115 | "mode": "time", 116 | "name": null, 117 | "show": true, 118 | "values": [] 119 | }, 120 | "yaxes": [ 121 | { 122 | "format": "short", 123 | "label": null, 124 | "logBase": 1, 125 | "max": null, 126 | "min": null, 127 | "show": true 128 | }, 129 | { 130 | "format": "short", 131 | "label": null, 132 | "logBase": 1, 133 | "max": null, 134 | "min": null, 135 | "show": true 136 | } 137 | ], 138 | "yaxis": { 139 | "align": false, 140 | "alignLevel": null 141 | } 142 | }, 143 | { 144 | "aliasColors": {}, 145 | "bars": false, 146 | "dashLength": 10, 147 | "dashes": false, 148 | "datasource": "${DS_PROMETHEUS}", 149 | "fieldConfig": { 150 | "defaults": { 151 | "custom": {} 152 | }, 153 | "overrides": [] 154 | }, 155 | "fill": 1, 156 | "fillGradient": 0, 157 | "gridPos": { 158 | "h": 6, 159 | "w": 8, 160 | "x": 8, 161 | "y": 0 162 | }, 163 | "hiddenSeries": false, 164 | "id": 12, 165 | "legend": { 166 | "avg": false, 167 | "current": false, 168 | "max": false, 169 | "min": false, 170 | "show": true, 171 | "total": false, 172 | "values": false 173 | }, 174 | "lines": true, 175 | "linewidth": 1, 176 | "nullPointMode": "null", 177 | "percentage": false, 178 | "pluginVersion": "7.1.3", 179 | "pointradius": 2, 180 | "points": false, 181 | "renderer": "flot", 182 | "seriesOverrides": [], 183 | "spaceLength": 10, 184 | "stack": false, 185 | "steppedLine": false, 186 | "targets": [ 187 | { 188 | "expr": "schema_exporter_softDeleted_schemas", 189 | "interval": "", 190 | "legendFormat": "Soft Deleted Schemas", 191 | "refId": "A" 192 | } 193 | ], 194 | "thresholds": [], 195 | "timeFrom": null, 196 | "timeRegions": [], 197 | "timeShift": null, 198 | "title": "# of Soft Deleted Schemas", 199 | "tooltip": { 200 | "shared": true, 201 | "sort": 0, 202 | "value_type": "individual" 203 | }, 204 | "type": "graph", 205 | "xaxis": { 206 | "buckets": null, 207 | "mode": "time", 208 | "name": null, 209 | "show": true, 210 | "values": [] 211 | }, 212 | "yaxes": [ 213 | { 214 | "format": "short", 215 | "label": null, 216 | "logBase": 1, 217 | "max": null, 218 | "min": null, 219 | "show": true 220 | }, 221 | { 
222 | "format": "short", 223 | "label": null, 224 | "logBase": 1, 225 | "max": null, 226 | "min": null, 227 | "show": true 228 | } 229 | ], 230 | "yaxis": { 231 | "align": false, 232 | "alignLevel": null 233 | } 234 | }, 235 | { 236 | "aliasColors": {}, 237 | "bars": false, 238 | "dashLength": 10, 239 | "dashes": false, 240 | "datasource": "${DS_PROMETHEUS}", 241 | "fieldConfig": { 242 | "defaults": { 243 | "custom": {} 244 | }, 245 | "overrides": [] 246 | }, 247 | "fill": 1, 248 | "fillGradient": 0, 249 | "gridPos": { 250 | "h": 6, 251 | "w": 8, 252 | "x": 16, 253 | "y": 0 254 | }, 255 | "hiddenSeries": false, 256 | "id": 14, 257 | "legend": { 258 | "avg": false, 259 | "current": false, 260 | "max": false, 261 | "min": false, 262 | "show": true, 263 | "total": false, 264 | "values": false 265 | }, 266 | "lines": true, 267 | "linewidth": 1, 268 | "nullPointMode": "null", 269 | "percentage": false, 270 | "pluginVersion": "7.1.3", 271 | "pointradius": 2, 272 | "points": false, 273 | "renderer": "flot", 274 | "seriesOverrides": [], 275 | "spaceLength": 10, 276 | "stack": false, 277 | "steppedLine": false, 278 | "targets": [ 279 | { 280 | "expr": "schema_exporter_hardDeleted_schemas", 281 | "interval": "", 282 | "legendFormat": "Hard Deleted Schemas", 283 | "refId": "A" 284 | } 285 | ], 286 | "thresholds": [], 287 | "timeFrom": null, 288 | "timeRegions": [], 289 | "timeShift": null, 290 | "title": "# of Hard Deleted Schemas", 291 | "tooltip": { 292 | "shared": true, 293 | "sort": 0, 294 | "value_type": "individual" 295 | }, 296 | "type": "graph", 297 | "xaxis": { 298 | "buckets": null, 299 | "mode": "time", 300 | "name": null, 301 | "show": true, 302 | "values": [] 303 | }, 304 | "yaxes": [ 305 | { 306 | "format": "short", 307 | "label": null, 308 | "logBase": 1, 309 | "max": null, 310 | "min": null, 311 | "show": true 312 | }, 313 | { 314 | "format": "short", 315 | "label": null, 316 | "logBase": 1, 317 | "max": null, 318 | "min": null, 319 | "show": true 320 | } 321 | ], 322 | "yaxis": { 323 | "align": false, 324 | "alignLevel": null 325 | } 326 | }, 327 | { 328 | "aliasColors": {}, 329 | "bars": false, 330 | "dashLength": 10, 331 | "dashes": false, 332 | "datasource": "${DS_PROMETHEUS}", 333 | "fieldConfig": { 334 | "defaults": { 335 | "custom": {} 336 | }, 337 | "overrides": [] 338 | }, 339 | "fill": 1, 340 | "fillGradient": 0, 341 | "gridPos": { 342 | "h": 7, 343 | "w": 12, 344 | "x": 0, 345 | "y": 6 346 | }, 347 | "hiddenSeries": false, 348 | "id": 4, 349 | "legend": { 350 | "avg": false, 351 | "current": false, 352 | "max": false, 353 | "min": false, 354 | "show": true, 355 | "total": false, 356 | "values": false 357 | }, 358 | "lines": true, 359 | "linewidth": 1, 360 | "nullPointMode": "null", 361 | "percentage": false, 362 | "pluginVersion": "7.1.3", 363 | "pointradius": 2, 364 | "points": false, 365 | "renderer": "flot", 366 | "seriesOverrides": [], 367 | "spaceLength": 10, 368 | "stack": false, 369 | "steppedLine": false, 370 | "targets": [ 371 | { 372 | "expr": "go_gc_duration_seconds{quantile=\"1\"}", 373 | "interval": "", 374 | "legendFormat": "Duration 100%", 375 | "refId": "A" 376 | }, 377 | { 378 | "expr": "go_gc_duration_seconds{quantile=\"0.75\"}", 379 | "interval": "", 380 | "legendFormat": "Duration 75%", 381 | "refId": "B" 382 | } 383 | ], 384 | "thresholds": [], 385 | "timeFrom": null, 386 | "timeRegions": [], 387 | "timeShift": null, 388 | "title": "Garbage Collection Time (Seconds)", 389 | "tooltip": { 390 | "shared": true, 391 | "sort": 0, 392 | "value_type": 
"individual" 393 | }, 394 | "type": "graph", 395 | "xaxis": { 396 | "buckets": null, 397 | "mode": "time", 398 | "name": null, 399 | "show": true, 400 | "values": [] 401 | }, 402 | "yaxes": [ 403 | { 404 | "$$hashKey": "object:112", 405 | "format": "s", 406 | "label": null, 407 | "logBase": 1, 408 | "max": null, 409 | "min": null, 410 | "show": true 411 | }, 412 | { 413 | "$$hashKey": "object:113", 414 | "format": "short", 415 | "label": null, 416 | "logBase": 1, 417 | "max": null, 418 | "min": null, 419 | "show": true 420 | } 421 | ], 422 | "yaxis": { 423 | "align": false, 424 | "alignLevel": null 425 | } 426 | }, 427 | { 428 | "aliasColors": {}, 429 | "bars": false, 430 | "dashLength": 10, 431 | "dashes": false, 432 | "datasource": "${DS_PROMETHEUS}", 433 | "fieldConfig": { 434 | "defaults": { 435 | "custom": {} 436 | }, 437 | "overrides": [] 438 | }, 439 | "fill": 1, 440 | "fillGradient": 0, 441 | "gridPos": { 442 | "h": 7, 443 | "w": 12, 444 | "x": 12, 445 | "y": 6 446 | }, 447 | "hiddenSeries": false, 448 | "id": 6, 449 | "legend": { 450 | "avg": false, 451 | "current": false, 452 | "max": false, 453 | "min": false, 454 | "show": true, 455 | "total": false, 456 | "values": false 457 | }, 458 | "lines": true, 459 | "linewidth": 1, 460 | "nullPointMode": "null", 461 | "percentage": false, 462 | "pluginVersion": "7.1.3", 463 | "pointradius": 2, 464 | "points": false, 465 | "renderer": "flot", 466 | "seriesOverrides": [], 467 | "spaceLength": 10, 468 | "stack": false, 469 | "steppedLine": false, 470 | "targets": [ 471 | { 472 | "expr": "go_goroutines", 473 | "interval": "", 474 | "legendFormat": "Running", 475 | "refId": "A" 476 | } 477 | ], 478 | "thresholds": [], 479 | "timeFrom": null, 480 | "timeRegions": [], 481 | "timeShift": null, 482 | "title": "Number of GoRoutines", 483 | "tooltip": { 484 | "shared": true, 485 | "sort": 0, 486 | "value_type": "individual" 487 | }, 488 | "type": "graph", 489 | "xaxis": { 490 | "buckets": null, 491 | "mode": "time", 492 | "name": null, 493 | "show": true, 494 | "values": [] 495 | }, 496 | "yaxes": [ 497 | { 498 | "$$hashKey": "object:199", 499 | "format": "short", 500 | "label": null, 501 | "logBase": 1, 502 | "max": null, 503 | "min": null, 504 | "show": true 505 | }, 506 | { 507 | "$$hashKey": "object:200", 508 | "format": "short", 509 | "label": null, 510 | "logBase": 1, 511 | "max": null, 512 | "min": null, 513 | "show": true 514 | } 515 | ], 516 | "yaxis": { 517 | "align": false, 518 | "alignLevel": null 519 | } 520 | }, 521 | { 522 | "aliasColors": {}, 523 | "bars": false, 524 | "dashLength": 10, 525 | "dashes": false, 526 | "datasource": "${DS_PROMETHEUS}", 527 | "description": "", 528 | "fieldConfig": { 529 | "defaults": { 530 | "custom": {} 531 | }, 532 | "overrides": [] 533 | }, 534 | "fill": 1, 535 | "fillGradient": 0, 536 | "gridPos": { 537 | "h": 6, 538 | "w": 12, 539 | "x": 0, 540 | "y": 13 541 | }, 542 | "hiddenSeries": false, 543 | "id": 2, 544 | "legend": { 545 | "avg": false, 546 | "current": false, 547 | "max": false, 548 | "min": false, 549 | "show": true, 550 | "total": false, 551 | "values": false 552 | }, 553 | "lines": true, 554 | "linewidth": 1, 555 | "nullPointMode": "null", 556 | "percentage": false, 557 | "pluginVersion": "7.1.3", 558 | "pointradius": 2, 559 | "points": false, 560 | "renderer": "flot", 561 | "seriesOverrides": [], 562 | "spaceLength": 10, 563 | "stack": false, 564 | "steppedLine": false, 565 | "targets": [ 566 | { 567 | "expr": "go_memstats_alloc_bytes", 568 | "interval": "", 569 | "legendFormat": 
"Memory Allocated to Heap", 570 | "refId": "B" 571 | }, 572 | { 573 | "expr": "go_memstats_heap_inuse_bytes", 574 | "interval": "", 575 | "legendFormat": "Total In Use", 576 | "refId": "A" 577 | } 578 | ], 579 | "thresholds": [], 580 | "timeFrom": null, 581 | "timeRegions": [], 582 | "timeShift": null, 583 | "title": "Schema Exporter Memory Usage (Bytes)", 584 | "tooltip": { 585 | "shared": true, 586 | "sort": 0, 587 | "value_type": "individual" 588 | }, 589 | "type": "graph", 590 | "xaxis": { 591 | "buckets": null, 592 | "mode": "time", 593 | "name": null, 594 | "show": true, 595 | "values": [] 596 | }, 597 | "yaxes": [ 598 | { 599 | "$$hashKey": "object:74", 600 | "format": "bytes", 601 | "label": null, 602 | "logBase": 1, 603 | "max": null, 604 | "min": null, 605 | "show": true 606 | }, 607 | { 608 | "$$hashKey": "object:75", 609 | "format": "short", 610 | "label": null, 611 | "logBase": 1, 612 | "max": null, 613 | "min": null, 614 | "show": true 615 | } 616 | ], 617 | "yaxis": { 618 | "align": false, 619 | "alignLevel": null 620 | } 621 | }, 622 | { 623 | "aliasColors": {}, 624 | "bars": false, 625 | "dashLength": 10, 626 | "dashes": false, 627 | "datasource": "${DS_PROMETHEUS}", 628 | "fieldConfig": { 629 | "defaults": { 630 | "custom": {} 631 | }, 632 | "overrides": [] 633 | }, 634 | "fill": 1, 635 | "fillGradient": 0, 636 | "gridPos": { 637 | "h": 6, 638 | "w": 12, 639 | "x": 12, 640 | "y": 13 641 | }, 642 | "hiddenSeries": false, 643 | "id": 8, 644 | "legend": { 645 | "avg": false, 646 | "current": false, 647 | "max": false, 648 | "min": false, 649 | "show": true, 650 | "total": false, 651 | "values": false 652 | }, 653 | "lines": true, 654 | "linewidth": 1, 655 | "nullPointMode": "null", 656 | "percentage": false, 657 | "pluginVersion": "7.1.3", 658 | "pointradius": 2, 659 | "points": false, 660 | "renderer": "flot", 661 | "seriesOverrides": [], 662 | "spaceLength": 10, 663 | "stack": false, 664 | "steppedLine": false, 665 | "targets": [ 666 | { 667 | "expr": "rate(go_memstats_alloc_bytes_total[1m])", 668 | "interval": "", 669 | "legendFormat": "Memory Consumption", 670 | "refId": "A" 671 | } 672 | ], 673 | "thresholds": [], 674 | "timeFrom": null, 675 | "timeRegions": [], 676 | "timeShift": null, 677 | "title": "Memory Consumption Per Second (Bytes/S)", 678 | "tooltip": { 679 | "shared": true, 680 | "sort": 0, 681 | "value_type": "individual" 682 | }, 683 | "type": "graph", 684 | "xaxis": { 685 | "buckets": null, 686 | "mode": "time", 687 | "name": null, 688 | "show": true, 689 | "values": [] 690 | }, 691 | "yaxes": [ 692 | { 693 | "$$hashKey": "object:140", 694 | "format": "Bps", 695 | "label": null, 696 | "logBase": 1, 697 | "max": null, 698 | "min": null, 699 | "show": true 700 | }, 701 | { 702 | "$$hashKey": "object:141", 703 | "format": "short", 704 | "label": null, 705 | "logBase": 1, 706 | "max": null, 707 | "min": null, 708 | "show": true 709 | } 710 | ], 711 | "yaxis": { 712 | "align": false, 713 | "alignLevel": null 714 | } 715 | } 716 | ], 717 | "schemaVersion": 26, 718 | "style": "dark", 719 | "tags": [], 720 | "templating": { 721 | "list": [] 722 | }, 723 | "time": { 724 | "from": "now-5m", 725 | "to": "now" 726 | }, 727 | "timepicker": {}, 728 | "timezone": "", 729 | "title": "Schema Exporter Monitoring", 730 | "uid": "nBgce-JGz", 731 | "version": 1 732 | } 733 | -------------------------------------------------------------------------------- /samples/SampleGrafanaDashboard.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/abraham-leal/ccloud-schema-exporter/7153aa0071c092d80e45ae144629826f602566c2/samples/SampleGrafanaDashboard.png -------------------------------------------------------------------------------- /samples/bump_ids.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # author: aleal@confluent.io @ github.com/abraham-leal 3 | set -e 4 | 5 | ## Check for provided arguments 6 | if (( "$#" < 1 )); then 7 | echo "Illegal number of parameters." 8 | echo 9 | echo "Usage: 10 | $0 <Schema Registry URL> [Optional: Beginning Schema ID, Default: 10,000] [Optional: Basic Auth Credentials in form of <username>:<password>] 11 | 12 | Example: 13 | $0 http://localhost:8081 500 userAdmin:adminPassword 14 | " 15 | exit 16 | fi 17 | 18 | ## Capture Schema Registry URL 19 | SRURL_INPUT=$1 20 | if [[ $SRURL_INPUT == *'/' ]] 21 | then 22 | SRURL=$(echo $SRURL_INPUT | sed 's/.$//') 23 | else 24 | SRURL=$SRURL_INPUT 25 | fi 26 | 27 | ## Default beginning ID if not set 28 | if [[ -z $2 ]] 29 | then 30 | echo "No beginning ID provided, starting at 10000" 31 | SRBEGINNING=10000 32 | else 33 | echo "Starting SR at "$2 34 | SRBEGINNING=$2 35 | fi 36 | 37 | ## Default to no auth if not set 38 | if [[ -z $3 ]] 39 | then 40 | echo "No auth provided, proceeding without it" 41 | AUTH="" 42 | else 43 | AUTH=$3 44 | USERNAME=$(echo $AUTH | sed 's/:.*//') 45 | echo "Using username: "$USERNAME 46 | fi 47 | 48 | DUMMY_SUBJECT="dummy" 49 | DUMMY_SUBJECT_2="dummy2" 50 | SUBJECTMODE_URL=$SRURL'/mode/'$DUMMY_SUBJECT 51 | IMPORT_PAYLOAD='{"mode": "IMPORT"}' 52 | READWRITE_PAYLOAD='{"mode": "READWRITE"}' 53 | DUMMYSCHEMA_IMPORT="{\"schema\": \"{\\\"type\\\": \\\"bytes\\\"}\", \"id\": $SRBEGINNING, \"version\": 1}" 54 | DUMMY_SCHEMA='{"schema": "{\"type\": \"string\"}"}' 55 | SUBJECTSCHEMA_ENDPOINT=$SRURL'/subjects/'$DUMMY_SUBJECT'/versions' 56 | SUBJECTSCHEMA_ENDPOINT_2=$SRURL'/subjects/'$DUMMY_SUBJECT_2'/versions' 57 | CONTENT='Content-Type: application/json' 58 | 59 | ## Check we are dealing with a brand new SR 60 | RESULT_SUBJECTS=$(curl -u $AUTH --silent -k $SRURL"/schemas?deleted=true") 61 | if [ "$RESULT_SUBJECTS" != "[]" ] 62 | then 63 | echo "ERROR: Schema Registry is not empty. Aborting." 64 | exit 1 65 | fi 66 | 67 | ## Set IMPORT on a single subject 68 | RESULT_SETIMPORT=$(curl -u $AUTH --silent -k -X PUT -d "$IMPORT_PAYLOAD" -H "$CONTENT" $SUBJECTMODE_URL) 69 | 70 | ## Register dummy schema 71 | RESULT_REGISTER=$(curl -u $AUTH --silent -k -X POST -d "$DUMMYSCHEMA_IMPORT" -H "$CONTENT" $SUBJECTSCHEMA_ENDPOINT) 72 | 73 | ## Set READWRITE on a single subject 74 | RESULT_SETRW=$(curl -u $AUTH --silent -k -X PUT -d "$READWRITE_PAYLOAD" -H "$CONTENT" $SUBJECTMODE_URL) 75 | 76 | ## Soft delete dummy schema to maintain the beginning ID 77 | RESULT_DELETE=$(curl -u $AUTH --silent -k -X DELETE $SUBJECTSCHEMA_ENDPOINT"/1") 78 | HOLD=$(curl -u $AUTH --silent -k -X DELETE $SUBJECTSCHEMA_ENDPOINT"/1?permanent=true") 79 | 80 | ## Register another dummy and confirm ID=SRBEGINNING+1 <- Sanity Check 81 | OUTPUT=$(curl -u $AUTH --silent -k -X POST -d "$DUMMY_SCHEMA" -H "$CONTENT" $SUBJECTSCHEMA_ENDPOINT_2) 82 | SR_REGISTERED=$(echo $OUTPUT | tr -d -c 0-9) 83 | if (( $SR_REGISTERED >= $SRBEGINNING )); then 84 | echo "Validation Test Passed." 85 | echo "Schema Registry ID space now starts at "$SRBEGINNING+ 86 | else 87 | echo "Validation Test Failed."
88 | fi 89 | 90 | ## Cleanup 91 | HOLD=$(curl -u $AUTH --silent -k -X DELETE $SUBJECTSCHEMA_ENDPOINT_2"/1") 92 | HOLD=$(curl -u $AUTH --silent -k -X DELETE $SUBJECTSCHEMA_ENDPOINT_2"/1?permanent=true") 93 | -------------------------------------------------------------------------------- /samples/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | ccloud-schema-exporter: 5 | image: abrahamleal/ccloud-schema-exporter:latest 6 | environment: 7 | SRC_SR_URL: ${SRC_SR_URL} 8 | SRC_API_KEY: ${SRC_API_KEY} 9 | SRC_API_SECRET: ${SRC_API_SECRET} 10 | DST_SR_URL: ${DST_SR_URL} 11 | DST_API_KEY: ${DST_API_KEY} 12 | DST_API_SECRET: ${DST_API_SECRET} 13 | --------------------------------------------------------------------------------
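The docker-compose.yml above takes every connection setting from the environment. Below is a minimal sketch of a local .env file that could feed it, plus the command to bring the service up. All values shown are hypothetical placeholders; only the variable names (SRC_SR_URL, SRC_API_KEY, SRC_API_SECRET, DST_SR_URL, DST_API_KEY, DST_API_SECRET) come from the compose file itself. Docker Compose automatically reads a .env file placed next to the compose file when substituting ${...} references.

# .env -- sample only, replace the placeholder values with real endpoints and credentials
SRC_SR_URL=https://source-schema-registry.example.com
SRC_API_KEY=SOURCE_API_KEY_PLACEHOLDER
SRC_API_SECRET=SOURCE_API_SECRET_PLACEHOLDER
DST_SR_URL=https://destination-schema-registry.example.com
DST_API_KEY=DESTINATION_API_KEY_PLACEHOLDER
DST_API_SECRET=DESTINATION_API_SECRET_PLACEHOLDER

# run from the directory containing docker-compose.yml and .env
docker compose up -d

The same six variables can instead be exported in the shell before invoking Compose; either way, an unset variable is substituted as an empty string, so it is worth verifying all of them are populated before starting the exporter.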