├── .github ├── pull_request_template.md └── workflows │ ├── open_api_spec.yml │ ├── semantic-release.yml │ └── unittests.yml ├── .gitignore ├── .golangci.yml ├── .snyk ├── .tekton ├── patchman-engine-pull-request.yaml └── patchman-engine-push.yaml ├── .vscode └── settings.example.json ├── COMMITMENT ├── Dockerfile ├── Jenkinsfile-smoke.groovy ├── LICENSE ├── README.md ├── VERSION ├── base ├── api │ └── client.go ├── base.go ├── candlepin │ └── candlepin.go ├── core │ ├── config.go │ ├── gintesting.go │ ├── probes.go │ └── probes_test.go ├── database │ ├── baseline.go │ ├── baseline_test.go │ ├── batch.go │ ├── batch_test.go │ ├── database.go │ ├── database_test.go │ ├── dbtest.go │ ├── query.go │ ├── query_test.go │ ├── setup.go │ ├── setup_test.go │ ├── testing.go │ ├── utils.go │ └── utils_test.go ├── deprecations │ ├── base.go │ └── deprecations.go ├── errors.go ├── inventory │ └── inventory.go ├── metrics │ └── metrics.go ├── models │ └── models.go ├── mqueue │ ├── event.go │ ├── message.go │ ├── metrics.go │ ├── mqueue.go │ ├── mqueue_impl_gokafka.go │ ├── mqueue_test.go │ ├── payload_tracker_event.go │ ├── platform_event.go │ ├── platform_event_test.go │ ├── template_event.go │ └── testing.go ├── notification │ └── notification.go ├── rbac │ ├── rbac.go │ └── rbac_test.go ├── types │ └── timestamp.go ├── utils │ ├── awscloudwatch.go │ ├── config.go │ ├── config_test.go │ ├── core.go │ ├── core_test.go │ ├── gin.go │ ├── gin_test.go │ ├── http.go │ ├── http_test.go │ ├── identity.go │ ├── identity_test.go │ ├── log.go │ ├── log_test.go │ ├── metrics.go │ ├── openapi.go │ ├── rpm.go │ ├── rpm_test.go │ ├── testing.go │ ├── vmaas.go │ ├── vmaas_inputs_test.go │ └── vmaas_test.go └── vmaas │ └── vmaas.go ├── build_deploy.sh ├── conf ├── admin_api.env ├── cdappconfig.json ├── common.env ├── database.env ├── database_admin.env ├── evaluator_common.env ├── evaluator_recalc.env ├── evaluator_upload.env ├── gorun.env ├── kafka.env ├── listener.env ├── local.env ├── 
manager.env ├── platform.env ├── test.env └── vmaas_sync.env ├── dashboards ├── app-sre │ └── grafana-dashboard-insights-patchman-engine-general.configmap.yaml ├── grafana │ ├── dashboards │ │ └── dashboard.yml │ └── datasources │ │ └── prometheus_ds.yml └── prometheus │ └── prometheus.yml ├── database_admin ├── check-upgraded.sh ├── config.go ├── config.sql ├── entrypoint.sh ├── migrate.go ├── migrations │ ├── 100_create_schema.up.sql │ ├── 101_baseline_timestamps.down.sql │ ├── 101_baseline_timestamps.up.sql │ ├── 102_systems_status_divergent.down.sql │ ├── 102_systems_status_divergent.up.sql │ ├── 103_system_advisory_status.down.sql │ ├── 103_system_advisory_status.up.sql │ ├── 104_applicable_advisories.down.sql │ ├── 104_applicable_advisories.up.sql │ ├── 105_applicable_advisories_count.down.sql │ ├── 105_applicable_advisories_count.up.sql │ ├── 106_alphanumeric_collation.down.sql │ ├── 106_alphanumeric_collation.up.sql │ ├── 107_applicable_advisories.down.sql │ ├── 107_applicable_advisories.up.sql │ ├── 108_add_rhsm-system-profile-bridge.up.sql │ ├── 109_update_status.down.sql │ ├── 109_update_status.up.sql │ ├── 110_package_account_data_applicable.up.sql │ ├── 110_package_account_data_installable.down.sql │ ├── 111_immutable.down.sql │ ├── 111_immutable.up.sql │ ├── 112_advisory_cache_valid.down.sql │ ├── 112_advisory_cache_valid.up.sql │ ├── 113_satellite_managed.down.sql │ ├── 113_satellite_managed.up.sql │ ├── 114_built_pkgcache.down.sql │ ├── 114_built_pkgcache.up.sql │ ├── 115_system_packages2.down.sql │ ├── 115_system_packages2.up.sql │ ├── 116_delete_system_package2.down.sql │ ├── 116_delete_system_package2.up.sql │ ├── 117_migrate_system_package2.down.sql │ ├── 117_migrate_system_package2.up.sql │ ├── 118_migrate_system_package2.down.sql │ ├── 118_migrate_system_package2.up.sql │ ├── 119_migrate_system_package2.up.sql │ ├── 120_add_new_yupana_reporters.up.sql │ ├── 121_packages_applicable.down.sql │ ├── 121_packages_applicable.up.sql │ ├── 
122_remove_system_package.down.sql │ ├── 122_remove_system_package.up.sql │ ├── 123_yum_updates_unchanged_trigger.up.sql │ ├── 124_templates.down.sql │ ├── 124_templates.up.sql │ ├── 125_yum_checksum.down.sql │ ├── 125_yum_checksum.up.sql │ ├── 126_template_environment.down.sql │ ├── 126_template_environment.up.sql │ ├── 127_template_arch_version.down.sql │ ├── 127_template_arch_version.up.sql │ ├── 128_system_platform_bootc.down.sql │ ├── 128_system_platform_bootc.up.sql │ ├── 129_create_pg_repack.up.sql │ ├── 130_recreate_pg_repack_extension.up.sql │ ├── 131_remove_advisory_old_package_data.up.sql │ ├── 132_on_system_update_no_lock.up.sql │ └── 133_null_advisory_timestamps.up.sql ├── schema │ ├── clear_db.sql │ ├── create_schema.sql │ └── create_users.sql ├── schema_test.go └── update.go ├── deploy └── clowdapp.yaml ├── dev ├── create_inventory_hosts.sql ├── database │ ├── Dockerfile │ ├── custom.conf │ ├── init.sh │ ├── pg_hba.conf │ └── secrets │ │ ├── create_pg_certs.sh │ │ ├── pg.crt │ │ ├── pg.csr │ │ ├── pg.key │ │ ├── pgca.crt │ │ ├── pgca.key │ │ └── pgca.srl ├── grafana │ ├── Dockerfile │ └── extract_dashboard.py ├── kafka │ ├── Dockerfile │ ├── entrypoint.sh │ ├── secrets │ │ ├── broker_creds │ │ ├── ca.crt │ │ ├── create_certs.sh │ │ ├── kafka.broker.keystore.jks │ │ └── kafka.broker.truststore.jks │ ├── setup.sh │ └── upload.json ├── scripts │ ├── advisories_list.sh │ ├── advisory_detail.sh │ ├── advisory_systems.sh │ ├── docker-compose-entrypoint.sh │ ├── ephemeral-port-forward.sh │ ├── identity-system.sh │ ├── identity.sh │ ├── kafka-send-message.sh │ ├── metrics.sh │ ├── platform_delete.sh │ ├── platform_sync.sh │ ├── platform_toggle_upload.sh │ ├── platform_upload.sh │ ├── platform_vmaas_updates.sh │ ├── psql.sh │ ├── send-kafka-msg.go │ ├── system_detail.sh │ ├── system_packages.sh │ ├── systems_applicable.sh │ ├── systems_list.sh │ ├── wait-for-kafka.sh │ └── wait-for-services.sh ├── sonar │ ├── Dockerfile │ ├── docker-compose.yml │ └── run.sh 
├── test_data.sql ├── test_generate_data.sql └── version_data.sql ├── docker-compose.prod.yml ├── docker-compose.test.yml ├── docker-compose.yml ├── docs ├── admin │ └── openapi.json ├── docs.go ├── docs_test.go ├── md │ ├── architecture.md │ ├── database.md │ └── graphics │ │ ├── db_diagram.png │ │ ├── icon_gopher.png │ │ ├── icon_postgresql.png │ │ ├── icon_python.png │ │ ├── icon_typescript.png │ │ ├── schema.dot │ │ └── schema.png └── v3 │ └── openapi.json ├── evaluator ├── evaluate.go ├── evaluate_advisories.go ├── evaluate_advisories_test.go ├── evaluate_baseline.go ├── evaluate_baseline_test.go ├── evaluate_packages.go ├── evaluate_packages_test.go ├── evaluate_test.go ├── metrics.go ├── notifications.go ├── notifications_test.go ├── package_cache.go ├── package_cache_test.go ├── remediations.go ├── remediations_test.go ├── status.go ├── status_test.go └── vmaas_cache.go ├── go.mod ├── go.sum ├── listener ├── common_test.go ├── events.go ├── events_test.go ├── listener.go ├── listener_test.go ├── metrics.go ├── rhsm.go ├── rhsm_test.go ├── template_test.go ├── templates.go ├── upload.go └── upload_test.go ├── main.go ├── manager ├── config │ └── config.go ├── controllers │ ├── advisories.go │ ├── advisories_export.go │ ├── advisories_export_test.go │ ├── advisories_test.go │ ├── advisory_detail.go │ ├── advisory_detail_test.go │ ├── advisory_systems.go │ ├── advisory_systems_export.go │ ├── advisory_systems_export_test.go │ ├── advisory_systems_test.go │ ├── common_attributes.go │ ├── common_test.go │ ├── filter.go │ ├── filter_test.go │ ├── package_detail.go │ ├── package_detail_test.go │ ├── package_systems.go │ ├── package_systems_export.go │ ├── package_systems_export_test.go │ ├── package_systems_test.go │ ├── package_versions.go │ ├── package_versions_test.go │ ├── packages.go │ ├── packages_export.go │ ├── packages_export_test.go │ ├── packages_test.go │ ├── paging.go │ ├── paging_test.go │ ├── status.go │ ├── structures.go │ ├── system_advisories.go 
│ ├── system_advisories_export.go │ ├── system_advisories_export_test.go │ ├── system_advisories_test.go │ ├── system_delete.go │ ├── system_delete_test.go │ ├── system_detail.go │ ├── system_detail_test.go │ ├── system_packages.go │ ├── system_packages_export.go │ ├── system_packages_export_test.go │ ├── system_packages_test.go │ ├── systems.go │ ├── systems_advisories_view.go │ ├── systems_advisories_view_test.go │ ├── systems_auth_test.go │ ├── systems_export.go │ ├── systems_export_test.go │ ├── systems_ids_test.go │ ├── systems_test.go │ ├── systemtags.go │ ├── systemtags_test.go │ ├── template_subscribed_systems_update.go │ ├── template_subscribed_systems_update_test.go │ ├── template_systems.go │ ├── template_systems_delete.go │ ├── template_systems_delete_test.go │ ├── template_systems_export.go │ ├── template_systems_export_test.go │ ├── template_systems_test.go │ ├── template_systems_update.go │ ├── template_systems_update_test.go │ ├── templates.go │ ├── templates_test.go │ ├── test_utils.go │ ├── utils.go │ └── utils_test.go ├── kafka │ ├── kafka.go │ └── kafka_test.go ├── manager.go ├── middlewares │ ├── authentication.go │ ├── authentication_test.go │ ├── db.go │ ├── deprecations.go │ ├── limits.go │ ├── logger.go │ ├── prometheus.go │ ├── rbac.go │ ├── rbac_test.go │ ├── swagger.go │ └── timeout.go ├── models │ └── models_test.go └── routes │ └── routes.go ├── platform ├── candlepin.go ├── inventory.go ├── platform.go ├── rbac.go └── vmaas.go ├── pr_check.sh ├── renovate.json ├── scripts ├── check-caches.sh ├── check-deploy-envs.sh ├── check-dockercomposes.sh ├── check-openapi-docs.sh ├── colorize.sh ├── entrypoint.sh ├── export_local_env.sh ├── feed_db.go ├── generate_docs.sh ├── go_test.sh ├── go_test_db.sh ├── go_test_on_ci.sh ├── grafana-json-to-yaml.sh ├── increment_version.sh ├── re-calc.sh ├── sync.sh └── try_export_clowder_params.sh ├── sonar-project.properties ├── tasks ├── caches │ ├── caches.go │ ├── metrics.go │ ├── 
refresh_advisory_caches.go │ ├── refresh_advisory_caches_test.go │ ├── refresh_packages_caches.go │ └── refresh_packages_caches_test.go ├── cleaning │ ├── clean_advisory_account_data.go │ ├── clean_unused_data.go │ └── clean_unused_data_test.go ├── common.go ├── config.go ├── repack │ └── repack.go ├── system_culling │ ├── culling.go │ ├── metrics.go │ ├── system_culling.go │ └── system_culling_test.go └── vmaas_sync │ ├── advisory_sync.go │ ├── advisory_sync_test.go │ ├── dbchange.go │ ├── metrics.go │ ├── metrics_cyndi.go │ ├── metrics_cyndi_test.go │ ├── metrics_db.go │ ├── metrics_db_test.go │ ├── metrics_test.go │ ├── package_sync.go │ ├── package_sync_test.go │ ├── repo_based.go │ ├── repo_based_test.go │ ├── repo_sync.go │ ├── send_messages.go │ ├── vmaas_sync.go │ └── vmaas_sync_test.go └── turnpike ├── admin_api.go ├── auth └── turnpike.go └── controllers ├── admin.go ├── database.go └── pprof.go /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Secure Coding Practices Checklist GitHub Link 2 | - https://github.com/RedHatInsights/secure-coding-checklist 3 | 4 | ## Secure Coding Checklist 5 | - [x] Input Validation 6 | - [x] Output Encoding 7 | - [x] Authentication and Password Management 8 | - [x] Session Management 9 | - [x] Access Control 10 | - [x] Cryptographic Practices 11 | - [x] Error Handling and Logging 12 | - [x] Data Protection 13 | - [x] Communication Security 14 | - [x] System Configuration 15 | - [x] Database Security 16 | - [x] File Management 17 | - [x] Memory Management 18 | - [x] General Coding Practices 19 | -------------------------------------------------------------------------------- /.github/workflows/open_api_spec.yml: -------------------------------------------------------------------------------- 1 | name: OpenAPIv3 2 | 3 | on: 4 | push: 5 | pull_request: 6 | types: [ "opened", "synchronize", "reopened" ] 7 | 8 | jobs: 9 | spec_validation: 10 | name: Spec 
validation 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - uses: actions/checkout@v4 15 | - name: Spec validation with OpenAPIv3 (docs/v3/openapi.json) 16 | run: docker run --rm -v ${PWD}/docs:/docs:Z openapitools/openapi-generator-cli:v6.0.1 validate -i /docs/v3/openapi.json 17 | 18 | gen_client: 19 | name: Generate Python Client 20 | runs-on: ubuntu-latest 21 | env: 22 | working-directory: ./docs 23 | needs: [spec_validation] 24 | 25 | steps: 26 | - uses: actions/checkout@v4 27 | - name: Generate Python client with OpenAPIv3 (docs/v3/openapi.json) 28 | run: docker run --rm -v ${PWD}/docs/v3:/local:Z openapitools/openapi-generator-cli:v6.0.1 generate -i /local/openapi.json -g python -o /local/client 29 | - uses: actions/upload-artifact@v4 30 | with: 31 | name: Python-Client 32 | path: ${{env.working-directory}}/client 33 | -------------------------------------------------------------------------------- /.github/workflows/semantic-release.yml: -------------------------------------------------------------------------------- 1 | name: Semantic release 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4 13 | with: 14 | token: ${{ secrets.VMAAS_BOT_TOKEN }} 15 | - name: increment version, commit, push 16 | run: | 17 | DOC_FILE="docs/v3/openapi.json docs/admin/openapi.json" 18 | CLOWDER_FILE=deploy/clowdapp.yaml 19 | VERSION=$(cat VERSION) 20 | [ "$(git log -1 --pretty=%B)" == "$VERSION" ] && exit 0 21 | RELEASE_TYPE=$(git log -1 | tail -n1) # Check release type (/major, /minor, /patch (default)) 22 | VERSION_NEXT=$(./scripts/increment_version.sh $VERSION $RELEASE_TYPE) 23 | sed -i 's|\("version": "\)[^"]*\("\)$|'"\1$VERSION_NEXT\2|;" $DOC_FILE 24 | echo $VERSION_NEXT >VERSION 25 | git config --global user.name 'semantic-release' 26 | git config --global user.email '' 27 | git commit -am "${VERSION_NEXT}" 28 | git push 29 | git tag ${VERSION_NEXT} 30 | git push origin 
${VERSION_NEXT} 31 | -------------------------------------------------------------------------------- /.github/workflows/unittests.yml: -------------------------------------------------------------------------------- 1 | 2 | name: 🕵️ Test suite 3 | 4 | on: 5 | push: 6 | pull_request: 7 | types: [ "opened", "synchronize", "reopened" ] 8 | 9 | jobs: 10 | unit_tests: 11 | name: Unit Tests 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: Run unit tests 17 | run: docker compose -f docker-compose.test.yml up --build --exit-code-from test 18 | - uses: codecov/codecov-action@v4 19 | with: 20 | files: ./coverage.txt 21 | flags: unittests 22 | name: codecov-umbrella 23 | verbose: true 24 | env: 25 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | **/__pycache__ 3 | .venv 4 | .vscode 5 | coverage.txt 6 | vendor 7 | docs/swagger* 8 | -------------------------------------------------------------------------------- /.snyk: -------------------------------------------------------------------------------- 1 | # Snyk (https://snyk.io) policy file, patches or ignores known vulnerabilities. 
2 | version: v1.25.0 3 | ignore: 4 | SNYK-GOLANG-GITHUBCOMJACKCPGPROTO3-6371506: 5 | - "*": 6 | reason: Not a runtime dependency 7 | SNYK-GOLANG-GITHUBCOMMATTNGOSQLITE3-6139875: 8 | - "*": 9 | reason: Not a runtime dependency 10 | SNYK-GOLANG-GITHUBCOMDOCKERDOCKERLIBNETWORK-6474507: 11 | - "*": 12 | reason: Not a runtime dependency 13 | -------------------------------------------------------------------------------- /.vscode/settings.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "go.testEnvFile": "${workspaceFolder}/conf/local.env", 3 | "go.testEnvVars": { 4 | "ACG_CONFIG": "${workspaceFolder}/conf/cdappconfig.json", 5 | "DB_SSLROOTCERT": "${workspaceFolder}/dev/database/secrets/pgca.crt", 6 | "KAFKA_SSL_CERT": "${workspaceFolder}/dev/kafka/secrets/ca.crt" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /COMMITMENT: -------------------------------------------------------------------------------- 1 | GPL Cooperation Commitment 2 | Version 1.0 3 | 4 | Before filing or continuing to prosecute any legal proceeding or claim 5 | (other than a Defensive Action) arising from termination of a Covered 6 | License, we commit to extend to the person or entity ('you') accused 7 | of violating the Covered License the following provisions regarding 8 | cure and reinstatement, taken from GPL version 3. As used here, the 9 | term 'this License' refers to the specific Covered License being 10 | enforced. 11 | 12 | However, if you cease all violation of this License, then your 13 | license from a particular copyright holder is reinstated (a) 14 | provisionally, unless and until the copyright holder explicitly 15 | and finally terminates your license, and (b) permanently, if the 16 | copyright holder fails to notify you of the violation by some 17 | reasonable means prior to 60 days after the cessation. 
18 | 19 | Moreover, your license from a particular copyright holder is 20 | reinstated permanently if the copyright holder notifies you of the 21 | violation by some reasonable means, this is the first time you 22 | have received notice of violation of this License (for any work) 23 | from that copyright holder, and you cure the violation prior to 30 24 | days after your receipt of the notice. 25 | 26 | We intend this Commitment to be irrevocable, and binding and 27 | enforceable against us and assignees of or successors to our 28 | copyrights. 29 | 30 | Definitions 31 | 32 | 'Covered License' means the GNU General Public License, version 2 33 | (GPLv2), the GNU Lesser General Public License, version 2.1 34 | (LGPLv2.1), or the GNU Library General Public License, version 2 35 | (LGPLv2), all as published by the Free Software Foundation. 36 | 37 | 'Defensive Action' means a legal proceeding or claim that We bring 38 | against you in response to a prior proceeding or claim initiated by 39 | you or your affiliate. 40 | 41 | 'We' means each contributor to this repository as of the date of 42 | inclusion of this file, including subsidiaries of a corporate 43 | contributor. 44 | 45 | This work is available under a Creative Commons Attribution-ShareAlike 46 | 4.0 International license (https://creativecommons.org/licenses/by-sa/4.0/). 
47 | -------------------------------------------------------------------------------- /Jenkinsfile-smoke.groovy: -------------------------------------------------------------------------------- 1 | @Library("github.com/RedHatInsights/insights-pipeline-lib@v3") _ 2 | 3 | if (env.CHANGE_ID) { 4 | execSmokeTest ( 5 | ocDeployerBuilderPath: "patchman", 6 | ocDeployerComponentPath: "patchman", 7 | ocDeployerServiceSets: "patchman,ingress,inventory,platform-mq,rbac", 8 | iqePlugins: ["iqe-patchman-plugin"], 9 | pytestMarker: "patch_smoke", 10 | ) 11 | } 12 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | v3.7.165 2 | -------------------------------------------------------------------------------- /base/api/client.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "app/base/utils" 5 | "bytes" 6 | "context" 7 | "io" 8 | "net/http" 9 | 10 | "github.com/bytedance/sonic" 11 | "github.com/pkg/errors" 12 | ) 13 | 14 | type Client struct { 15 | HTTPClient *http.Client 16 | Debug bool 17 | DefaultHeaders map[string]string 18 | } 19 | 20 | func (o *Client) Request(ctx *context.Context, method, url string, 21 | requestPtr interface{}, responseOutPtr interface{}) (*http.Response, error) { 22 | body := &bytes.Buffer{} 23 | if requestPtr != nil { 24 | err := sonic.ConfigDefault.NewEncoder(body).Encode(requestPtr) 25 | if err != nil { 26 | return nil, errors.Wrap(err, "JSON encoding failed") 27 | } 28 | } 29 | 30 | httpReq, err := http.NewRequestWithContext(*ctx, method, url, body) 31 | if err != nil { 32 | return nil, errors.Wrap(err, "Request failed") 33 | } 34 | httpReq.Header.Add("Content-Type", "application/json") 35 | addHeaders(httpReq, o.DefaultHeaders) 36 | 37 | httpResp, err := utils.CallAPI(o.HTTPClient, httpReq, o.Debug) 38 | if err != nil { 39 | return httpResp, 
errors.Wrap(err, "Request failed") 40 | } 41 | 42 | err = sonic.ConfigDefault.NewDecoder(httpResp.Body).Decode(responseOutPtr) 43 | if err != nil { 44 | if errors.Is(err, io.EOF) { 45 | // empty response body 46 | return httpResp, nil 47 | } 48 | return httpResp, errors.Wrap(err, "Response body reading failed") 49 | } 50 | 51 | return httpResp, nil 52 | } 53 | 54 | func addHeaders(request *http.Request, headersMap map[string]string) { 55 | if headersMap == nil { 56 | return 57 | } 58 | for k, v := range headersMap { 59 | request.Header.Add(k, v) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /base/base.go: -------------------------------------------------------------------------------- 1 | package base 2 | 3 | import ( 4 | "app/base/utils" 5 | "context" 6 | "os" 7 | "os/signal" 8 | "strings" 9 | "syscall" 10 | 11 | "github.com/gin-gonic/gin" 12 | ) 13 | 14 | const VMaaSAPIPrefix = "/api/v3" 15 | const RBACApiPrefix = "/api/rbac/v1" 16 | 17 | var Context context.Context 18 | var CancelContext context.CancelFunc 19 | 20 | func init() { 21 | Context, CancelContext = context.WithCancel(context.Background()) 22 | } 23 | 24 | func HandleSignals() { 25 | c := make(chan os.Signal, 1) 26 | signal.Notify(c, syscall.SIGINT, syscall.SIGTERM) 27 | go func() { 28 | <-c 29 | CancelContext() 30 | utils.LogInfo("SIGTERM/SIGINT handled") 31 | }() 32 | } 33 | 34 | func remove(r rune) rune { 35 | if r == 0 { 36 | return -1 37 | } 38 | return r 39 | } 40 | 41 | // Removes characters, which are not accepted by postgresql driver 42 | // in parameter values 43 | func RemoveInvalidChars(s string) string { 44 | return strings.Map(remove, s) 45 | } 46 | 47 | // TryExposeOnMetricsPort Expose app on required port if set 48 | func TryExposeOnMetricsPort(app *gin.Engine) { 49 | metricsPort := utils.CoreCfg.MetricsPort 50 | if metricsPort == -1 { 51 | return // Do not expose extra metrics port if not set 52 | } 53 | err := 
utils.RunServer(Context, app, metricsPort) 54 | if err != nil { 55 | utils.LogError("err", err.Error()) 56 | panic(err) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /base/core/config.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "app/base/database" 5 | "app/base/metrics" 6 | "app/base/utils" 7 | "testing" 8 | ) 9 | 10 | var ( 11 | DefaultLimit = 20 12 | DefaultOffset = 0 13 | testSetupRan = false 14 | dbWait = utils.PodConfig.GetString("wait_for_db", "empty") 15 | ) 16 | 17 | func ConfigureApp() { 18 | utils.ConfigureLogging() 19 | database.Configure() 20 | metrics.Configure() 21 | database.DBWait(dbWait) 22 | } 23 | 24 | func SetupTestEnvironment() { 25 | utils.SetDefaultEnvOrFail("LOG_LEVEL", "debug") 26 | ConfigureApp() 27 | } 28 | 29 | func SetupTest(t *testing.T) { 30 | if !testSetupRan { 31 | utils.SkipWithoutDB(t) 32 | SetupTestEnvironment() 33 | testSetupRan = true 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /base/core/gintesting.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "app/base/database" 5 | "app/base/utils" 6 | "app/manager/middlewares" 7 | 8 | "github.com/gin-gonic/gin" 9 | ) 10 | 11 | const LatestAPIVersion = 3 12 | 13 | type ContextKV struct { 14 | Key string 15 | Value any 16 | } 17 | 18 | var V3APICtx = ContextKV{Key: utils.KeyApiver, Value: 3} 19 | 20 | func InitRouter(handler gin.HandlerFunc, contextKVs ...ContextKV) *gin.Engine { 21 | return InitRouterWithPath(handler, "/", contextKVs...) 
22 | } 23 | 24 | func InitRouterWithParams(handler gin.HandlerFunc, account int, method, path string, 25 | contextKVs ...ContextKV) *gin.Engine { 26 | router := gin.Default() 27 | router.Use(middlewares.RequestResponseLogger()) 28 | router.Use(middlewares.MockAuthenticator(account)) 29 | if database.DB != nil { 30 | router.Use(middlewares.DatabaseWithContext()) 31 | } 32 | router.Use(func(c *gin.Context) { 33 | // set default api version for tests to latest 34 | c.Set(utils.KeyApiver, LatestAPIVersion) 35 | for _, kv := range contextKVs { 36 | c.Set(kv.Key, kv.Value) 37 | } 38 | }) 39 | router.Handle(method, path, handler) 40 | return router 41 | } 42 | 43 | func InitRouterWithPath(handler gin.HandlerFunc, path string, contextKVs ...ContextKV) *gin.Engine { 44 | return InitRouterWithParams(handler, 1, "GET", path, contextKVs...) 45 | } 46 | 47 | func InitRouterWithAccount(handler gin.HandlerFunc, path string, account int, contextKVs ...ContextKV) *gin.Engine { 48 | return InitRouterWithParams(handler, account, "GET", path, contextKVs...) 
49 | } 50 | -------------------------------------------------------------------------------- /base/core/probes.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "app/base/database" 5 | "net/http" 6 | 7 | "github.com/gin-gonic/gin" 8 | ) 9 | 10 | func Liveness(c *gin.Context) { 11 | c.JSON(http.StatusOK, "ok") 12 | } 13 | 14 | func Readiness(c *gin.Context) { 15 | sqlDB, err := database.DB.DB() 16 | if err != nil { 17 | c.JSON(http.StatusServiceUnavailable, gin.H{"err": err.Error()}) 18 | return 19 | } 20 | err = sqlDB.Ping() 21 | if err != nil { 22 | c.JSON(http.StatusServiceUnavailable, gin.H{"err": err.Error()}) 23 | return 24 | } 25 | c.JSON(http.StatusOK, "ok") 26 | } 27 | 28 | func InitProbes(app *gin.Engine) { 29 | // public routes - deprecated 30 | app.GET("/liveness", Liveness) 31 | app.GET("/readiness", Readiness) 32 | 33 | // public routes 34 | app.GET("/healthz", Liveness) 35 | app.GET("/livez", Liveness) 36 | app.GET("/readyz", Readiness) 37 | } 38 | -------------------------------------------------------------------------------- /base/core/probes_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "app/base/database" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestLiveness(t *testing.T) { 13 | w := httptest.NewRecorder() 14 | req, _ := http.NewRequest("GET", "/", nil) 15 | InitRouter(Liveness).ServeHTTP(w, req) 16 | assert.Equal(t, http.StatusOK, w.Code) 17 | } 18 | 19 | func TestReadiness(t *testing.T) { 20 | SetupTest(t) 21 | 22 | w := httptest.NewRecorder() 23 | req, _ := http.NewRequest("GET", "/", nil) 24 | InitRouter(Readiness).ServeHTTP(w, req) 25 | assert.Equal(t, http.StatusOK, w.Code) 26 | } 27 | 28 | func TestReadinessFail(t *testing.T) { 29 | SetupTest(t) 30 | 31 | sqlDB, _ := database.DB.DB() 32 | assert.Nil(t, 
sqlDB.Close()) 33 | w := httptest.NewRecorder() 34 | req, _ := http.NewRequest("GET", "/", nil) 35 | InitRouter(Readiness).ServeHTTP(w, req) 36 | assert.Equal(t, http.StatusServiceUnavailable, w.Code) 37 | } 38 | -------------------------------------------------------------------------------- /base/database/baseline.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "app/base/models" 5 | "app/base/utils" 6 | "time" 7 | 8 | "github.com/bytedance/sonic" 9 | ) 10 | 11 | type BaselineConfig struct { 12 | // Filter applicable advisories (updates) by the latest publish time. 13 | ToTime time.Time `json:"to_time" example:"2022-12-31T12:00:00-04:00"` 14 | } 15 | 16 | func GetBaselineConfig(system *models.SystemPlatform) *BaselineConfig { 17 | if system.BaselineID == nil { 18 | return nil 19 | } 20 | 21 | var jsonb [][]byte 22 | err := DB.Table("baseline"). 23 | Where("id = ? and rh_account_id = ?", system.BaselineID, system.RhAccountID). 
24 | Pluck("config", &jsonb).Error 25 | if err != nil { 26 | utils.LogError("baseline_id", system.BaselineID, "err", err.Error(), 27 | "Unable to load baseline config from db") 28 | return nil 29 | } 30 | 31 | var config BaselineConfig 32 | if len(jsonb) > 0 && len(jsonb[0]) == 0 { 33 | utils.LogDebug("baseline_id", system.BaselineID, "Empty baseline config found") 34 | return nil 35 | } 36 | 37 | err = sonic.Unmarshal(jsonb[0], &config) 38 | if err != nil { 39 | utils.LogError("err", err.Error(), "baseline_id", system.BaselineID, 40 | "config", string(jsonb[0]), "Can't parse baseline") 41 | return nil 42 | } 43 | return &config 44 | } 45 | -------------------------------------------------------------------------------- /base/database/baseline_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "app/base/models" 5 | "app/base/utils" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestBaselineConfig(t *testing.T) { 12 | utils.SkipWithoutDB(t) 13 | Configure() 14 | 15 | // system without baseline 16 | system := models.SystemPlatform{ID: 8, RhAccountID: 1, BaselineID: nil} 17 | baselineConfig := GetBaselineConfig(&system) 18 | assert.Nil(t, baselineConfig) 19 | 20 | // system with existing baseline 21 | system = models.SystemPlatform{ID: int64(1), RhAccountID: 1, BaselineID: utils.PtrInt64(1)} 22 | baselineConfig = GetBaselineConfig(&system) 23 | assert.Equal(t, "2010-09-22 00:00:00+00", baselineConfig.ToTime.Format("2006-01-02 15:04:05-07")) 24 | 25 | baselineID := CreateBaselineWithConfig(t, "", nil, nil, nil) 26 | // baseline with empty config 27 | system = models.SystemPlatform{ID: 1, RhAccountID: 1, BaselineID: &baselineID} 28 | baselineConfig = GetBaselineConfig(&system) 29 | assert.Nil(t, baselineConfig) 30 | DeleteBaseline(t, baselineID) 31 | } 32 | -------------------------------------------------------------------------------- 
/base/database/database.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "gorm.io/gorm" 5 | "gorm.io/gorm/clause" 6 | ) 7 | 8 | // Appends `ON CONFLICT (key...) DO UPDATE SET (fields) to following insert query 9 | func OnConflictUpdate(db *gorm.DB, key string, updateCols ...string) *gorm.DB { 10 | return OnConflictUpdateMulti(db, []string{key}, updateCols...) 11 | } 12 | 13 | // Appends `ON CONFLICT (key...) DO UPDATE SET (fields) to following insert query with multiple key fields 14 | func OnConflictUpdateMulti(db *gorm.DB, keys []string, updateCols ...string) *gorm.DB { 15 | confilctColumns := []clause.Column{} 16 | for _, key := range keys { 17 | confilctColumns = append(confilctColumns, clause.Column{Name: key}) 18 | } 19 | return db.Clauses(clause.OnConflict{ 20 | Columns: confilctColumns, 21 | DoUpdates: clause.AssignmentColumns(updateCols), 22 | }) 23 | } 24 | 25 | type UpExpr struct { 26 | Name string 27 | Expr string 28 | } 29 | 30 | func OnConflictDoUpdateExpr(db *gorm.DB, keys []string, updateExprs ...UpExpr) *gorm.DB { 31 | updateColsValues := make(map[string]interface{}) 32 | for _, v := range updateExprs { 33 | updateColsValues[v.Name] = v.Expr 34 | } 35 | confilctColumns := []clause.Column{} 36 | for _, key := range keys { 37 | confilctColumns = append(confilctColumns, clause.Column{Name: key}) 38 | } 39 | if len(updateColsValues) > 0 { 40 | return db.Clauses(clause.OnConflict{ 41 | Columns: confilctColumns, 42 | DoUpdates: clause.Assignments(updateColsValues), 43 | }) 44 | } 45 | return db 46 | } 47 | -------------------------------------------------------------------------------- /base/database/database_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "app/base" 5 | "app/base/utils" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestOnConflictDoUpdate(t 
*testing.T) { 12 | utils.SkipWithoutDB(t) 13 | Configure() 14 | 15 | err := DB.AutoMigrate(&TestTable{}) 16 | assert.NoError(t, err) 17 | err = DB.Unscoped().Delete(&TestTable{}, "true").Error 18 | assert.NoError(t, err) 19 | 20 | obj := TestTable{ 21 | Name: "Bla", 22 | Email: "Bla", 23 | } 24 | 25 | assert.Equal(t, nil, OnConflictUpdate(DB, "id", "name", "email").Create(&obj).Error) 26 | 27 | var read TestTable 28 | DB.Find(&read, obj.ID) 29 | 30 | assert.Equal(t, obj.ID, read.ID) 31 | assert.Equal(t, obj.Name, read.Name) 32 | assert.Equal(t, obj.Email, read.Email) 33 | 34 | obj.Name = "" 35 | 36 | assert.Equal(t, nil, OnConflictUpdate(DB, "id", "name", "email").Create(&obj).Error) 37 | 38 | DB.Find(&read, obj.ID) 39 | 40 | assert.Equal(t, obj.ID, read.ID) 41 | assert.Equal(t, obj.Name, read.Name) 42 | assert.Equal(t, obj.Email, read.Email) 43 | } 44 | 45 | func TestCancelContext(t *testing.T) { 46 | utils.SkipWithoutDB(t) 47 | Configure() 48 | 49 | tx := DB.WithContext(base.Context).Begin() 50 | base.CancelContext() 51 | err := tx.Exec("select pg_sleep(1)").Error 52 | assert.NotNil(t, err) 53 | assert.Equal(t, "context canceled", err.Error()) 54 | } 55 | 56 | func TestStatementTimeout(t *testing.T) { 57 | utils.CoreCfg.DBStatementTimeoutMs = 100 58 | utils.SkipWithoutDB(t) 59 | Configure() 60 | 61 | err := DB.Exec("select pg_sleep(10)").Error 62 | assert.NotNil(t, err) 63 | assert.Equal(t, "ERROR: canceling statement due to statement timeout (SQLSTATE 57014)", err.Error()) 64 | } 65 | -------------------------------------------------------------------------------- /base/database/dbtest.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | type TestTable struct { 4 | ID uint `gorm:"primaryKey"` 5 | Name string `gorm:"unique"` 6 | Email string 7 | } 8 | 9 | type TestTableSlice []TestTable 10 | -------------------------------------------------------------------------------- /base/database/setup_test.go: 
-------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "app/base/utils" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestDBCheck(t *testing.T) { 10 | utils.SkipWithoutDB(t) 11 | Configure() 12 | } 13 | 14 | func TestAdditionalParams(t *testing.T) { 15 | utils.SkipWithoutDB(t) 16 | Configure() 17 | 18 | assert.True(t, len(AdvisoryTypes) == 5) 19 | } 20 | -------------------------------------------------------------------------------- /base/database/utils_test.go: -------------------------------------------------------------------------------- 1 | package database 2 | 3 | import ( 4 | "app/base/utils" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | var ( 11 | // counts of systems from system_platform JOIN inventory.hosts 12 | nGroup1 int64 = 7 13 | nGroup2 int64 = 2 14 | nUngrouped int64 = 7 15 | nAll int64 = 18 16 | ) 17 | 18 | var testCases = []map[int64]map[string]string{ 19 | {nGroup1: {utils.KeyGrouped: `{"[{\"id\":\"inventory-group-1\"}]"}`}}, 20 | {nGroup2: {utils.KeyGrouped: `{"[{\"id\":\"inventory-group-2\"}]"}`}}, 21 | {nGroup1 + nGroup2: {utils.KeyGrouped: `{"[{\"id\":\"inventory-group-1\"}]","[{\"id\":\"inventory-group-2\"}]"}`}}, 22 | {nGroup1 + nUngrouped: { 23 | utils.KeyGrouped: `{"[{\"id\":\"inventory-group-1\"}]"}`, 24 | utils.KeyUngrouped: "[]", 25 | }}, 26 | {nUngrouped: { 27 | utils.KeyGrouped: `{"[{\"id\":\"non-existing-group\"}]"}`, 28 | utils.KeyUngrouped: "[]", 29 | }}, 30 | {0: {utils.KeyGrouped: `{"[{\"id\":\"non-existing-group\"}]"}`}}, 31 | {nUngrouped: {utils.KeyUngrouped: "[]"}}, 32 | {nAll: {}}, 33 | {nAll: nil}, 34 | } 35 | 36 | func TestInventoryHostsJoin(t *testing.T) { 37 | utils.SkipWithoutDB(t) 38 | Configure() 39 | 40 | for _, tc := range testCases { 41 | for expectedCount, groups := range tc { 42 | var count int64 43 | InventoryHostsJoin(DB.Table("system_platform sp"), groups).Count(&count) 44 | 
assert.Equal(t, expectedCount, count) 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /base/deprecations/deprecations.go: -------------------------------------------------------------------------------- 1 | package deprecations 2 | 3 | import ( 4 | "app/base/utils" 5 | "time" 6 | 7 | "github.com/gin-gonic/gin" 8 | ) 9 | 10 | // Deprecate maximum `limit` 11 | func DeprecateLimit() Deprecation { 12 | return limitDeprecation{ 13 | deprecationTimestamp: time.Date(2024, 3, 1, 0, 0, 0, 0, time.UTC), 14 | message: "limit must be in [1, 100]", 15 | shouldDeprecate: func(c *gin.Context) bool { 16 | limit, err := utils.LoadParamInt(c, "limit", 20, true) 17 | if err == nil && (limit < 1 || limit > 100) { 18 | return true 19 | } 20 | return false 21 | }, 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /base/errors.go: -------------------------------------------------------------------------------- 1 | package base 2 | 3 | import ( 4 | stdErrors "errors" 5 | 6 | "github.com/pkg/errors" 7 | ) 8 | 9 | var ( 10 | ErrDatabase = errors.New("database error") 11 | ErrKafka = errors.New("kafka error") 12 | ErrBadRequest = errors.New("bad request") 13 | ErrNotFound = errors.New("not found") 14 | ErrFatal = errors.New("fatal error restarting pod") 15 | ) 16 | 17 | func WrapFatalError(err error, message string) error { 18 | return wrapErrors(err, message) 19 | } 20 | 21 | func WrapFatalDBError(err error, message string) error { 22 | return wrapErrors(err, message, ErrFatal, ErrDatabase) 23 | } 24 | 25 | func WrapFatalKafkaError(err error, message string) error { 26 | return wrapErrors(err, message, ErrFatal, ErrKafka) 27 | } 28 | 29 | func wrapErrors(err error, message string, errs ...error) error { 30 | if err == nil { 31 | return nil 32 | } 33 | errsJoined := stdErrors.Join(errs...) 
34 | err = stdErrors.Join(errsJoined, err) 35 | return errors.Wrap(err, message) 36 | } 37 | -------------------------------------------------------------------------------- /base/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "app/base/mqueue" 5 | "app/base/utils" 6 | "os" 7 | "strings" 8 | 9 | "github.com/prometheus/client_golang/prometheus" 10 | ) 11 | 12 | var ( 13 | KafkaConnectionErrorCnt = prometheus.NewCounterVec(prometheus.CounterOpts{ 14 | Help: "Counter vector measuring Kafka connection issues when trying to read or write a message", 15 | Namespace: "patchman_engine", 16 | Subsystem: "core", 17 | Name: "kafka_connection_errors", 18 | }, []string{"type"}) 19 | 20 | EngineVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 21 | Help: "Patchman project deployment information", 22 | Namespace: "patchman_engine", 23 | Subsystem: "core", 24 | Name: "info", 25 | }, []string{"version"}) 26 | ) 27 | 28 | func init() { 29 | if utils.CoreCfg.KafkaAddress != "" { 30 | prometheus.MustRegister(KafkaConnectionErrorCnt) 31 | } 32 | prometheus.MustRegister(EngineVersion) 33 | engineVersion, _ := os.ReadFile("VERSION") 34 | EngineVersion.WithLabelValues(strings.TrimSuffix(string(engineVersion), "\n")).Set(1) 35 | } 36 | 37 | func Configure() { 38 | if utils.CoreCfg.KafkaAddress != "" { 39 | mqueue.SetKafkaErrorReadCnt(KafkaConnectionErrorCnt.WithLabelValues("read")) 40 | mqueue.SetKafkaErrorWriteCnt(KafkaConnectionErrorCnt.WithLabelValues("write")) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /base/mqueue/event.go: -------------------------------------------------------------------------------- 1 | package mqueue 2 | 3 | import ( 4 | "app/base/utils" 5 | "time" 6 | 7 | "github.com/bytedance/sonic" 8 | "github.com/lestrrat-go/backoff/v2" 9 | "golang.org/x/net/context" 10 | ) 11 | 12 | var BatchSize = 
utils.PodConfig.GetInt("msg_batch_size", 4000) 13 | 14 | var policy = backoff.Exponential( 15 | backoff.WithMinInterval(time.Second), 16 | backoff.WithMaxRetries(5), 17 | ) 18 | 19 | type EventHandler func(message PlatformEvent) error 20 | 21 | type MessageData interface { 22 | WriteEvents(ctx context.Context, w Writer) error 23 | } 24 | 25 | // Performs parsing of kafka message, and then dispatches this message into provided functions 26 | func MakeMessageHandler(eventHandler EventHandler) MessageHandler { 27 | return func(m KafkaMessage) error { 28 | var event PlatformEvent 29 | err := sonic.Unmarshal(m.Value, &event) 30 | // Not a fatal error, invalid data format, log and skip 31 | if err != nil { 32 | utils.LogError("err", err.Error(), "Could not deserialize platform event") 33 | return nil 34 | } 35 | return eventHandler(event) 36 | } 37 | } 38 | 39 | func SendMessages(ctx context.Context, w Writer, data MessageData) error { 40 | return data.WriteEvents(ctx, w) 41 | } 42 | -------------------------------------------------------------------------------- /base/mqueue/message.go: -------------------------------------------------------------------------------- 1 | package mqueue 2 | 3 | import "github.com/bytedance/sonic" 4 | 5 | func MessageFromJSON(k string, v interface{}) (KafkaMessage, error) { 6 | var m KafkaMessage 7 | var err error 8 | 9 | m.Key = []byte(k) 10 | m.Value, err = sonic.Marshal(v) 11 | return m, err 12 | } 13 | -------------------------------------------------------------------------------- /base/mqueue/metrics.go: -------------------------------------------------------------------------------- 1 | package mqueue 2 | 3 | type Counter interface { 4 | Inc() 5 | } 6 | 7 | var ( 8 | kafkaErrorReadCnt Counter = &emptyCnt{} 9 | kafkaErrorWriteCnt Counter = &emptyCnt{} 10 | ) 11 | 12 | func SetKafkaErrorReadCnt(cnt Counter) { 13 | kafkaErrorReadCnt = cnt 14 | } 15 | 16 | func SetKafkaErrorWriteCnt(cnt Counter) { 17 | kafkaErrorWriteCnt = cnt 18 | } 19 
| 20 | type emptyCnt struct{} 21 | 22 | func (t *emptyCnt) Inc() {} 23 | -------------------------------------------------------------------------------- /base/mqueue/mqueue_test.go: -------------------------------------------------------------------------------- 1 | package mqueue 2 | 3 | import ( 4 | "app/base/utils" 5 | "context" 6 | "errors" 7 | "sync" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | const id = "99c0ffee-0000-0000-0000-0000c0ffee99" 14 | const someid = "99c0ffee-0000-0000-0000-0000000050de" 15 | 16 | var msg = KafkaMessage{Value: []byte(`{"id": "` + id + `", "type": "delete"}`)} 17 | 18 | func TestParseEvents(t *testing.T) { 19 | reached := false 20 | 21 | err := MakeMessageHandler(func(event PlatformEvent) error { 22 | assert.Equal(t, event.ID, id) 23 | assert.Equal(t, *event.Type, "delete") 24 | reached = true 25 | return nil 26 | })(msg) 27 | 28 | assert.True(t, reached, "Event handler should have been called") 29 | assert.NoError(t, err) 30 | } 31 | 32 | func TestRoundTripKafkaGo(t *testing.T) { 33 | utils.SkipWithoutPlatform(t) 34 | reader := NewKafkaReaderFromEnv("test") 35 | 36 | var eventOut PlatformEvent 37 | go reader.HandleMessages(MakeMessageHandler(func(event PlatformEvent) error { 38 | eventOut = event 39 | return nil 40 | })) 41 | 42 | writer := NewKafkaWriterFromEnv("test") 43 | eventIn := PlatformEvent{ID: someid} 44 | assert.NoError(t, writePlatformEvents(context.Background(), writer, eventIn)) 45 | utils.AssertEqualWait(t, 10, func() (exp, act interface{}) { 46 | return eventIn.ID, eventOut.ID 47 | }) 48 | } 49 | 50 | func TestSpawnReader(t *testing.T) { 51 | var nReaders int32 52 | wg := sync.WaitGroup{} 53 | SpawnReader(&wg, "", CreateCountedMockReader(&nReaders), 54 | MakeMessageHandler(func(_ PlatformEvent) error { return nil })) 55 | wg.Wait() 56 | assert.Equal(t, 1, int(nReaders)) 57 | } 58 | 59 | func TestRetry(t *testing.T) { 60 | i := 0 61 | handler := func(_ PlatformEvent) error { 62 | 
i++ 63 | if i < 2 { 64 | return errors.New("Failed") 65 | } 66 | return nil 67 | } 68 | 69 | // Without retry handler should fail 70 | assert.Error(t, MakeMessageHandler(handler)(msg)) 71 | 72 | // With retry we handler should eventually succeed 73 | assert.NoError(t, MakeRetryingHandler(MakeMessageHandler(handler))(msg)) 74 | } 75 | -------------------------------------------------------------------------------- /base/mqueue/payload_tracker_event.go: -------------------------------------------------------------------------------- 1 | package mqueue 2 | 3 | import ( 4 | "app/base/types" 5 | "app/base/utils" 6 | "time" 7 | 8 | "github.com/bytedance/sonic" 9 | "github.com/pkg/errors" 10 | "golang.org/x/net/context" 11 | ) 12 | 13 | type PayloadTrackerEvent struct { 14 | Service string `json:"service"` 15 | OrgID *string `json:"org_id,omitempty"` 16 | RequestID *string `json:"request_id"` 17 | InventoryID string `json:"inventory_id"` 18 | Status string `json:"status"` 19 | StatusMsg string `json:"status_msg,omitempty"` 20 | Date *types.Rfc3339TimestampWithZ `json:"date"` 21 | } 22 | 23 | type PayloadTrackerEvents []PayloadTrackerEvent 24 | 25 | var enablePayloadTracker = utils.PodConfig.GetBool("payload_tracker", true) 26 | 27 | func (event *PayloadTrackerEvent) write(ctx context.Context, w Writer) error { 28 | data, err := sonic.Marshal(event) 29 | if err != nil { 30 | return errors.Wrap(err, "Serializing event") 31 | } 32 | msg := KafkaMessage{Value: data} 33 | return w.WriteMessages(ctx, msg) 34 | } 35 | 36 | func writeEvent(ctx context.Context, w Writer, event *PayloadTrackerEvent, 37 | timestamp *types.Rfc3339TimestampWithZ) (err error) { 38 | if event.RequestID != nil && event.OrgID != nil { 39 | // Send only messages from listener and evaluator-upload 40 | event.Service = "patchman" 41 | event.Date = timestamp 42 | err = event.write(ctx, w) 43 | } 44 | return err 45 | } 46 | 47 | func (events PayloadTrackerEvents) WriteEvents(ctx context.Context, w Writer) 
error { 48 | if !enablePayloadTracker { 49 | return nil 50 | } 51 | var err error 52 | now := types.Rfc3339TimestampWithZ(time.Now()) 53 | for i := range events { 54 | err = writeEvent(ctx, w, &events[i], &now) 55 | } 56 | return err 57 | } 58 | 59 | func (event *PayloadTrackerEvent) WriteEvents(ctx context.Context, w Writer) error { 60 | if !enablePayloadTracker { 61 | return nil 62 | } 63 | now := types.Rfc3339TimestampWithZ(time.Now()) 64 | err := writeEvent(ctx, w, event, &now) 65 | return err 66 | } 67 | -------------------------------------------------------------------------------- /base/mqueue/platform_event_test.go: -------------------------------------------------------------------------------- 1 | package mqueue 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/bytedance/sonic" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestWriteEventsOfInventoryAccounts(t *testing.T) { 12 | const ( 13 | acc = 1 14 | inv2 = "00000000-0000-0000-0000-000000000002" 15 | inv3 = "00000000-0000-0000-0000-000000000003" 16 | ) 17 | 18 | var writer Writer = &MockKafkaWriter{} 19 | 20 | orgID := "org_1" 21 | var invs EvalDataSlice = []EvalData{ 22 | {InventoryID: inv2, RhAccountID: acc, OrgID: &orgID}, 23 | {InventoryID: inv3, RhAccountID: acc, OrgID: &orgID}} 24 | 25 | assert.Nil(t, SendMessages(context.Background(), writer, &invs)) 26 | 27 | mockWriter := writer.(*MockKafkaWriter) 28 | assert.True(t, len(mockWriter.Messages) > 0) 29 | 30 | var event PlatformEvent 31 | assert.Nil(t, sonic.Unmarshal(mockWriter.Messages[0].Value, &event)) 32 | assert.Equal(t, orgID, event.GetOrgID()) 33 | assert.Equal(t, acc, event.AccountID) 34 | assert.True(t, len(event.SystemIDs) == 2) 35 | assert.Equal(t, inv2, event.SystemIDs[0]) 36 | assert.Equal(t, inv3, event.SystemIDs[1]) 37 | } 38 | -------------------------------------------------------------------------------- /base/mqueue/template_event.go: 
// nolint:lll
// copied from https://github.com/content-services/content-sources-backend/blob/6fe3dd9409cfc048eefb07d60d31574da2a47217/pkg/api/templates.go#L20-L30
// importing "github.com/content-services/content-sources-backend/pkg/api"
// adds too many dependencies and some are incompatible

// TemplateResponse mirrors the content-sources template API payload.
type TemplateResponse struct {
	UUID            string    `json:"uuid" readonly:"true"`
	Name            string    `json:"name"`                  // Name of the template
	OrgID           string    `json:"org_id"`                // Organization ID of the owner
	Description     *string   `json:"description"`           // Description of the template
	Arch            string    `json:"arch"`                  // Architecture of the template
	Version         string    `json:"version"`               // Version of the template
	Date            time.Time `json:"date"`                  // Latest date to include snapshots for
	RepositoryUUIDS []string  `json:"repository_uuids"`      // Repositories added to the template
	EnvironmentID   string    `json:"client_environment_id"` // Environment ID used by subscription-manager & candlepin
}

// TemplateEvent is the Kafka event envelope carrying template updates.
type TemplateEvent struct {
	ID      string             `json:"id"`
	Type    string             `json:"type"`
	Source  string             `json:"source"`
	Subject string             `json:"subject"`
	Time    time.Time          `json:"time"`
	OrgID   string             `json:"redhatorgid"`
	Data    []TemplateResponse `json:"data"`
}
14 | func CreateCountedMockReader(cnt *int32) CreateReader { 15 | return func(_ string) Reader { 16 | reader := &mockReader{} 17 | atomic.AddInt32(cnt, 1) 18 | return reader 19 | } 20 | } 21 | 22 | type MockKafkaWriter struct { 23 | Messages []KafkaMessage 24 | } 25 | 26 | func (t *MockKafkaWriter) WriteMessages(_ context.Context, ev ...KafkaMessage) error { 27 | t.Messages = append(t.Messages, ev...) 28 | return nil 29 | } 30 | 31 | func MockCreateKafkaWriter(writer Writer) CreateWriter { 32 | return func(_ string) Writer { 33 | return writer 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /base/rbac/rbac.go: -------------------------------------------------------------------------------- 1 | package rbac 2 | 3 | import "github.com/bytedance/sonic" 4 | 5 | type AccessPagination struct { 6 | Data []Access `json:"data"` 7 | } 8 | 9 | type Access struct { 10 | Permission string `json:"permission"` 11 | ResourceDefinitions []ResourceDefinition `json:"resourceDefinitions"` 12 | } 13 | 14 | type ResourceDefinition struct { 15 | AttributeFilter AttributeFilter `json:"attributeFilter,omitempty"` 16 | } 17 | 18 | type AttributeFilterValue []*string 19 | 20 | type AttributeFilter struct { 21 | Key string `json:"key"` 22 | Value AttributeFilterValue `json:"value"` 23 | Operation string `json:"operation"` 24 | } 25 | 26 | type inventoryGroup struct { 27 | ID *string `json:"id,omitempty"` 28 | Name *string `json:"name,omitempty"` 29 | } 30 | 31 | type InventoryGroup []inventoryGroup 32 | 33 | func (a *AttributeFilterValue) UnmarshalJSON(data []byte) error { 34 | var ( 35 | array []*string 36 | value *string 37 | err error 38 | ) 39 | 40 | if err = sonic.Unmarshal(data, &array); err != nil { 41 | // parsing of AttributeFilter Value into []*string failed 42 | // try to parse it as *string 43 | if err = sonic.Unmarshal(data, &value); err != nil { 44 | // fail, the value is neither []*string nor *string 45 | return err 46 | } 47 
| if value != nil { 48 | // according to RBAC team, value is a single string value 49 | // not comma delimited strings, multiple values are always in array 50 | array = append(array, value) 51 | } 52 | } 53 | if array == nil && value == nil { 54 | // in this case we got `"value": null` 55 | // we should apply the permission to systems with no inventory groups 56 | array = append(array, value) 57 | } 58 | 59 | *a = array 60 | return nil 61 | } 62 | -------------------------------------------------------------------------------- /base/rbac/rbac_test.go: -------------------------------------------------------------------------------- 1 | package rbac 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/bytedance/sonic" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | var data = []byte(` 11 | { 12 | "resourceDefinitions": [ 13 | {"attributeFilter": { 14 | "key": "single_string", 15 | "operation": "equal", 16 | "value": "string" 17 | }}, 18 | {"attributeFilter": { 19 | "key": "comma_separated", 20 | "operation": "equal", 21 | "value": "comma,separated" 22 | }}, 23 | {"attributeFilter": { 24 | "key": "null", 25 | "operation": "equal", 26 | "value": null 27 | }}, 28 | {"attributeFilter": { 29 | "key": "string_array", 30 | "operation": "in", 31 | "value": ["string", "array"] 32 | }}, 33 | {"attributeFilter": { 34 | "key": "string_array_with_null", 35 | "operation": "in", 36 | "value": ["string", "array", null] 37 | }}, 38 | {"attributeFilter": { 39 | "key": "null_array", 40 | "operation": "in", 41 | "value": [null] 42 | }}, 43 | {"attributeFilter": { 44 | "key": "empty_array", 45 | "operation": "in", 46 | "value": [] 47 | }} 48 | ] 49 | } 50 | `) 51 | 52 | func TestParsing(t *testing.T) { 53 | stringS := "string" 54 | commaS := "comma,separated" 55 | arrayS := "array" 56 | 57 | expected := []ResourceDefinition{ 58 | {AttributeFilter: AttributeFilter{Operation: "equal", Key: "single_string", Value: []*string{&stringS}}}, 59 | {AttributeFilter: 
AttributeFilter{Operation: "equal", Key: "comma_separated", Value: []*string{&commaS}}}, 60 | {AttributeFilter: AttributeFilter{Operation: "equal", Key: "null", Value: []*string{nil}}}, 61 | {AttributeFilter: AttributeFilter{Operation: "in", Key: "string_array", Value: []*string{&stringS, &arrayS}}}, 62 | {AttributeFilter: AttributeFilter{Operation: "in", Key: "string_array_with_null", 63 | Value: []*string{&stringS, &arrayS, nil}}}, 64 | {AttributeFilter: AttributeFilter{Operation: "in", Key: "null_array", Value: []*string{nil}}}, 65 | {AttributeFilter: AttributeFilter{Operation: "in", Key: "empty_array", Value: []*string{}}}, 66 | } 67 | 68 | var v Access 69 | err := sonic.Unmarshal(data, &v) 70 | if assert.NoError(t, err) { 71 | assert.Equal(t, expected, v.ResourceDefinitions) 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /base/types/timestamp.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/bytedance/sonic" 7 | ) 8 | 9 | // Go datetime parser does not like slightly incorrect RFC 3339 which we are using (missing Z ) 10 | const Rfc3339NoTz = "2006-01-02T15:04:05-07:00" 11 | 12 | // timestamp format coming from vmaas /dbchange 13 | const Rfc3339NoT = "2006-01-02 15:04:05.000000+00" 14 | 15 | type Rfc3339Timestamp time.Time 16 | type Rfc3339TimestampWithZ time.Time 17 | type Rfc3339TimestampNoT time.Time 18 | 19 | func unmarshalTimestamp(data []byte, format string) (time.Time, error) { 20 | var jd string 21 | var err error 22 | var t time.Time 23 | if err = sonic.Unmarshal(data, &jd); err != nil { 24 | return t, err 25 | } 26 | t, err = time.Parse(format, jd) 27 | return t, err 28 | } 29 | 30 | func (d Rfc3339Timestamp) MarshalJSON() ([]byte, error) { 31 | return sonic.Marshal(d.Time().Format(Rfc3339NoTz)) 32 | } 33 | 34 | func (d *Rfc3339Timestamp) UnmarshalJSON(data []byte) error { 35 | t, err := 
unmarshalTimestamp(data, Rfc3339NoTz) 36 | if err != nil { 37 | // parsing failed, try to parse timestamp without T 38 | t, err = unmarshalTimestamp(data, Rfc3339NoT) 39 | if err != nil { 40 | // parsing failed, try to parse timestamp with Z 41 | t, err = unmarshalTimestamp(data, time.RFC3339) 42 | } 43 | } 44 | *d = Rfc3339Timestamp(t) 45 | return err 46 | } 47 | 48 | func (d *Rfc3339Timestamp) Time() *time.Time { 49 | if d == nil { 50 | return nil 51 | } 52 | return (*time.Time)(d) 53 | } 54 | 55 | func (d Rfc3339TimestampWithZ) MarshalJSON() ([]byte, error) { 56 | return sonic.Marshal(d.Time().Format(time.RFC3339)) 57 | } 58 | 59 | func (d *Rfc3339TimestampWithZ) UnmarshalJSON(data []byte) error { 60 | t, err := unmarshalTimestamp(data, time.RFC3339) 61 | *d = Rfc3339TimestampWithZ(t) 62 | return err 63 | } 64 | 65 | func (d *Rfc3339TimestampWithZ) Time() *time.Time { 66 | if d == nil { 67 | return nil 68 | } 69 | return (*time.Time)(d) 70 | } 71 | 72 | func (d *Rfc3339TimestampNoT) Time() *time.Time { 73 | if d == nil { 74 | return nil 75 | } 76 | return (*time.Time)(d) 77 | } 78 | -------------------------------------------------------------------------------- /base/utils/awscloudwatch.go: -------------------------------------------------------------------------------- 1 | // nolint:lll 2 | // inspired by: https://github.com/RedHatInsights/insights-ingress-go/blob/3ea33a8d793c2154f7cfa12057ca005c5f6031fa/logger/logger.go 3 | // 4 | // https://github.com/kdar/logrus-cloudwatchlogs 5 | package utils 6 | 7 | import ( 8 | "os" 9 | "time" 10 | 11 | "github.com/aws/aws-sdk-go/aws" 12 | "github.com/aws/aws-sdk-go/aws/credentials" 13 | lc "github.com/redhatinsights/platform-go-middlewares/logging/cloudwatch" 14 | log "github.com/sirupsen/logrus" 15 | ) 16 | 17 | var hook *lc.Hook 18 | 19 | // Try to init CloudWatch logging 20 | func trySetupCloudWatchLogging() { 21 | key := CoreCfg.CloudWatchAccessKeyID 22 | if key == "" { 23 | LogInfo("config for aws CloudWatch not 
loaded") 24 | return 25 | } 26 | 27 | secret := FailIfEmpty(CoreCfg.CloudWatchSecretAccesskey, "CW_AWS_SECRET_ACCESS_KEY") 28 | region := CoreCfg.CloudWatchRegion 29 | group := CoreCfg.CloudWatchLogGroup 30 | 31 | hostname, err := os.Hostname() 32 | if err != nil { 33 | LogError("err", err.Error(), "unable to get hostname to set CloudWatch log_stream") 34 | return 35 | } 36 | 37 | log.SetFormatter(&log.JSONFormatter{ 38 | TimestampFormat: "2006-01-02T15:04:05.999Z", 39 | FieldMap: log.FieldMap{ 40 | log.FieldKeyTime: "@timestamp", 41 | log.FieldKeyLevel: "level", 42 | log.FieldKeyMsg: "message", 43 | }, 44 | }) 45 | 46 | cred := credentials.NewStaticCredentials(key, secret, "") 47 | awsconf := aws.NewConfig().WithRegion(region).WithCredentials(cred) 48 | hook, err = lc.NewBatchingHook(group, hostname, awsconf, 10*time.Second) 49 | if err != nil { 50 | LogError("err", err.Error(), "unable to setup CloudWatch logging") 51 | return 52 | } 53 | log.AddHook(hook) 54 | log.Info("CloudWatch logging configured") 55 | } 56 | 57 | func FlushLogs() { 58 | if hook != nil { 59 | _ = hook.Flush() 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /base/utils/config_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | const myTestEnv = "MY_TEST_CONFIG" 11 | 12 | func TestPodConfig(t *testing.T) { 13 | os.Setenv(myTestEnv, "bool_val1=true;bool_val2=false;no_val;int_val=123;int64_val=17179869184;str_val=a b c") 14 | pc := ReadPodConfig(myTestEnv) 15 | assert.True(t, pc.GetBool("bool_val1", false)) 16 | assert.False(t, pc.GetBool("bool_val2", true)) 17 | assert.True(t, pc.GetBool("no_val", false)) 18 | assert.Equal(t, 123, pc.GetInt("int_val", 999)) 19 | assert.Equal(t, int64(17179869184), pc.GetInt64("int64_val", 999)) 20 | assert.Equal(t, "a b c", pc.GetString("str_val", "nope")) 
21 | 22 | // check defaults 23 | assert.True(t, pc.GetBool("undefined", true)) 24 | assert.Equal(t, 1, pc.GetInt("undefined", 1)) 25 | assert.Equal(t, int64(1), pc.GetInt64("undefined", 1)) 26 | assert.Equal(t, "nope", pc.GetString("undefined", "nope")) 27 | } 28 | -------------------------------------------------------------------------------- /base/utils/core_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestRecoverAndLogPanics(t *testing.T) { 10 | ConfigureLogging() 11 | 12 | logHook := NewTestLogHook() 13 | log.AddHook(logHook) 14 | 15 | func() { 16 | defer LogPanics(false) 17 | panic("We crashed") 18 | }() 19 | 20 | assert.Equal(t, 1, len(logHook.LogEntries)) 21 | } 22 | -------------------------------------------------------------------------------- /base/utils/gin_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "github.com/gin-gonic/gin" 6 | log "github.com/sirupsen/logrus" 7 | "github.com/stretchr/testify/assert" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestRunServer(t *testing.T) { 13 | ConfigureLogging() 14 | 15 | var hook = NewTestLogHook() 16 | log.AddHook(hook) 17 | 18 | ctx, cancel := context.WithCancel(context.Background()) 19 | 20 | go func() { 21 | time.Sleep(time.Millisecond * 100) 22 | cancel() 23 | }() 24 | err := RunServer(ctx, gin.Default(), 8888) 25 | assert.Nil(t, err) 26 | AssertEqualWait(t, 1, func() (exp, act interface{}) { 27 | return 1, len(hook.LogEntries) 28 | }) 29 | assert.Equal(t, "server closed successfully", hook.LogEntries[0].Message) 30 | } 31 | -------------------------------------------------------------------------------- /base/utils/http_test.go: -------------------------------------------------------------------------------- 1 | package 
utils 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func createFirstlyFailingCallFun() func() (interface{}, *http.Response, error) { 12 | i := 0 13 | httpCallFun := func() (interface{}, *http.Response, error) { 14 | if i < 2 { 15 | i++ 16 | return nil, nil, errors.New("testing error") 17 | } 18 | strPtr := "some data" 19 | return &strPtr, nil, nil 20 | } 21 | return httpCallFun 22 | } 23 | 24 | func TestHTTPCallRetrySucc(t *testing.T) { 25 | // nolint:bodyclose 26 | data, err := HTTPCallRetry(createFirstlyFailingCallFun(), false, 0) 27 | dataStrPtr := data.(*string) 28 | assert.Nil(t, err) 29 | assert.Equal(t, "some data", *dataStrPtr) 30 | } 31 | 32 | func TestHTTPCallRetryFail(t *testing.T) { 33 | // nolint:bodyclose 34 | data, err := HTTPCallRetry(createFirstlyFailingCallFun(), false, 1) 35 | assert.NotNil(t, err) 36 | assert.Equal(t, "HTTP retry call failed, attempts: 2", err.Error()) 37 | assert.Nil(t, data) 38 | } 39 | -------------------------------------------------------------------------------- /base/utils/identity.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "encoding/base64" 5 | 6 | "github.com/bytedance/sonic" 7 | "github.com/redhatinsights/platform-go-middlewares/identity" 8 | ) 9 | 10 | func ParseXRHID(identityString string) (*identity.XRHID, error) { 11 | var xrhid identity.XRHID 12 | 13 | decoded, err := base64.StdEncoding.DecodeString(identityString) 14 | if err != nil { 15 | return nil, err 16 | } 17 | err = sonic.Unmarshal(decoded, &xrhid) 18 | if err != nil { 19 | return nil, err 20 | } 21 | return &xrhid, nil 22 | } 23 | -------------------------------------------------------------------------------- /base/utils/identity_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | 
"github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestParseIdentity(t *testing.T) { 10 | str := "eyJlbnRpdGxlbWVudHMiOnsiaW5zaWdodHMiOnsiaXNfZW50aXRsZWQiOnRydWV9LCJjb3N0X21hbmFnZW1lbnQiOnsiaXNfZW50aXRsZWQiOnRydWV9LCJhbnNpYmxlIjp7ImlzX2VudGl0bGVkIjp0cnVlfSwib3BlbnNoaWZ0Ijp7ImlzX2VudGl0bGVkIjp0cnVlfSwic21hcnRfbWFuYWdlbWVudCI6eyJpc19lbnRpdGxlZCI6dHJ1ZX0sIm1pZ3JhdGlvbnMiOnsiaXNfZW50aXRsZWQiOnRydWV9fSwiaWRlbnRpdHkiOnsiaW50ZXJuYWwiOnsiYXV0aF90aW1lIjoyOTksImF1dGhfdHlwZSI6ImJhc2ljLWF1dGgiLCJvcmdfaWQiOiIxMTc4OTc3MiJ9LCJhY2NvdW50X251bWJlciI6IjYwODk3MTkiLCJ1c2VyIjp7ImZpcnN0X25hbWUiOiJJbnNpZ2h0cyIsImlzX2FjdGl2ZSI6dHJ1ZSwiaXNfaW50ZXJuYWwiOmZhbHNlLCJsYXN0X25hbWUiOiJRQSIsImxvY2FsZSI6ImVuX1VTIiwiaXNfb3JnX2FkbWluIjp0cnVlLCJ1c2VybmFtZSI6Imluc2lnaHRzLXFhIiwiZW1haWwiOiJqbmVlZGxlK3FhQHJlZGhhdC5jb20ifSwidHlwZSI6IlVzZXIifX0=" //nolint:lll 11 | xrhid, err := ParseXRHID(str) 12 | 13 | assert.Equal(t, nil, err) 14 | assert.Equal(t, "6089719", xrhid.Identity.AccountNumber) 15 | } 16 | -------------------------------------------------------------------------------- /base/utils/log_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | log "github.com/sirupsen/logrus" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestInitLogging(t *testing.T) { 12 | assert.Nil(t, os.Setenv("LOG_STYLE", "json")) 13 | ConfigureLogging() 14 | 15 | var hook = NewTestLogHook() 16 | log.AddHook(hook) 17 | 18 | LogInfo("num", 1, "str", "text", "info log") 19 | 20 | assert.Equal(t, 1, len(hook.LogEntries)) 21 | entry := hook.LogEntries[0] 22 | assert.Equal(t, 2, len(entry.Data)) 23 | assert.Equal(t, 1, entry.Data["num"]) 24 | assert.Equal(t, "text", entry.Data["str"]) 25 | assert.Equal(t, "info log", entry.Message) 26 | } 27 | 28 | func TestEvenArgs(t *testing.T) { 29 | assert.Nil(t, os.Setenv("LOG_STYLE", "json")) 30 | ConfigureLogging() 31 | 32 | var hook = NewTestLogHook() 33 | 
log.AddHook(hook) 34 | 35 | LogInfo("num", 1, "str", "text") 36 | 37 | assert.Equal(t, 1, len(hook.LogEntries)) 38 | entry := hook.LogEntries[0] 39 | assert.Equal(t, 2, len(entry.Data)) 40 | assert.Equal(t, 1, entry.Data["num"]) 41 | assert.Equal(t, "text", entry.Data["str"]) 42 | } 43 | -------------------------------------------------------------------------------- /base/utils/metrics.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "time" 6 | ) 7 | 8 | func ObserveSecondsSince(timeStart time.Time, observer prometheus.Observer) { 9 | observer.Observe(time.Since(timeStart).Seconds()) 10 | } 11 | 12 | func ObserveHoursSince(timeStart time.Time, observer prometheus.Observer) { 13 | observer.Observe(time.Since(timeStart).Hours()) 14 | } 15 | -------------------------------------------------------------------------------- /base/utils/openapi.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | func PtrBool(v bool) *bool { return &v } 8 | func PtrInt(v int) *int { return &v } 9 | func PtrInt32(v int32) *int32 { return &v } 10 | func PtrInt64(v int64) *int64 { return &v } 11 | func PtrFloat32(v float32) *float32 { return &v } 12 | func PtrFloat64(v float64) *float64 { return &v } 13 | func PtrString(v string) *string { return &v } 14 | func PtrTime(v time.Time) *time.Time { return &v } 15 | 16 | func PtrSliceString(v []string) *[]string { 17 | return &v 18 | } 19 | 20 | func PtrTimeParse(ts string) *time.Time { 21 | t, _ := time.Parse(time.RFC3339, ts) 22 | return &t 23 | } 24 | 25 | func PtrBoolNil() *bool { 26 | var b *bool 27 | return b 28 | } 29 | 30 | func EmptyToNil(s *string) *string { 31 | if s != nil && *s == "" { 32 | return nil 33 | } 34 | return s 35 | } 36 | -------------------------------------------------------------------------------- 
/base/utils/testing.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | log "github.com/sirupsen/logrus" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func SkipWithoutDB(t *testing.T) { 12 | if !PodConfig.GetBool("use_testing_db", false) { 13 | t.Skip("testing database not used - skipping") 14 | } 15 | } 16 | 17 | func SkipWithoutPlatform(t *testing.T) { 18 | if CoreCfg.VmaasAddress == "" { 19 | t.Skip("testing platform instance not used - skipping") 20 | } 21 | } 22 | 23 | type TestLogHook struct { 24 | LogEntries []log.Entry 25 | LevelsToStore []log.Level 26 | } 27 | 28 | func (t *TestLogHook) Levels() []log.Level { 29 | return t.LevelsToStore 30 | } 31 | 32 | func (t *TestLogHook) Fire(entry *log.Entry) error { 33 | t.LogEntries = append(t.LogEntries, *entry) 34 | return nil 35 | } 36 | 37 | func NewTestLogHook(levelsToStore ...log.Level) *TestLogHook { 38 | if len(levelsToStore) == 0 { 39 | levelsToStore = []log.Level{log.PanicLevel, log.FatalLevel, log.ErrorLevel, log.WarnLevel, log.InfoLevel, 40 | log.DebugLevel, log.TraceLevel} 41 | } 42 | return &TestLogHook{LevelsToStore: levelsToStore} 43 | } 44 | 45 | func AssertEqualWait(t *testing.T, timeoutSeconds int, values func() (exp, act interface{})) { 46 | var exp, act interface{} 47 | for i := 0; i < timeoutSeconds*10; i++ { 48 | time.Sleep(time.Millisecond * 100) 49 | exp, act = values() 50 | if exp == act { 51 | break 52 | } 53 | } 54 | assert.Equal(t, exp, act) 55 | } 56 | -------------------------------------------------------------------------------- /build_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -exv 4 | 5 | # no space left on jenkins 6 | export TMPDIR=/var/lib/jenkins 7 | 8 | IMAGE="quay.io/cloudservices/patchman-engine-app" 9 | IMAGE_TAG=$(git rev-parse --short=7 HEAD) 10 | IMAGE_VERSION=$(git tag --points-at 
$IMAGE_TAG) 11 | SECURITY_COMPLIANCE_TAG="sc-$(date +%Y%m%d)-$(git rev-parse --short=7 HEAD)" 12 | 13 | if [[ -z "$QUAY_USER" || -z "$QUAY_TOKEN" ]]; then 14 | echo "QUAY_USER and QUAY_TOKEN must be set" 15 | exit 1 16 | fi 17 | 18 | if [[ -z "$RH_REGISTRY_USER" || -z "$RH_REGISTRY_TOKEN" ]]; then 19 | echo "RH_REGISTRY_USER and RH_REGISTRY_TOKEN must be set" 20 | exit 1 21 | fi 22 | 23 | # Create tmp dir to store data in during job run (do NOT store in $WORKSPACE) 24 | export TMP_JOB_DIR=$(mktemp -d -p "$HOME" -t "jenkins-${JOB_NAME}-${BUILD_NUMBER}-XXXXXX") 25 | echo "job tmp dir location: $TMP_JOB_DIR" 26 | 27 | function job_cleanup() { 28 | echo "cleaning up job tmp dir: $TMP_JOB_DIR" 29 | rm -fr $TMP_JOB_DIR 30 | } 31 | 32 | trap job_cleanup EXIT ERR SIGINT SIGTERM 33 | 34 | AUTH_CONF_DIR="$TMP_JOB_DIR/.podman" 35 | mkdir -p $AUTH_CONF_DIR 36 | export REGISTRY_AUTH_FILE="$AUTH_CONF_DIR/auth.json" 37 | podman login -u="$QUAY_USER" -p="$QUAY_TOKEN" quay.io 38 | podman login -u="$RH_REGISTRY_USER" -p="$RH_REGISTRY_TOKEN" registry.redhat.io 39 | podman build -f Dockerfile -t "${IMAGE}:${IMAGE_TAG}" . 
40 | 41 | if [[ "$GIT_BRANCH" == "origin/security-compliance" ]]; then 42 | podman tag "${IMAGE}:${IMAGE_TAG}" "${IMAGE}:${SECURITY_COMPLIANCE_TAG}" 43 | podman push "${IMAGE}:${SECURITY_COMPLIANCE_TAG}" 44 | else 45 | podman push "${IMAGE}:${IMAGE_TAG}" 46 | podman tag "${IMAGE}:${IMAGE_TAG}" "${IMAGE}:latest" 47 | podman push "${IMAGE}:latest" 48 | if [[ -n "$IMAGE_VERSION" ]]; then 49 | podman tag "${IMAGE}:${IMAGE_TAG}" "${IMAGE}:${IMAGE_VERSION}" 50 | podman push "${IMAGE}:${IMAGE_VERSION}" 51 | fi 52 | fi 53 | -------------------------------------------------------------------------------- /conf/admin_api.env: -------------------------------------------------------------------------------- 1 | DB_USER=admin 2 | DB_PASSWD=passwd 3 | 4 | EVAL_TOPIC=patchman.evaluator.recalc 5 | POD_CONFIG=turnpike_auth=false 6 | -------------------------------------------------------------------------------- /conf/common.env: -------------------------------------------------------------------------------- 1 | LOG_LEVEL=trace 2 | LOG_STYLE=plain 3 | GIN_MODE=release 4 | ACG_CONFIG=/go/src/app/conf/cdappconfig.json 5 | 6 | DB_DEBUG=false 7 | 8 | # kafka is running in separate containers 9 | KAFKA_GROUP=patchman 10 | KAFKA_READY_ADDRESS=http://kafka:9099/ 11 | # set if you want to bypass kafka SSL verification 12 | #KAFKA_SSL_SKIP_VERIFY=true 13 | 14 | EVAL_TOPIC=patchman.evaluator.upload 15 | EVENTS_TOPIC=platform.inventory.events 16 | NOTIFICATIONS_TOPIC=platform.notifications.ingress 17 | PAYLOAD_TRACKER_TOPIC=platform.payload-status 18 | REMEDIATIONS_UPDATE_TOPIC=platform.remediation-updates.patch 19 | TEMPLATE_TOPIC=platform.content-sources.template 20 | 21 | # If vmaas is running locally, its available here 22 | #VMAAS_ADDRESS=http://vmaas_webapp:8080 23 | ENABLE_PROFILER=true 24 | 25 | CANDLEPIN_ADDRESS=http://platform:9001/candlepin 26 | -------------------------------------------------------------------------------- /conf/database.env: 
-------------------------------------------------------------------------------- 1 | POSTGRESQL_USER=admin 2 | POSTGRESQL_PASSWORD=passwd 3 | POSTGRESQL_DATABASE=patchman 4 | PGDATA=/var/lib/pgsql/data 5 | -------------------------------------------------------------------------------- /conf/database_admin.env: -------------------------------------------------------------------------------- 1 | DB_ADMIN_USER=admin 2 | DB_ADMIN_PASSWD=passwd 3 | 4 | MANAGER_PASSWORD=manager 5 | LISTENER_PASSWORD=listener 6 | VMAAS_SYNC_PASSWORD=vmaas_sync 7 | EVALUATOR_PASSWORD=evaluator 8 | CYNDI_PASSWORD=cyndi 9 | 10 | # Optionally set schema_migration=XXX and/or reset_schema 11 | POD_CONFIG=update_users;update_db_config;wait_for_db=empty 12 | -------------------------------------------------------------------------------- /conf/evaluator_common.env: -------------------------------------------------------------------------------- 1 | DB_USER=evaluator 2 | DB_PASSWD=evaluator 3 | -------------------------------------------------------------------------------- /conf/evaluator_recalc.env: -------------------------------------------------------------------------------- 1 | EVAL_TOPIC=patchman.evaluator.recalc 2 | 3 | POD_CONFIG=label=recalc 4 | -------------------------------------------------------------------------------- /conf/evaluator_upload.env: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/conf/evaluator_upload.env -------------------------------------------------------------------------------- /conf/gorun.env: -------------------------------------------------------------------------------- 1 | # to run current souces mounted to dev containers 2 | GORUN=on 3 | -------------------------------------------------------------------------------- /conf/kafka.env: -------------------------------------------------------------------------------- 1 | 
# Copied config from 2 | # https://github.com/confluentinc/examples/blob/b8a68d8e41572cf1cb51e5b63f54e14b379c2ec4/cp-all-in-one/docker-compose.yml 3 | 4 | KAFKA_BROKER_ID=1 5 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,SSL:SSL,SSL_HOST:SSL 6 | KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,SSL://kafka:9093,PLAINTEXT_HOST://localhost:29092,SSL_HOST://localhost:29093 7 | KAFKA_LISTENERS=PLAINTEXT://kafka:9092,SSL://kafka:9093,CONTROLLER://kafka:29094,PLAINTEXT_HOST://localhost:29092,SSL_HOST://localhost:29093 8 | 9 | # secrets from /etc/kafka/secrets 10 | KAFKA_SSL_KEYSTORE_CREDENTIALS=broker_creds 11 | KAFKA_SSL_KEY_CREDENTIALS=broker_creds 12 | KAFKA_SSL_TRUSTSTORE_CREDENTIALS=broker_creds 13 | KAFKA_SSL_KEYSTORE_FILENAME=kafka.broker.keystore.jks 14 | KAFKA_SSL_TRUSTSTORE_FILENAME=kafka.broker.truststore.jks 15 | KAFKA_SSL_CLIENT_AUTH=none 16 | 17 | # KRaft mode settings 18 | # https://docs.confluent.io/platform/current/installation/docker/config-reference.html#cp-kakfa-example 19 | KAFKA_PROCESS_ROLES=broker,controller 20 | KAFKA_CONTROLLER_QUORUM_VOTERS=1@kafka:29094 21 | KAFKA_INTER_BROKER_LISTENER_NAME=PLAINTEXT 22 | KAFKA_CONTROLLER_LISTENER_NAMES=CONTROLLER 23 | CLUSTER_ID=ok340dKtTOWc4eOxrdavLA 24 | 25 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 26 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR=1 27 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR=1 28 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS=0 29 | -------------------------------------------------------------------------------- /conf/listener.env: -------------------------------------------------------------------------------- 1 | DB_USER=listener 2 | DB_PASSWD=listener 3 | 4 | POD_CONFIG= 5 | -------------------------------------------------------------------------------- /conf/local.env: -------------------------------------------------------------------------------- 1 | GIN_MODE=release 2 | LOG_LEVEL=DEBUG 3 | DB_DEBUG=true 4 | 5 | 
ACG_CONFIG=./conf/cdappconfig.json 6 | 7 | DB_HOST=localhost 8 | DB_PORT=5433 9 | DB_USER=admin 10 | DB_PASSWD=passwd 11 | DB_NAME=patchman 12 | ## Options: "require" (default), "verify-full", "verify-ca", "disable" 13 | DB_SSLMODE=verify-full 14 | DB_SSLROOTCERT=dev/database/secrets/pgca.crt 15 | 16 | VMAAS_ADDRESS=http://localhost:9001 17 | CANDLEPIN_ADDRESS=http://localhost:9001/candlepin 18 | 19 | #KAFKA_ADDRESS=localhost:29092 20 | KAFKA_GROUP=patchman 21 | KAFKA_SSL_CERT=dev/kafka/secrets/ca.crt 22 | PAYLOAD_TRACKER_TOPIC=platform.payload-status 23 | EVENTS_TOPIC=platform.inventory.events 24 | EVAL_TOPIC=patchman.evaluator.upload 25 | TEMPLATE_TOPIC=platform.content-sources.template 26 | 27 | RBAC_ADDRESS=http://localhost:9001 28 | 29 | TZ=UTC 30 | 31 | GORUN=on 32 | 33 | # don't put "" or '' around the text otherwise they'll be included into content 34 | POD_CONFIG=label=upload;baseline_change_eval=false;use_testing_db 35 | -------------------------------------------------------------------------------- /conf/manager.env: -------------------------------------------------------------------------------- 1 | DB_USER=manager 2 | DB_PASSWD=manager 3 | 4 | MAX_REQUEST_BODY_SIZE=1048576 5 | -------------------------------------------------------------------------------- /conf/platform.env: -------------------------------------------------------------------------------- 1 | ACG_CONFIG=/go/src/app/conf/cdappconfig.json 2 | #KAFKA_ADDRESS=kafka:9092 3 | TEMPLATE_TOPIC=platform.content-sources.template 4 | -------------------------------------------------------------------------------- /conf/test.env: -------------------------------------------------------------------------------- 1 | GIN_MODE=release 2 | LOG_LEVEL=DEBUG 3 | 4 | ACG_CONFIG=/go/src/app/conf/cdappconfig.json 5 | 6 | DB_USER=admin 7 | DB_PASSWD=passwd 8 | 9 | # don't retry vmaas calls forever 10 | LIMIT_PAGE_SIZE=false 11 | 12 | # don't put "" or '' around the text otherwise they'll be included into 
content 13 | POD_CONFIG=label=upload;vmaas_call_max_retries=100;baseline_change_eval=false;update_users;update_db_config;use_testing_db 14 | -------------------------------------------------------------------------------- /conf/vmaas_sync.env: -------------------------------------------------------------------------------- 1 | DB_USER=vmaas_sync 2 | DB_PASSWD=vmaas_sync 3 | 4 | EVAL_TOPIC=patchman.evaluator.recalc 5 | -------------------------------------------------------------------------------- /dashboards/grafana/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'Prometheus' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: false 9 | editable: true 10 | options: 11 | path: /etc/grafana/provisioning/dashboards 12 | -------------------------------------------------------------------------------- /dashboards/grafana/datasources/prometheus_ds.yml: -------------------------------------------------------------------------------- 1 | datasources: 2 | - name: Prometheus 3 | access: proxy 4 | type: prometheus 5 | url: http://prometheus:9090 6 | isDefault: true 7 | -------------------------------------------------------------------------------- /dashboards/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 5s 3 | evaluation_interval: 5s 4 | 5 | scrape_configs: 6 | - job_name: 'patchman-engine' 7 | static_configs: 8 | - targets: ['manager:9080'] 9 | - targets: ['listener:8080'] 10 | - targets: ['evaluator_recalc:8080'] 11 | labels: 12 | pod: 'patchman-evaluator-recalc' 13 | - targets: ['evaluator_upload:8080'] 14 | labels: 15 | pod: 'patchman-evaluator-upload' 16 | - job_name: 'kafka' 17 | static_configs: 18 | - targets: ['kafka:9101'] 19 | -------------------------------------------------------------------------------- /database_admin/check-upgraded.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e -o pipefail # stop on error 4 | 5 | MIGRATION_DIR=file://./database_admin/migrations 6 | 7 | echo "Running in $(pwd) as $(id)" 8 | ${GORUN:+go run} ./main${GORUN:+.go} check_upgraded $MIGRATION_DIR 9 | -------------------------------------------------------------------------------- /database_admin/config.go: -------------------------------------------------------------------------------- 1 | package database_admin //nolint:revive,stylecheck 2 | 3 | import ( 4 | "app/base/utils" 5 | ) 6 | 7 | var ( 8 | // Schema to migrate to (-1 means latest) 9 | schemaMigration = utils.PodConfig.GetInt("schema_migration", -1) 10 | // Put this version into schema_migration table and set dirty=false 11 | forceMigrationVersion = utils.PodConfig.GetInt("force_migration_version", -1) 12 | // Drop everything and create schema from scratch 13 | resetSchema = utils.PodConfig.GetBool("reset_schema", false) 14 | // Create users and update their password 15 | updateUsers = utils.PodConfig.GetBool("update_users", false) 16 | // Reset cyndi password 17 | updateCyndiPasswd = utils.PodConfig.GetBool("update_cyndi_passwd", false) 18 | // rerun config.sql 19 | updateDBConfig = utils.PodConfig.GetBool("update_db_config", false) 20 | ) 21 | -------------------------------------------------------------------------------- /database_admin/config.sql: -------------------------------------------------------------------------------- 1 | -- In test/CI/QA environment 2 | -- Log statements which take more than 2s 3 | DO 4 | $$ 5 | DECLARE 6 | dbname text; 7 | BEGIN 8 | SELECT current_database() into dbname; 9 | IF dbname = 'patchman' THEN 10 | EXECUTE 'ALTER DATABASE ' || dbname || ' SET log_min_duration_statement = 2000;'; 11 | END IF; 12 | END 13 | $$ LANGUAGE plpgsql; 14 | -------------------------------------------------------------------------------- /database_admin/entrypoint.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e -o pipefail # stop on error 4 | 5 | MIGRATION_FILES=file://./database_admin/migrations 6 | 7 | echo "Running in $(pwd) as $(id)" 8 | ${GORUN:+go run} ./main${GORUN:+.go} migrate $MIGRATION_FILES 9 | -------------------------------------------------------------------------------- /database_admin/migrations/101_baseline_timestamps.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE baseline DROP COLUMN IF EXISTS creator; 2 | ALTER TABLE baseline DROP COLUMN IF EXISTS published; 3 | ALTER TABLE baseline DROP COLUMN IF EXISTS last_edited; 4 | -------------------------------------------------------------------------------- /database_admin/migrations/101_baseline_timestamps.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE baseline ADD COLUMN creator TEXT CHECK (NOT empty(creator)); 2 | ALTER TABLE baseline ADD COLUMN published TIMESTAMP WITH TIME ZONE; 3 | ALTER TABLE baseline ADD COLUMN last_edited TIMESTAMP WITH TIME ZONE; 4 | -------------------------------------------------------------------------------- /database_admin/migrations/106_alphanumeric_collation.down.sql: -------------------------------------------------------------------------------- 1 | DROP COLLATION "numeric"; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/106_alphanumeric_collation.up.sql: -------------------------------------------------------------------------------- 1 | CREATE COLLATION IF NOT EXISTS numeric (provider = icu, locale = 'en-u-kn-true'); 2 | -------------------------------------------------------------------------------- /database_admin/migrations/108_add_rhsm-system-profile-bridge.up.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO reporter (id, 
name) VALUES 2 | (4, 'rhsm-system-profile-bridge') 3 | ON CONFLICT DO NOTHING; 4 | -------------------------------------------------------------------------------- /database_admin/migrations/109_update_status.down.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS update_status(update_data jsonb); 2 | -------------------------------------------------------------------------------- /database_admin/migrations/109_update_status.up.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION update_status(update_data jsonb) 2 | RETURNS TEXT as 3 | $$ 4 | DECLARE 5 | len int; 6 | BEGIN 7 | len = jsonb_array_length(update_data); 8 | IF len IS NULL or len = 0 THEN 9 | RETURN 'None'; 10 | END IF; 11 | len = jsonb_array_length(jsonb_path_query_array(update_data, '$ ? (@.status == "Installable")')); 12 | IF len > 0 THEN 13 | RETURN 'Installable'; 14 | END IF; 15 | RETURN 'Applicable'; 16 | END; 17 | $$ LANGUAGE 'plpgsql'; 18 | 19 | -------------------------------------------------------------------------------- /database_admin/migrations/110_package_account_data_applicable.up.sql: -------------------------------------------------------------------------------- 1 | TRUNCATE package_account_data; 2 | UPDATE rh_account SET valid_package_cache = FALSE; 3 | ALTER TABLE package_account_data RENAME COLUMN systems_updatable TO systems_installable; 4 | ALTER TABLE package_account_data ADD COLUMN systems_applicable INT NOT NULL DEFAULT 0; 5 | -------------------------------------------------------------------------------- /database_admin/migrations/110_package_account_data_installable.down.sql: -------------------------------------------------------------------------------- 1 | TRUNCATE package_account_data; 2 | UPDATE rh_account SET valid_package_cache = FALSE; 3 | ALTER TABLE package_account_data RENAME COLUMN systems_installable TO systems_updatable; 4 | ALTER 
TABLE package_account_data DROP COLUMN systems_applicable; 5 | -------------------------------------------------------------------------------- /database_admin/migrations/111_immutable.down.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION empty(t TEXT) 2 | RETURNS BOOLEAN as 3 | $empty$ 4 | BEGIN 5 | RETURN t ~ '^[[:space:]]*$'; 6 | END; 7 | $empty$ LANGUAGE 'plpgsql'; 8 | 9 | 10 | CREATE OR REPLACE FUNCTION update_status(update_data jsonb) 11 | RETURNS TEXT as 12 | $$ 13 | DECLARE 14 | len int; 15 | BEGIN 16 | len = jsonb_array_length(update_data); 17 | IF len IS NULL or len = 0 THEN 18 | RETURN 'None'; 19 | END IF; 20 | len = jsonb_array_length(jsonb_path_query_array(update_data, '$ ? (@.status == "Installable")')); 21 | IF len > 0 THEN 22 | RETURN 'Installable'; 23 | END IF; 24 | RETURN 'Applicable'; 25 | END; 26 | $$ LANGUAGE 'plpgsql'; 27 | -------------------------------------------------------------------------------- /database_admin/migrations/111_immutable.up.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION empty(t TEXT) 2 | RETURNS BOOLEAN as 3 | $$ 4 | BEGIN 5 | RETURN t ~ '^[[:space:]]*$'; 6 | END; 7 | $$ LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE; 8 | 9 | 10 | CREATE OR REPLACE FUNCTION update_status(update_data jsonb) 11 | RETURNS TEXT as 12 | $$ 13 | DECLARE 14 | len int; 15 | BEGIN 16 | len = jsonb_array_length(update_data); 17 | IF len IS NULL or len = 0 THEN 18 | RETURN 'None'; 19 | END IF; 20 | len = jsonb_array_length(jsonb_path_query_array(update_data, '$ ? 
(@.status == "Installable")')); 21 | IF len > 0 THEN 22 | RETURN 'Installable'; 23 | END IF; 24 | RETURN 'Applicable'; 25 | END; 26 | $$ LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE; 27 | -------------------------------------------------------------------------------- /database_admin/migrations/112_advisory_cache_valid.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account DROP valid_advisory_cache; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/112_advisory_cache_valid.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ADD COLUMN valid_advisory_cache BOOLEAN NOT NULL DEFAULT FALSE; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/113_satellite_managed.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform DROP COLUMN satellite_managed; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/113_satellite_managed.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD COLUMN satellite_managed BOOLEAN NOT NULL DEFAULT FALSE; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/114_built_pkgcache.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform DROP COLUMN built_pkgcache; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/114_built_pkgcache.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD COLUMN built_pkgcache BOOLEAN NOT NULL DEFAULT FALSE; 2 | 
-------------------------------------------------------------------------------- /database_admin/migrations/115_system_packages2.down.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE system_package2; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/115_system_packages2.up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS system_package2 2 | ( 3 | rh_account_id INT NOT NULL, 4 | system_id BIGINT NOT NULL, 5 | name_id BIGINT NOT NULL REFERENCES package_name (id), 6 | package_id BIGINT NOT NULL REFERENCES package (id), 7 | -- Use null to represent up-to-date packages 8 | installable_id BIGINT REFERENCES package (id), 9 | applicable_id BIGINT REFERENCES package (id), 10 | 11 | PRIMARY KEY (rh_account_id, system_id, package_id), 12 | FOREIGN KEY (rh_account_id, system_id) REFERENCES system_platform (rh_account_id, id) 13 | ) PARTITION BY HASH (rh_account_id); 14 | 15 | CREATE INDEX IF NOT EXISTS system_package2_account_pkg_name_idx 16 | ON system_package2 (rh_account_id, name_id) INCLUDE (system_id, package_id, installable_id, applicable_id); 17 | 18 | CREATE INDEX IF NOT EXISTS system_package2_package_id_idx on system_package2 (package_id); 19 | 20 | GRANT SELECT, INSERT, UPDATE, DELETE ON system_package2 TO evaluator; 21 | GRANT SELECT, UPDATE, DELETE ON system_package2 TO listener; 22 | GRANT SELECT, UPDATE, DELETE ON system_package2 TO manager; 23 | GRANT SELECT, UPDATE, DELETE ON system_package2 TO vmaas_sync; 24 | 25 | SELECT create_table_partitions('system_package2', 128, 26 | $$WITH (fillfactor = '70', autovacuum_vacuum_scale_factor = '0.05')$$); 27 | 28 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO evaluator; 29 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO evaluator; 30 | 31 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO listener; 32 | GRANT USAGE, SELECT ON ALL 
SEQUENCES IN SCHEMA public TO listener; 33 | 34 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO manager; 35 | 36 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO vmaas_sync; 37 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO vmaas_sync; 38 | -------------------------------------------------------------------------------- /database_admin/migrations/117_migrate_system_package2.down.sql: -------------------------------------------------------------------------------- 1 | DROP PROCEDURE copy_system_packages; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/119_migrate_system_package2.up.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE PROCEDURE copy_system_packages() 2 | LANGUAGE plpgsql 3 | AS 4 | $$ 5 | DECLARE 6 | cnt bigint := 0; 7 | prev_cnt bigint := 0; 8 | rows_inserted bigint := 0; 9 | account int := 0; 10 | BEGIN 11 | FOR account IN (SELECT id from rh_account ORDER BY hash_partition_id(id, 128), id) 12 | LOOP 13 | INSERT INTO system_package2 14 | SELECT system_package.rh_account_id, 15 | system_id, 16 | name_id, 17 | package_id, 18 | (SELECT id 19 | FROM package 20 | WHERE package.name_id = system_package.name_id 21 | AND evra = 22 | JSONB_PATH_QUERY_ARRAY(update_data, '$[*] ? (@.status== "Installable").evra') ->> 0), 23 | (SELECT id 24 | FROM package 25 | WHERE package.name_id = system_package.name_id 26 | AND evra = JSONB_PATH_QUERY_ARRAY(update_data, '$[*] ? 
(@.status== "Applicable").evra') ->> 0) 27 | FROM system_package 28 | JOIN system_platform ON system_platform.id = system_package.system_id AND system_platform.rh_account_id = system_package.rh_account_id 29 | WHERE system_package.rh_account_id = account; 30 | COMMIT; 31 | 32 | GET DIAGNOSTICS rows_inserted = ROW_COUNT; 33 | 34 | cnt := cnt + rows_inserted; 35 | IF (cnt/1000000)::int > (prev_cnt/1000000)::int THEN 36 | RAISE NOTICE 'inserted % rows, account: %, partition: %', cnt, account, hash_partition_id(account, 128); 37 | prev_cnt := cnt; 38 | END IF; 39 | END LOOP; 40 | END 41 | $$; 42 | -------------------------------------------------------------------------------- /database_admin/migrations/120_add_new_yupana_reporters.up.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO reporter (id, name) VALUES 2 | (5, 'satellite'), 3 | (6, 'discovery') 4 | ON CONFLICT DO NOTHING; 5 | -------------------------------------------------------------------------------- /database_admin/migrations/121_packages_applicable.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform 2 | DROP COLUMN IF EXISTS packages_applicable; 3 | ALTER TABLE system_platform 4 | RENAME COLUMN packages_installable TO packages_updatable; 5 | -------------------------------------------------------------------------------- /database_admin/migrations/121_packages_applicable.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform 2 | RENAME COLUMN packages_updatable TO packages_installable; 3 | ALTER TABLE system_platform 4 | ADD COLUMN IF NOT EXISTS packages_applicable 5 | INT NOT NULL DEFAULT 0; 6 | -------------------------------------------------------------------------------- /database_admin/migrations/123_yum_updates_unchanged_trigger.up.sql: 
-------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION check_unchanged() 2 | RETURNS TRIGGER AS 3 | $check_unchanged$ 4 | BEGIN 5 | IF (TG_OP = 'INSERT') AND 6 | NEW.unchanged_since IS NULL THEN 7 | NEW.unchanged_since := CURRENT_TIMESTAMP; -- default the timestamp when the inserting caller did not set one 8 | END IF; 9 | IF (TG_OP = 'UPDATE') AND 10 | (NEW.json_checksum <> OLD.json_checksum OR NEW.yum_updates <> OLD.yum_updates) THEN -- reset "unchanged since" only when the tracked payload actually changed 11 | NEW.unchanged_since := CURRENT_TIMESTAMP; 12 | END IF; 13 | RETURN NEW; 14 | END; 15 | $check_unchanged$ 16 | LANGUAGE 'plpgsql'; 17 | -------------------------------------------------------------------------------- /database_admin/migrations/124_templates.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform DROP COLUMN IF EXISTS template_id; 2 | 3 | DROP TABLE IF EXISTS template; 4 | 5 | DROP FUNCTION IF EXISTS grant_table_partitions(perms text, tbl regclass, grantie text); 6 | -------------------------------------------------------------------------------- /database_admin/migrations/124_templates.up.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION grant_table_partitions(perms text, tbl regclass, grantie text) -- NOTE(review): "grantie" is a misspelling of "grantee"; kept, signature is referenced by the down migration 2 | RETURNS VOID AS 3 | $$ 4 | DECLARE 5 | r record; 6 | BEGIN 7 | FOR r IN SELECT child.relname 8 | FROM pg_inherits 9 | JOIN pg_class parent 10 | ON pg_inherits.inhparent = parent.oid 11 | JOIN pg_class child 12 | ON pg_inherits.inhrelid = child.oid 13 | WHERE parent.relname = text(tbl) -- enumerate child partitions of tbl via pg_inherits 14 | LOOP 15 | EXECUTE 'GRANT ' || perms || ' ON TABLE ' || r.relname || ' TO ' || grantie; 16 | END LOOP; 17 | EXECUTE 'GRANT ' || perms || ' ON TABLE ' || text(tbl) || ' TO ' || grantie; -- also grant on the parent table itself 18 | END; 19 | $$ LANGUAGE plpgsql; 20 | 21 | CREATE TABLE IF NOT EXISTS template 22 | ( 23 | id BIGINT GENERATED BY DEFAULT AS IDENTITY, 24 | rh_account_id INT NOT NULL REFERENCES rh_account (id), 25 | uuid UUID NOT NULL, 26 | 
name TEXT NOT NULL CHECK (not empty(name)), 27 | description TEXT CHECK (NOT empty(description)), 28 | config JSONB, 29 | creator TEXT CHECK (NOT empty(creator)), 30 | published TIMESTAMP WITH TIME ZONE, 31 | last_edited TIMESTAMP WITH TIME ZONE, 32 | PRIMARY KEY (rh_account_id, id), 33 | UNIQUE(rh_account_id, uuid) 34 | ) PARTITION BY HASH (rh_account_id); -- hash-partitioned by account 35 | 36 | SELECT create_table_partitions('template', 16, 37 | $$WITH (fillfactor = '70', autovacuum_vacuum_scale_factor = '0.05')$$); 38 | 39 | SELECT grant_table_partitions('SELECT', 'template', 'manager'); 40 | SELECT grant_table_partitions('SELECT, INSERT, UPDATE, DELETE', 'template', 'listener'); 41 | SELECT grant_table_partitions('SELECT', 'template', 'evaluator'); 42 | SELECT grant_table_partitions('SELECT', 'template', 'vmaas_sync'); 43 | 44 | GRANT SELECT,USAGE ON SEQUENCE public.template_id_seq TO evaluator; 45 | GRANT SELECT,USAGE ON SEQUENCE public.template_id_seq TO listener; 46 | GRANT SELECT,USAGE ON SEQUENCE public.template_id_seq TO vmaas_sync; 47 | 48 | ALTER TABLE system_platform ADD COLUMN IF NOT EXISTS template_id BIGINT; 49 | ALTER TABLE system_platform ADD CONSTRAINT template_id 50 | FOREIGN KEY (rh_account_id, template_id) REFERENCES template (rh_account_id, id); -- composite FK keeps a system and its template in the same account 51 | -------------------------------------------------------------------------------- /database_admin/migrations/125_yum_checksum.down.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION check_unchanged() 2 | RETURNS TRIGGER AS 3 | $check_unchanged$ 4 | BEGIN 5 | IF (TG_OP = 'INSERT') AND 6 | NEW.unchanged_since IS NULL THEN 7 | NEW.unchanged_since := CURRENT_TIMESTAMP; 8 | END IF; 9 | IF (TG_OP = 'UPDATE') AND 10 | (NEW.json_checksum <> OLD.json_checksum OR NEW.yum_updates <> OLD.yum_updates) THEN -- revert trigger to the pre-125 comparison against yum_updates 11 | NEW.unchanged_since := CURRENT_TIMESTAMP; 12 | END IF; 13 | RETURN NEW; 14 | END; 15 | $check_unchanged$ 16 | LANGUAGE 'plpgsql'; 17 | 18 | 19 | ALTER TABLE 
system_platform DROP COLUMN IF EXISTS yum_checksum; 20 | -------------------------------------------------------------------------------- /database_admin/migrations/125_yum_checksum.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD COLUMN IF NOT EXISTS yum_checksum TEXT CHECK (NOT empty(yum_checksum)); 2 | 3 | CREATE OR REPLACE FUNCTION check_unchanged() 4 | RETURNS TRIGGER AS 5 | $check_unchanged$ 6 | BEGIN 7 | IF (TG_OP = 'INSERT') AND 8 | NEW.unchanged_since IS NULL THEN 9 | NEW.unchanged_since := CURRENT_TIMESTAMP; 10 | END IF; 11 | IF (TG_OP = 'UPDATE') AND 12 | (NEW.json_checksum <> OLD.json_checksum OR NEW.yum_checksum <> OLD.yum_checksum) THEN -- now compares the new yum_checksum column rather than yum_updates 13 | NEW.unchanged_since := CURRENT_TIMESTAMP; 14 | END IF; 15 | RETURN NEW; 16 | END; 17 | $check_unchanged$ 18 | LANGUAGE 'plpgsql'; 19 | -------------------------------------------------------------------------------- /database_admin/migrations/126_template_environment.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE template DROP COLUMN IF EXISTS environment_id; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/126_template_environment.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE template ADD COLUMN IF NOT EXISTS environment_id TEXT CHECK (NOT empty(environment_id)); 2 | 3 | UPDATE template set environment_id = REPLACE(uuid::text, '-', ''); -- backfill: template uuid without dashes 4 | 5 | ALTER TABLE template ALTER COLUMN environment_id SET NOT NULL; 6 | -------------------------------------------------------------------------------- /database_admin/migrations/127_template_arch_version.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE template DROP COLUMN IF EXISTS version; 2 | ALTER TABLE template DROP COLUMN IF EXISTS arch; 3 | 
-------------------------------------------------------------------------------- /database_admin/migrations/127_template_arch_version.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE template ADD COLUMN IF NOT EXISTS arch TEXT CHECK (NOT empty(arch)); -- NOTE(review): empty() is a project-defined helper — presumed to reject '' values; confirm 2 | ALTER TABLE template ADD COLUMN IF NOT EXISTS version TEXT CHECK (NOT empty(version)); 3 | ALTER TABLE system_platform ADD COLUMN IF NOT EXISTS arch TEXT CHECK (NOT empty(arch)); 4 | -------------------------------------------------------------------------------- /database_admin/migrations/128_system_platform_bootc.down.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform DROP COLUMN IF EXISTS bootc; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/128_system_platform_bootc.up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD COLUMN IF NOT EXISTS bootc BOOLEAN NOT NULL DEFAULT false; 2 | -------------------------------------------------------------------------------- /database_admin/migrations/129_create_pg_repack.up.sql: -------------------------------------------------------------------------------- 1 | DO $$ 2 | BEGIN 3 | IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name = 'pg_repack') THEN -- guard: install only where the extension package is present on the server 4 | CREATE EXTENSION IF NOT EXISTS pg_repack; 5 | END IF; 6 | END 7 | $$; 8 | -------------------------------------------------------------------------------- /database_admin/migrations/130_recreate_pg_repack_extension.up.sql: -------------------------------------------------------------------------------- 1 | DO $$ 2 | BEGIN 3 | DROP EXTENSION IF EXISTS pg_repack; -- drop-and-recreate; the migration name suggests picking up a newer pg_repack version — TODO confirm 4 | IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name = 'pg_repack') THEN 5 | CREATE EXTENSION pg_repack; 6 | END IF; 7 | END 8 | $$; 9 | 
-------------------------------------------------------------------------------- /database_admin/migrations/131_remove_advisory_old_package_data.up.sql: -------------------------------------------------------------------------------- 1 | update advisory_metadata set package_data = null where package_data is not json array; -- SQL/JSON predicate (IS JSON ARRAY), requires PostgreSQL 16+ 2 | -------------------------------------------------------------------------------- /database_admin/migrations/133_null_advisory_timestamps.up.sql: -------------------------------------------------------------------------------- 1 | UPDATE advisory_metadata SET public_date = NULL, modified_date = NULL where synced = 'f'; -- unsynced advisories get their dates cleared 2 | -------------------------------------------------------------------------------- /database_admin/schema/clear_db.sql: -------------------------------------------------------------------------------- 1 | DO 2 | $$ 3 | DECLARE 4 | stmt TEXT; 5 | BEGIN 6 | EXECUTE 'DROP EXTENSION IF EXISTS pgcrypto'; 7 | EXECUTE 'DROP EXTENSION IF EXISTS "uuid-ossp"'; 8 | FOR stmt IN (SELECT 'DROP ' || case when prokind = 'f' then 'FUNCTION ' else 'PROCEDURE ' end 9 | || ns.nspname || '.'
|| proname || '(' || oidvectortypes(proargtypes) || ') CASCADE;' 10 | FROM pg_proc 11 | INNER JOIN pg_namespace ns ON (pg_proc.pronamespace = ns.oid) 12 | WHERE ns.nspname = 'public' 13 | ORDER BY proname) -- deterministic drop order 14 | LOOP 15 | EXECUTE stmt; 16 | END LOOP; 17 | 18 | FOR stmt IN (SELECT 'DROP TABLE IF EXISTS "' || table_name || '" CASCADE;' 19 | FROM information_schema.tables 20 | WHERE table_schema = (SELECT current_schema()) 21 | AND table_type = 'BASE TABLE') -- base tables only; dependent views fall with DROP ... CASCADE 22 | LOOP 23 | EXECUTE stmt; 24 | END LOOP; 25 | 26 | END; 27 | $$ language plpgsql; 28 | -------------------------------------------------------------------------------- /database_admin/schema/create_users.sql: -------------------------------------------------------------------------------- 1 | DO 2 | $$ 3 | DECLARE 4 | usr text; 5 | BEGIN 6 | FOR usr IN 7 | SELECT name 8 | FROM (VALUES ('evaluator'), ('listener'), ('manager'), ('vmaas_sync'), ('cyndi')) users (name) 9 | WHERE name NOT IN (SELECT rolname FROM pg_catalog.pg_roles) -- create only the roles that do not exist yet (idempotent) 10 | LOOP 11 | execute 'CREATE USER ' || usr || ';'; 12 | END LOOP; 13 | END 14 | $$ -------------------------------------------------------------------------------- /dev/create_inventory_hosts.sql: -------------------------------------------------------------------------------- 1 | -- Create "inventory.hosts" for testing purposes. In deployment it's created by remote Cyndi service. 
2 | CREATE SCHEMA IF NOT EXISTS inventory; 3 | 4 | DO 5 | $$ 6 | BEGIN 7 | -- The admin ROLE that allows the inventory schema to be managed 8 | CREATE ROLE cyndi_admin; 9 | -- The reader ROLE that provides SELECT access to the inventory.hosts view 10 | CREATE ROLE cyndi_reader; 11 | EXCEPTION 12 | WHEN DUPLICATE_OBJECT THEN NULL; 13 | END 14 | $$; 15 | 16 | CREATE TABLE IF NOT EXISTS inventory.hosts_v1_0 ( 17 | id uuid PRIMARY KEY, 18 | account character varying(10), 19 | display_name character varying(200) NOT NULL, 20 | tags jsonb NOT NULL, 21 | updated timestamp with time zone NOT NULL, 22 | created timestamp with time zone NOT NULL, 23 | stale_timestamp timestamp with time zone NOT NULL, 24 | system_profile jsonb NOT NULL, 25 | insights_id uuid, 26 | reporter character varying(255) NOT NULL, 27 | per_reporter_staleness jsonb NOT NULL, 28 | org_id character varying(36), 29 | groups jsonb 30 | ); 31 | 32 | DELETE FROM inventory.hosts_v1_0; -- reset test data on re-run 33 | 34 | CREATE INDEX IF NOT EXISTS hosts_v1_0_tags_index ON inventory.hosts_v1_0 USING GIN (tags JSONB_PATH_OPS); 35 | CREATE INDEX IF NOT EXISTS hosts_v1_0_insights_reporter_index ON inventory.hosts_v1_0 (reporter); 36 | CREATE INDEX IF NOT EXISTS hosts_v1_0_stale_timestamp_index ON inventory.hosts_v1_0 USING btree (stale_timestamp); 37 | CREATE INDEX IF NOT EXISTS hosts_v1_0_groups_index ON inventory.hosts_v1_0 USING GIN (groups JSONB_PATH_OPS); 38 | 39 | CREATE OR REPLACE VIEW inventory.hosts AS SELECT 40 | id, 41 | account, 42 | display_name, 43 | created, 44 | updated, 45 | stale_timestamp, 46 | stale_timestamp + INTERVAL '1' DAY * '7'::double precision AS stale_warning_timestamp, 47 | stale_timestamp + INTERVAL '1' DAY * '14'::double precision AS culled_timestamp, -- 7/14-day staleness windows — TODO confirm they match production Cyndi 48 | tags, 49 | system_profile, 50 | insights_id, 51 | reporter, 52 | per_reporter_staleness, 53 | org_id, 54 | groups 55 | FROM inventory.hosts_v1_0; 56 | 57 | GRANT SELECT ON TABLE inventory.hosts TO cyndi_reader; 58 | 
-------------------------------------------------------------------------------- /dev/database/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/cloudservices/postgresql-rds:16-4649c84 2 | 3 | # install pg_repack 4 | USER root 5 | RUN curl -o /etc/yum.repos.d/postgresql.repo \ 6 | https://copr.fedorainfracloud.org/coprs/g/insights/postgresql-16/repo/epel-8/group_insights-postgresql-16-epel-8.repo 7 | RUN dnf install -y pg_repack 8 | 9 | ADD /dev/database/init.sh /docker-entrypoint-initdb.d/ 10 | 11 | USER postgres 12 | 13 | # copy custom config to enable SSL connections 14 | ADD /dev/database/custom.conf /opt/app-root/src/postgresql-cfg/ 15 | ADD --chown=postgres:postgres /dev/database/secrets/pg.* /opt/app-root/src/certificates/ 16 | RUN chmod 0600 /opt/app-root/src/certificates/pg.key 17 | 18 | # copy config to enforce SSL connections to ensure all clients use SSL 19 | ADD /dev/database/pg_hba.conf /opt/app-root/ 20 | -------------------------------------------------------------------------------- /dev/database/custom.conf: -------------------------------------------------------------------------------- 1 | ssl=on 2 | hba_file='/opt/app-root/pg_hba.conf' 3 | ssl_cert_file='/opt/app-root/src/certificates/pg.crt' 4 | ssl_key_file='/opt/app-root/src/certificates/pg.key' 5 | 6 | listen_addresses = '*' 7 | max_connections=250 8 | logging_collector=off 9 | 10 | #uncomment if you want to see queries running in database 11 | #log_statement = 'all' 12 | -------------------------------------------------------------------------------- /dev/database/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PGUSER=$POSTGRES_USER 4 | export PGPASSWORD=$POSTGRES_PASSWORD 5 | export PGDATABASE=$POSTGRES_DB 6 | 7 | # allow to create users for patchman database admin user 8 | psql -c "ALTER USER ${POSTGRES_USER} WITH CREATEROLE" 9 | psql -c "ALTER USER 
${POSTGRES_USER} WITH SUPERUSER" 10 | -------------------------------------------------------------------------------- /dev/database/pg_hba.conf: -------------------------------------------------------------------------------- 1 | # TYPE DATABASE USER ADDRESS METHOD 2 | # "local" is for Unix domain socket connections only 3 | local all all trust 4 | # IPv4 local connections: 5 | host all all 127.0.0.1/32 trust 6 | # IPv6 local connections: 7 | host all all ::1/128 trust 8 | # Allow replication connections from localhost, by a user with the 9 | # replication privilege. 10 | local replication all trust 11 | host replication all 127.0.0.1/32 trust 12 | host replication all ::1/128 trust 13 | 14 | # Allow connections from all hosts using ssl. 15 | hostssl all all all md5 16 | 17 | # Allow replication connections from all hosts using ssl. 18 | hostssl replication all all md5 19 | -------------------------------------------------------------------------------- /dev/database/secrets/create_pg_certs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # CA 4 | openssl req -new -newkey rsa:4096 -days 10000 -x509 -subj "/CN=PGCA" -keyout pgca.key -out pgca.crt -nodes 5 | 6 | ## pg server 7 | #openssl req -new -newkey rsa:4096 -days 10000 -x509 -subj "/CN=postgres" -addext "subjectAltName = DNS:db,DNS:localhost" \ 8 | # -keyout pg.key -out pg.crt -nodes 9 | 10 | # csr - signing request 11 | openssl req -new -newkey rsa:4096 -subj "/CN=postgres" -out pg.csr -keyout pg.key -nodes 12 | 13 | # sign csr with ca 14 | echo "subjectAltName = DNS:db,DNS:localhost" >>/tmp/san.cnf 15 | openssl x509 -req -CA pgca.crt -CAkey pgca.key -in pg.csr -out pg.crt -days 10000 -CAcreateserial -extfile /tmp/san.cnf -text 16 | -------------------------------------------------------------------------------- /dev/database/secrets/pg.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE 
REQUEST----- 2 | MIIEWDCCAkACAQAwEzERMA8GA1UEAwwIcG9zdGdyZXMwggIiMA0GCSqGSIb3DQEB 3 | AQUAA4ICDwAwggIKAoICAQCyqWR2yICFdt+kOcvv1/YoLR9wKcc+dKFtEh1ZQYhx 4 | XL4tpb5JfCl4D7xwe2BWZb55J9+3W1QI7QSJpy3/mdBISVYpOxA79FFtNBq1TQU2 5 | DIslTa4BrWhAcyC5nbeyeBA3rSdmnCy7rXk1qwBxSp51cRrX/Q5dQbUpa+bSwcD+ 6 | ThCo0Dnw1w5lWd5ix44xsCA5trn5qn4KMmmSw9puidU0AD2Cfi/LQ9aWRsbeDWTH 7 | ck5/ZHLq2u2oJiedKx+L3C3W3g6gBNtKWE6ucnK5M0fBLDDbqFbhfRE6zNf6TrS7 8 | loOQjRpdxZ9olRPBNIsODjZlhTK3IAER3UahbC/Myy0RCu4DQMwDWxkOaG1I5ujI 9 | csNv2XrsRNxoZV4y74MoGO1RlRWksVNDTuONKOy8/JaihpudiVit8lcb2kwwQIcm 10 | DgZ3gVbvcIsONHdl4pp2R6P6rzAajRoAfAymR29Kn91Xj/E/fbXPP8FADhNtyKWd 11 | ifwVGZCyaXRY+fRvmfQrHDPbqGrnk4shUfH7/LynIslhxSYBO0SHrlBUOtj5ykMP 12 | XjrNZGpimnzlI6pApJAa6k+YKges9rikkjMbvxnJh90guXlT61FLacIEV19g2YYk 13 | Exy8v5YhjLiM/1+v90yVLBAFeedPJBcJm0/7LU8pfZh/7EuYzvkCEcdhYFLjeByz 14 | nQIDAQABoAAwDQYJKoZIhvcNAQELBQADggIBAAzcFX2AL+htz2wwzR3RYo7zzOcq 15 | 1RRZ6OY0SDhOz8f9xRQb23GhcVpNtmtZEuNM3QQYxSBT+20QdADMTuzMK7Io2GOd 16 | 5bBm/kj1Euq8WMkcXeS5SZTgUE67UJiGQVNVkEAkzSpqBCBciOXAXWAwweo429Rg 17 | cvaXFk+cSGsBlpj38ZTuK3G5Phgc9fbGqoRNEN7+rLjwTfGRMAVjpxtoFGOizXax 18 | Vvj2yiPGpm/koWxZwUG1n4NOktsOT4KM8oaAuAv45HHM3gIUNlw5fNzKCjj2JyDU 19 | PGSI6na7KT7bs2NYAukz2OauPgRPH2gvntVUefE4Wb0KH16QXlhMaWFlTueizbeB 20 | YsDx7dPC/UVGA4nS5EtCZIsj3cnm8Dsr2kfTvuQl4Yx1bIYVW2jshQT6Gb5GgqnJ 21 | CXTHDhuFzc8/JpSEnKR4h84zRhU2D279dazUtIKqm0e6jJhUbiVEPBPjExUpTyjS 22 | NpTQfVHwZD8/MUImmtOfXKiyowr2KtBtJBTS43QMzuKv29Mub1nRYfDkThzECi6P 23 | 3EuZKTEa7pvpZs7mjcIp+h5UP/qUMdCjwu5crtWG4RbJR9qKPja/ACDhcCohQhxN 24 | Hk54CeO/C8/JYn6t1zJ1YUGOfbRPo+jBMculIGrcKC1/FHpXj8yg/MA2wK9qiy7Y 25 | CiCFs27N4k+auE7t 26 | -----END CERTIFICATE REQUEST----- 27 | -------------------------------------------------------------------------------- /dev/database/secrets/pgca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE/zCCAuegAwIBAgIUI4cIE7HB+HFXfWbjfxskKWYk3Z8wDQYJKoZIhvcNAQEL 3 | 
BQAwDzENMAsGA1UEAwwEUEdDQTAeFw0yMTA2MjIwOTM5NDlaFw00ODExMDcwOTM5 4 | NDlaMA8xDTALBgNVBAMMBFBHQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK 5 | AoICAQDELKrbv8PPj7wUmA3GPTd1v64lAZd6IeAi1OzCxJLsi6z7Wqfhgt2sYbER 6 | dB6OVhVw70mcGYmuTP0qq+xVp41ccH04eT/PZQs8pNIsYoWXEiSB7+1SLq8d9JVI 7 | HHj4KdRz+5pRUbu0zwjTNWG4JwFqxLF3wsJAARtAuj6xOt60p5R28T7NOoR0NJAZ 8 | HIx7Q4Dh0tS2N1yj0U1F0JGAWT/bXjovn0BNPbdsk3czc0S327bi8+hYu5spkve0 9 | rdi2DqIOx8zGKMRSFLIJBJDreQM3PntrfWg/wxLDKYKrAw5vximNi9AlTb0vkMvQ 10 | oBmFsVeRJ1cvJcD54DOoG4mDd2qEiTlU0t6x+4DKcqv7V81IUw5dpancfspOl+/d 11 | mXyWWj+h/NeNoEx7amUP1HpPbhHKnCMoV7JPg1crgVQeV1qtq7CIzz1X3MStyaiJ 12 | N3yE+JYIUj9+5mJuiAinz3GQz2Kh5q+5rSXnvbFn45heQVo5k7S6wJJbKWVfmbTJ 13 | gDdNMuIeAR/DtUbmu9HzTpYGO+WhxEdBQGZC3An2zi52tIEhrOSIIu8RLqCUUUNF 14 | VEmvvsmy8Qv+IrL5HeXYkKYPjiMXsF77F0WMwOaHnetm7rmZx0HOU7Sm8u0jK96A 15 | wssrZyVgdVAXMHdY6covtMM7HjLIl4buygnKaEGr3Q71a4oUHQIDAQABo1MwUTAd 16 | BgNVHQ4EFgQUfQK0juCA4jhDtYj3XNv42JEveLQwHwYDVR0jBBgwFoAUfQK0juCA 17 | 4jhDtYj3XNv42JEveLQwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC 18 | AgEAJpY536mWsA72vls8K/clks8S6RXYjDQo0+bYxlDhZrVKT93JkyMocpUHMPvR 19 | OuzkglsYlCFuiZf3rdE4h6t6G4AKeXHma52DdLMNIgdzCp5o/r1oin3xz5dWUim+ 20 | cHZuGg43RQzi32XKKH2Ln9vUiqm2vKcSZxAWbvsZtCBunJHB4pj4sYHZFjFl59nv 21 | HCHb+lv5/uFfUFgRTr1yI7JFrG7POz2R+LvI+QklMCuWWe+xcb7uwyR+UBGitn9l 22 | VBSuqV3wqRFiDzVOx2kl59ilrVqiTlVB43641+bgLcsbslp2c2uOct0ElaL/EoKL 23 | 7BFg8LWcKY3O+skUgvgApVfPMmd4ymYVk+veZ8vT1rhi5VLjbhjtC6WuzasWdSQw 24 | ylS3GQcAAKw0KoyffimPyxB6PKKhi+/lx01530opuCd4xy6v+CgpDdDXE0m4jDpj 25 | 9tH1tOZQAd2ZNMyz3371xkWeIUODQrjbY+tJDdIs2jxnzMJc5MzzOTEX6+5RuaaO 26 | /Z4dmmekr8DQJoI0MpB1/OOs/01N7c2ZLv9hdBZwoNCYnSCJbNR7RqwZlI1giNwv 27 | jD1lLdUWJkRScxhQHxMTj3P3kEL5e4oBN72VuBdytg5O3EHFhJyvQ4sXRarjp/kc 28 | GxE2VhCpPEVkAOWj+sT6S7UvG3QrMgWnmuOjivODBcaBN3E= 29 | -----END CERTIFICATE----- 30 | -------------------------------------------------------------------------------- /dev/database/secrets/pgca.srl: 
-------------------------------------------------------------------------------- 1 | 574BEEE4FAC2CA22D642DA94C4766C9C4AD1A9B9 2 | -------------------------------------------------------------------------------- /dev/grafana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM grafana/grafana:12.0.1 2 | 3 | USER root 4 | RUN apk add --no-cache python3 py3-yaml 5 | USER grafana 6 | 7 | ENV GF_AUTH_ANONYMOUS_ENABLED=true 8 | ENV GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 9 | ENV GF_AUTH_DISABLE_LOGIN_FORM=true 10 | 11 | ADD /dev/grafana/extract_dashboard.py /usr/local/bin 12 | ADD /dashboards/grafana/datasources /etc/grafana/provisioning/datasources 13 | ADD /dashboards/grafana/dashboards /etc/grafana/provisioning/dashboards 14 | 15 | ADD /dashboards/app-sre/grafana-dashboard-insights-patchman-engine-general.configmap.yaml /etc/grafana 16 | 17 | RUN extract_dashboard.py /etc/grafana/provisioning/dashboards/grafana-dashboard-insights-patchman-engine-general.json 18 | -------------------------------------------------------------------------------- /dev/grafana/extract_dashboard.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import yaml 4 | 5 | 6 | with sys.stdin as f: 7 | configmap = yaml.safe_load(f) 8 | dashboard_json = configmap["data"]["grafana.json"] 9 | replaced = dashboard_json.replace("$datasource", "Prometheus") 10 | print(replaced) 11 | -------------------------------------------------------------------------------- /dev/kafka/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/psegedy/cp-kafka:7.7.0 2 | 3 | ADD /dev/kafka/entrypoint.sh /app/entrypoint.sh 4 | ADD /dev/kafka/setup.sh /app/setup.sh 5 | ADD /dev/kafka/secrets /etc/kafka/secrets 6 | 7 | CMD ["/app/entrypoint.sh"] 8 | -------------------------------------------------------------------------------- 
/dev/kafka/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /app/setup.sh 2>&1 | grep -v '^WARNING: Due to limitations in metric names' & 4 | 5 | exec /etc/confluent/docker/run 2>&1 \ 6 | | grep -v -E ' (TRACE|DEBUG|INFO) ' 7 | -------------------------------------------------------------------------------- /dev/kafka/secrets/broker_creds: -------------------------------------------------------------------------------- 1 | confluent 2 | -------------------------------------------------------------------------------- /dev/kafka/secrets/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE+zCCAuOgAwIBAgIUTfQ1vc8lvsLhB4CdAo1abb9mZtwwDQYJKoZIhvcNAQEL 3 | BQAwDTELMAkGA1UEAwwCQ0EwHhcNMjEwNjA4MTExNDQ4WhcNNDgxMDI0MTExNDQ4 4 | WjANMQswCQYDVQQDDAJDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB 5 | ALGtZW97lg248ulqBLH27a1mdbbEsZY9z5ky4ZCWxcpz1uhxrnoahqgqsxgftXJB 6 | zoKDuw72FdcKRVyHcETjmClcqZTzvMEwLYxDyOD17+zfJqGTvhfAuAwG1ra0nSnI 7 | Qw5VqpYBDnrrzYZj8SYrWuoSwo0JN9UHAPPkgy5sXyIDkhhCG4KezSGyer1NnKT4 8 | +4t8LnRFymsDaGauismj7rdyopvM47bxcjiFgkHSdDkGk6NGb0rjJP5t5iggutqu 9 | jKT4Eyudd1med1YbaoWrui0eBGIyLtlf/jUQDeQ7sqkKnZlgOGP8NTHl5Y9jkHwX 10 | zbJwovOkc3T/fnJ+ud0oBymfRcYFrdOXA7xtjBamkpCzcR6SHj4si/bAwxIuRF1+ 11 | amiyl1UFTFPVeZePYmn3j3iay+IVf+T+0vQQ/Sp2tcvxOhJHtaJ0oQmlINUTnS8i 12 | woiKgJBt7SlJLeImf7EvR5Y0Ob54Iq8N69nDgFox79Uk9rZ/ik2IUVGUdjjPlZ6W 13 | cNtkjlpk60M1vlgQkiLEb4aOrULy76eNYQIslkrce5R1kC8N45BbQ+fiJzDMoO/C 14 | XC1mCscQ7F4PZH4NNtuKo0ktaquDcnlzcp/GLyLbAaCc5OB+EHent5UF+UNp9d50 15 | uziu/9Nx+YEBL+LLK/dxL/bJ4PXKHkVbW8bp2D06dmI3AgMBAAGjUzBRMB0GA1Ud 16 | DgQWBBS8g/G+Jyv/VLuWfvwdBAQc+Lnh2TAfBgNVHSMEGDAWgBS8g/G+Jyv/VLuW 17 | fvwdBAQc+Lnh2TAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAC 18 | LpL7BDXC7cJoPcDSHFpsGgWryBN1BKiKpA0bRe5cnlpcH2hUVDw1hceW6vJDa1fP 19 | AwRm5bczkbUk3Tlh9Wd0yiO8w9cLdXtZRdNiQmaprHETe69qT9KcgnaeZc8Prw4a 20 | 
6ar/hw1oHTwBAxqArOfxefwxKlwxN+ermkwj/9yHMX3yHWCYohKv+XCBTVkZ2TJF 21 | R6Q4fhi4W5sJZ6MhG78Ddqm0eu9yQovnHItm9D3QRgHqgmntcH/3WvpnZt5IMR4F 22 | H6qqQ1b6U/J4+bYgzSleb5Z0j2NVMnhHaQlV6UNCbWP0v6gLVOoih1DeIiH4Cfwj 23 | bR6HvMB+FcgkXCRFrtXH13f4KlAzkC7S7pldKHZKS33JttZi+MVHGrPui3ithl6h 24 | v1vVeEs/68k43KLuBQH1ARK6NaNl2fPiDhptIu13fax06HrrjhzknNGINFQbO83K 25 | vD/qCAL906JAMeyW+UzWoX3DH/LphYTS8IvbXRmk9EhQIPsl6tYXRWWq5U7xKG78 26 | Tvq+KcN+cIz4JCcNYwe3BaK/5lcJ6ogz43wvQ/lu++snuXTN+3cMI3BwqcJQxDjb 27 | p+tAVlWt76ALcoLWxgev1ypNNZEMLkboZ8X6ha8RY10mmh3PMgYmsNpPAxSHJd5L 28 | 6UB/iv6x+Zqyi2eCwFqyg+8emJGhXuu9KjnoMKP6Yw== 29 | -----END CERTIFICATE----- 30 | -------------------------------------------------------------------------------- /dev/kafka/secrets/create_certs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash -e 2 | 3 | # 1. Create own private Certificate Authority (CA) 4 | openssl req -new -newkey rsa:4096 -days 10000 -x509 -subj "/CN=CA" -keyout ca.key -out ca.crt -nodes 5 | 6 | # 2. Create kafka server certificate and store in keystore 7 | openssl req -new -newkey rsa:4096 -days 10000 -x509 -subj "/CN=kafka" -addext "subjectAltName = DNS:kafka,DNS:localhost" \ 8 | -keyout kafka.key -out kafka.crt -nodes 9 | openssl pkcs12 -export -in kafka.crt -inkey kafka.key -out kafka.p12 -password pass:confluent 10 | rm -f kafka.broker.keystore.jks 11 | keytool -importkeystore -destkeystore kafka.broker.keystore.jks -deststorepass confluent -destkeypass confluent -deststoretype pkcs12 -destalias mykey \ 12 | -srcstorepass confluent -srckeystore kafka.p12 -srcstoretype pkcs12 -srcalias 1 -noprompt 13 | # verify certificate 14 | keytool -list -v -keystore kafka.broker.keystore.jks -storepass confluent 15 | 16 | # 3. Create Certificate signed request (CSR) 17 | keytool -keystore kafka.broker.keystore.jks -certreq -file kafka.csr -storepass confluent -keypass confluent 18 | 19 | # 4. 
Get CSR Signed with the CA: 20 | echo "subjectAltName = DNS:kafka,DNS:localhost" >>san.cnf 21 | openssl x509 -req -CA ca.crt -CAkey ca.key -in kafka.csr -out kafka-signed.crt -days 10000 -CAcreateserial -passin pass:confluent -extfile san.cnf 22 | # verify certificate 23 | keytool -printcert -v -file kafka-signed.crt -storepass confluent 24 | 25 | # 5. Import CA certificate in KeyStore: 26 | keytool -keystore kafka.broker.keystore.jks -alias CARoot -import -file ca.crt -storepass confluent -keypass confluent -noprompt 27 | 28 | # 6. Import Signed CSR In KeyStore: 29 | keytool -keystore kafka.broker.keystore.jks -import -file kafka-signed.crt -storepass confluent -keypass confluent -noprompt 30 | 31 | # 7. Import CA certificate In TrustStore: 32 | rm -f kafka.broker.truststore.jks 33 | keytool -keystore kafka.broker.truststore.jks -alias CARoot -import -file ca.crt -storepass confluent -keypass confluent -noprompt 34 | 35 | rm -f ca.{key,srl} kafka.{crt,csr,key,p12} kafka-signed.crt san.cnf 36 | -------------------------------------------------------------------------------- /dev/kafka/secrets/kafka.broker.keystore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/dev/kafka/secrets/kafka.broker.keystore.jks -------------------------------------------------------------------------------- /dev/kafka/secrets/kafka.broker.truststore.jks: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/dev/kafka/secrets/kafka.broker.truststore.jks -------------------------------------------------------------------------------- /dev/kafka/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | #wait until kafka is ready 4 | sleep 5 5 | 6 | # create topics 
with multiple partitions for scaling 7 | for topic in "platform.inventory.events" "patchman.evaluator.upload" \ 8 | "patchman.evaluator.recalc" "platform.remediation-updates.patch" "platform.notifications.ingress" \ 9 | "platform.payload-status" "test" \ 10 | "platform.content-sources.template" 11 | do 12 | until /usr/bin/kafka-topics --create --if-not-exists --topic $topic --partitions 1 --bootstrap-server kafka:9092 \ 13 | --replication-factor 1; do 14 | echo "Unable to create topic $topic" 15 | sleep 1 16 | done 17 | echo "Topic $topic created successfully" 18 | done 19 | # start simple http server so other components can check that kafka has fully started 20 | while : ; do 21 | nc -l -p 9099 -c 'echo -e "HTTP/1.1 200 OK\n\nTOPICS READY"'; 22 | done 23 | -------------------------------------------------------------------------------- /dev/scripts/advisories_list.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IDENTITY="$($(dirname "$0")/identity.sh)" 4 | 5 | curl -v -H "x-rh-identity: $IDENTITY" http://localhost:8080/api/patch/v3/advisories | python3 -m json.tool 6 | -------------------------------------------------------------------------------- /dev/scripts/advisory_detail.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IDENTITY="$($(dirname "$0")/identity.sh)" 4 | ADVISORY=${1:-RH-1} 5 | 6 | curl -v -H "x-rh-identity: $IDENTITY" http://localhost:8080/api/patch/v3/advisories/$ADVISORY | python3 -m json.tool 7 | -------------------------------------------------------------------------------- /dev/scripts/advisory_systems.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IDENTITY="$($(dirname "$0")/identity.sh)" 4 | ADVISORY=${1:-RH-1} 5 | 6 | curl -v -H "x-rh-identity: $IDENTITY" http://localhost:8080/api/patch/v3/advisories/$ADVISORY/systems | python3 -m json.tool 7 | 
-------------------------------------------------------------------------------- /dev/scripts/docker-compose-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -n $DB_HOST ]] ; then 4 | ./dev/scripts/wait-for-services.sh 5 | fi 6 | 7 | if [[ -n $KAFKA_READY_ADDRESS ]] ; then 8 | ./dev/scripts/wait-for-kafka.sh 9 | fi 10 | 11 | exec ./scripts/entrypoint.sh "$@" 12 | -------------------------------------------------------------------------------- /dev/scripts/ephemeral-port-forward.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #PODS="$(oc get services -o name)" 4 | 5 | fwd() { 6 | service=$1 7 | ports=$2 8 | 9 | oc port-forward "$service" $ports >/dev/null & 10 | echo $service forwarded to $ports 11 | } 12 | 13 | fwd service/ingress-service 8000:8000 14 | fwd service/patchman-db 5433:5432 15 | fwd service/patchman-manager 8080:8000 16 | 17 | wait 18 | -------------------------------------------------------------------------------- /dev/scripts/identity-system.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This identity contains account_name = "0" 4 | SYSTEM_SUBSRIPTION_UUID=${1:-"cccccccc-0000-0000-0001-000000000004"} 5 | ORG_ID=${2:-"org_1"} 6 | 7 | encode() { 8 | local input=$(/dev/null ; then 10 | input=$(jq -cM <<<"$input") 11 | fi 12 | base64 -w 0 - <<<"$input" 13 | } 14 | 15 | encode </dev/null ; then 9 | input=$(jq -cM <<<"$input") 10 | fi 11 | base64 -w 0 - <<<"$input" 12 | } 13 | 14 | encode < < message_file.json") 16 | os.Exit(1) 17 | } 18 | 19 | // parse args 20 | topic := os.Args[1] 21 | 22 | // read input json 23 | message, err := io.ReadAll(os.Stdin) 24 | 25 | if err != nil { 26 | panic(err) 27 | } 28 | 29 | platform.SendMessageToTopic(topic, string(message)) 30 | } 31 | -------------------------------------------------------------------------------- 
/dev/scripts/system_detail.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IDENTITY="$($(dirname "$0")/identity.sh)" 4 | UUID=${1:-00000000-0000-0000-0000-000000000001} 5 | 6 | curl -v -H "x-rh-identity: $IDENTITY" http://localhost:8080/api/patch/v3/systems/$UUID | python3 -m json.tool 7 | -------------------------------------------------------------------------------- /dev/scripts/system_packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IDENTITY="$($(dirname "$0")/identity.sh)" 4 | UUID=${1:-00000000-0000-0000-0000-000000000002} 5 | 6 | curl -v -H "x-rh-identity: $IDENTITY" http://localhost:8080/api/patch/v3/systems/$UUID/packages | python3 -m json.tool 7 | -------------------------------------------------------------------------------- /dev/scripts/systems_applicable.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IDENTITY="$($(dirname "$0")/identity.sh)" 4 | 5 | curl -v -H "x-rh-identity: $IDENTITY" http://localhost:8080/api/patch/v3/advisories/RH-1/systems | python3 -m json.tool 6 | -------------------------------------------------------------------------------- /dev/scripts/systems_list.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | IDENTITY="$($(dirname "$0")/identity.sh)" 4 | 5 | curl -v -H "x-rh-identity: $IDENTITY" http://localhost:8080/api/patch/v3/systems | python3 -m json.tool 6 | -------------------------------------------------------------------------------- /dev/scripts/wait-for-kafka.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | until curl -s $KAFKA_READY_ADDRESS >/dev/null ; do 4 | >&2 echo "Kafka topics not ready yet" 5 | sleep 1 6 | done 7 | -------------------------------------------------------------------------------- 
#!/usr/bin/bash

# Wait until PostgreSQL (when DB_HOST is set) is ready, then exec the given
# command. Readiness level is selected via env vars (checked with psql):
#   WAIT_FOR_EMPTY_DB - server merely accepts connections ("\q" only)
#   WAIT_FOR_FULL_DB  - schema fully migrated (schema_migrations.dirty='f')
#   (default)         - schema created (schema_migrations table exists)

set -e

export PGSSLMODE=$DB_SSLMODE
export PGSSLROOTCERT=$DB_SSLROOTCERT

if [ -n "$DB_HOST" ]; then
  >&2 echo "Checking if PostgreSQL is up"
  if [ -n "$WAIT_FOR_EMPTY_DB" ]; then
    CHECK_QUERY="\q" # Wait only for empty database.
  elif [ -n "$WAIT_FOR_FULL_DB" ]; then
    # Wait for full schema, all migrations, e.g. before tests (schema_migrations.dirty='f').
    CHECK_QUERY="SELECT 1/count(*) FROM schema_migrations WHERE dirty='f';"
  else
    # Wait for created schema.
    CHECK_QUERY="SELECT * FROM schema_migrations;"
  fi
  until PGPASSWORD="$DB_PASSWD" psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "${CHECK_QUERY}" -q 2>/dev/null; do
    >&2 echo "PostgreSQL is unavailable - sleeping (host: $DB_HOST, port: $DB_PORT, user: $DB_USER, db_name: $DB_NAME)"
    sleep 1
  done
else
  >&2 echo "Skipping PostgreSQL check"
fi

>&2 echo "Everything is up - executing command"
# exec "$@" preserves argument boundaries; the previous cmd="$@" + exec $cmd
# flattened all arguments into one string and re-split it on whitespace,
# breaking any argument that contained spaces.
exec "$@"
5 | build: 6 | context: . 7 | dockerfile: Dockerfile 8 | environment: 9 | - PROJECT_NAME="Patchman engine" 10 | - PROJECT_KEY=insights:patchman-engine 11 | - SONAR_HOST_URL 12 | - SONAR_CERT_URL 13 | - SONAR_LOGIN 14 | command: /run.sh 15 | volumes: 16 | - ../../:/usr/src 17 | security_opt: 18 | - label=disable 19 | -------------------------------------------------------------------------------- /dev/sonar/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Download and add CA certificate if provided 4 | if [[ ! -z "$SONAR_CERT_URL" ]] 5 | then 6 | curl $SONAR_CERT_URL -o /CA.crt 7 | /usr/lib/jvm/jre-1.8.0/bin/keytool \ 8 | -keystore /CA.keystore \ 9 | -import -alias CA \ 10 | -file /CA.crt \ 11 | -noprompt -storepass passwd 12 | export SONAR_SCANNER_OPTS='-Djavax.net.ssl.trustStore=/CA.keystore -Djavax.net.ssl.trustStorePassword=passwd' 13 | fi 14 | 15 | # Create SonarQube config file using env variables 16 | echo -e "sonar.projectKey=$PROJECT_KEY" > /sonar-scanner-${SONAR_VERSION}-linux/conf/sonar-scanner.properties 17 | echo -e "sonar.projectName=$PROJECT_NAME" >> /sonar-scanner-${SONAR_VERSION}-linux/conf/sonar-scanner.properties 18 | echo -e "sonar.host.url=$SONAR_HOST_URL" >> /sonar-scanner-${SONAR_VERSION}-linux/conf/sonar-scanner.properties 19 | echo -e "sonar.login=$SONAR_LOGIN" >> /sonar-scanner-${SONAR_VERSION}-linux/conf/sonar-scanner.properties 20 | 21 | # Do code analysis in mounted folder 22 | cd /usr/src 23 | exec /sonar-scanner-${SONAR_VERSION}-linux/bin/sonar-scanner 24 | -------------------------------------------------------------------------------- /docker-compose.test.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | services: 3 | db: 4 | container_name: db 5 | build: 6 | context: . 
7 | dockerfile: dev/database/Dockerfile 8 | image: patchman-engine-db 9 | ports: 10 | - 5433:5432 11 | env_file: 12 | - ./conf/database.env 13 | 14 | kafka: 15 | container_name: kafka 16 | build: 17 | context: . 18 | dockerfile: ./dev/kafka/Dockerfile 19 | image: patchman-engine-kafka 20 | env_file: 21 | - ./conf/kafka.env 22 | volumes: 23 | - ./dev/kafka/secrets:/etc/kafka/secrets 24 | - ./dev/kafka:/app 25 | ports: 26 | - 29092:29092 27 | - 29093:29093 28 | - 9092:9092 29 | - 9093:9093 30 | - 9099:9099 31 | security_opt: 32 | - label=disable 33 | # https://github.com/wurstmeister/kafka-docker/issues/389#issuecomment-875428899 34 | restart: always 35 | 36 | platform: 37 | container_name: platform 38 | build: 39 | context: . 40 | dockerfile: Dockerfile 41 | args: 42 | - INSTALL_TOOLS=yes 43 | target: buildimg 44 | image: patchman-engine-app 45 | env_file: 46 | - ./conf/common.env 47 | - ./conf/platform.env 48 | command: ./dev/scripts/docker-compose-entrypoint.sh platform 49 | restart: unless-stopped 50 | ports: 51 | - 9001:9001 52 | volumes: 53 | - ./conf/cdappconfig.json:/go/src/app/conf/cdappconfig.json 54 | depends_on: 55 | - kafka 56 | - db 57 | security_opt: 58 | - label=disable 59 | 60 | test: 61 | container_name: test 62 | image: patchman-engine-app 63 | env_file: 64 | - ./conf/common.env 65 | - ./conf/database.env 66 | - ./conf/database_admin.env 67 | - ./conf/gorun.env 68 | - ./conf/test.env 69 | depends_on: 70 | - db 71 | - platform 72 | user: root 73 | command: ./scripts/go_test_on_ci.sh 74 | volumes: 75 | - ./:/go/src/app/ 76 | security_opt: 77 | - label=disable 78 | -------------------------------------------------------------------------------- /docs/docs_test.go: -------------------------------------------------------------------------------- 1 | package docs 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/getkin/kin-openapi/openapi3" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | const openAPIPath = "v3/openapi.json" 12 | 13 | 
func TestValidateOpenAPI3DocStr(t *testing.T) { 14 | doc, err := os.ReadFile(openAPIPath) 15 | assert.Nil(t, err) 16 | _, err = openapi3.NewLoader().LoadFromData(doc) 17 | assert.Nil(t, err) 18 | } 19 | 20 | func TestFilterOpenAPIPaths1(t *testing.T) { 21 | nRemovedPaths := filterOpenAPI(EndpointsConfig{ 22 | EnableTemplates: true, 23 | }, openAPIPath, "/tmp/openapi-filter-test.json") 24 | assert.Equal(t, 0, nRemovedPaths) 25 | } 26 | 27 | func TestFilterOpenAPIPaths2(t *testing.T) { 28 | nRemovedPaths := filterOpenAPI(EndpointsConfig{ 29 | EnableTemplates: false, 30 | }, openAPIPath, "/tmp/openapi-filter-test.json") 31 | assert.Equal(t, 6, nRemovedPaths) 32 | } 33 | -------------------------------------------------------------------------------- /docs/md/database.md: -------------------------------------------------------------------------------- 1 | # Database 2 | 3 | ## Tables 4 | Main database tables description: 5 | - **system_platform** - stores info about registered systems. Mainly system inventory ID column (`inventory_id`) Red Hat account (`rh_account_id`) which system belongs to, JSON string with lists of installed packages, repos, modules (`vmaas_json`) needed for requesting VMaaS when evaluating system. It also stores aggregated results from evaluation - advisories counts by its types. Records are created and updated by both `listener` and `evaluator` components. 6 | - **advisory_metadata** - stores info about advisories (`description`, `summary`, `solution` etc.). It's synced and stored on trigger by `vmaas_sync` component. It allows to display detail information about the advisory. 7 | - **system_advisories** - stores info about advisories evaluated for particular systems (system - advisory M-N mapping table). Contains info when system advisory was firstly reported and patched (if so). Records are created and updated by `evaluator` component. It allows to display list of advisories related to a system. 
8 | - **advisory_account_data** - stores info about all advisories detected within at least one system that belongs to a given account. So it provides overall statistics about system advisories displayed by the application. 9 | - **package_name** - names of the packages installed on systems 10 | - **package** - list of all packages versions, precisely all EVRAs (epoch-version-release-arch) 11 | - **system_package2** - list of packages installed on a system 12 | 13 | ## Schema 14 | ![](graphics/db_diagram.png) 15 | -------------------------------------------------------------------------------- /docs/md/graphics/db_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/docs/md/graphics/db_diagram.png -------------------------------------------------------------------------------- /docs/md/graphics/icon_gopher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/docs/md/graphics/icon_gopher.png -------------------------------------------------------------------------------- /docs/md/graphics/icon_postgresql.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/docs/md/graphics/icon_postgresql.png -------------------------------------------------------------------------------- /docs/md/graphics/icon_python.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/docs/md/graphics/icon_python.png -------------------------------------------------------------------------------- /docs/md/graphics/icon_typescript.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/docs/md/graphics/icon_typescript.png -------------------------------------------------------------------------------- /docs/md/graphics/schema.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/patchman-engine/7e86762bf9f80fefde1a6fd2faa32cfa3613c03f/docs/md/graphics/schema.png -------------------------------------------------------------------------------- /evaluator/evaluate_baseline.go: -------------------------------------------------------------------------------- 1 | package evaluator 2 | 3 | import ( 4 | "app/base" 5 | "app/base/database" 6 | "app/base/models" 7 | "app/base/vmaas" 8 | "time" 9 | ) 10 | 11 | func limitVmaasToBaseline(system *models.SystemPlatform, vmaasData *vmaas.UpdatesV3Response) error { 12 | baselineConfig := database.GetBaselineConfig(system) 13 | if baselineConfig == nil { 14 | return nil // no baseline config, nothing to change 15 | } 16 | 17 | reportedMap := getReportedAdvisories(vmaasData) 18 | reportedNames := make([]string, 0, len(reportedMap)) 19 | for name := range reportedMap { 20 | reportedNames = append(reportedNames, name) 21 | } 22 | 23 | var filterOutNames []string 24 | err := database.DB.Model(&models.AdvisoryMetadata{}).Where("name IN (?)", reportedNames). 25 | Where("public_date >= ?", baselineConfig.ToTime.Truncate(24*time.Hour)). 
26 | Pluck("name", &filterOutNames).Error 27 | if err != nil { 28 | return base.WrapFatalDBError(err, "load reported advisories") 29 | } 30 | 31 | // create map of advisories we need to filter out 32 | filterOutNamesSet := make(map[string]struct{}, len(filterOutNames)) 33 | for _, i := range filterOutNames { 34 | filterOutNamesSet[i] = struct{}{} 35 | } 36 | 37 | updateList := vmaasData.GetUpdateList() 38 | modifiedUpdateList := make(map[string]*vmaas.UpdatesV3ResponseUpdateList, len(updateList)) 39 | for pkg, updates := range updateList { 40 | availableUpdates := updates.GetAvailableUpdates() 41 | for i := range availableUpdates { 42 | advisoryName := availableUpdates[i].GetErratum() 43 | if _, ok := filterOutNamesSet[advisoryName]; ok { 44 | availableUpdates[i].StatusID = APPLICABLE 45 | } else { 46 | availableUpdates[i].StatusID = INSTALLABLE 47 | } 48 | } 49 | updates.AvailableUpdates = &availableUpdates 50 | modifiedUpdateList[pkg] = updates 51 | } 52 | 53 | if vmaasData != nil && vmaasData.UpdateList != nil { 54 | vmaasData.UpdateList = &modifiedUpdateList 55 | } 56 | return nil 57 | } 58 | -------------------------------------------------------------------------------- /evaluator/evaluate_baseline_test.go: -------------------------------------------------------------------------------- 1 | package evaluator 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/models" 6 | "app/base/utils" 7 | "app/base/vmaas" 8 | 9 | "sort" 10 | "testing" 11 | 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func TestLimitVmaasToBaseline(t *testing.T) { 16 | utils.SkipWithoutDB(t) 17 | core.SetupTestEnvironment() 18 | configure() 19 | 20 | // a system without baseline 21 | system := models.SystemPlatform{ID: 5, RhAccountID: 1, BaselineID: nil} 22 | originalVmaasData := getVMaaSUpdates(t) 23 | vmaasData := getVMaaSUpdates(t) 24 | err := limitVmaasToBaseline(&system, &vmaasData) 25 | assert.Nil(t, err) 26 | assert.Equal(t, originalVmaasData, vmaasData) 27 | 28 | // a 
system with baseline but nothing filtered out 29 | system = models.SystemPlatform{ID: 3, RhAccountID: 1, BaselineID: utils.PtrInt64(2)} 30 | err = limitVmaasToBaseline(&system, &vmaasData) 31 | assert.Nil(t, err) 32 | assert.Equal(t, []string{"RH-1", "RH-100", "RH-2"}, errataInVmaasData(vmaasData, INSTALLABLE)) 33 | 34 | // a system with baseline and filtered errata 35 | system = models.SystemPlatform{ID: 1, RhAccountID: 1, BaselineID: utils.PtrInt64(1)} 36 | vmaasData = getVMaaSUpdates(t) 37 | err = limitVmaasToBaseline(&system, &vmaasData) 38 | assert.Nil(t, err) 39 | assert.Equal(t, []string{"RH-100"}, errataInVmaasData(vmaasData, INSTALLABLE)) 40 | assert.Equal(t, []string{"RH-1", "RH-2"}, errataInVmaasData(vmaasData, APPLICABLE)) 41 | } 42 | 43 | func errataInVmaasData(vmaasData vmaas.UpdatesV3Response, status int) []string { 44 | errata := make([]string, 0) 45 | for _, updates := range vmaasData.GetUpdateList() { 46 | availableUpdates := updates.GetAvailableUpdates() 47 | for _, u := range availableUpdates { 48 | if u.StatusID == status { 49 | advisoryName := u.GetErratum() 50 | errata = append(errata, advisoryName) 51 | } 52 | } 53 | } 54 | sort.Strings(errata) 55 | return errata 56 | } 57 | -------------------------------------------------------------------------------- /evaluator/remediations_test.go: -------------------------------------------------------------------------------- 1 | package evaluator 2 | 3 | import ( 4 | "app/base/utils" 5 | "app/base/vmaas" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | var testFfUpdates = []vmaas.UpdatesV3ResponseAvailableUpdates{ 12 | {Repository: utils.PtrString("repo1"), Releasever: utils.PtrString("ser1"), Basearch: utils.PtrString("i686"), 13 | Erratum: utils.PtrString("RH-1"), Package: utils.PtrString("firefox-0:77.0.1-1.fc31.x86_64")}, 14 | {Repository: utils.PtrString("repo1"), Releasever: utils.PtrString("ser1"), Basearch: utils.PtrString("i686"), 15 | Erratum: 
// configureStatus loads the advisory status id -> name mapping from the
// "status" DB table into the package-level STATUS map (the ids correspond
// to the INSTALLABLE/APPLICABLE constants above). Panics when the query
// fails, since the evaluator cannot operate without this mapping.
func configureStatus() {
	var rows []statusRow

	err := database.DB.Table("status s").Select("id, name").Scan(&rows).Error
	if err != nil {
		panic(err)
	}

	for _, r := range rows {
		STATUS[r.ID] = r.Name
	}
}
// Add caches the VMaaS response under the given package-list checksum.
// No-op when the cache is disabled or checksum is nil.
func (c *VmaasCache) Add(checksum *string, response *vmaas.UpdatesV3Response) {
	if c.enabled && checksum != nil {
		c.data.Add(*checksum, response)
		// NOTE(review): currentSize and the gauge are only ever incremented
		// (never decremented on LRU eviction, Reset, or re-adding the same
		// checksum), so the gauge tracks inserts rather than current
		// occupancy — confirm intent.
		if c.currentSize <= c.size {
			c.currentSize++
			vmaasCacheGauge.Inc()
		}
	}
}

// Reset drops all cached entries and records ts as the new validity
// timestamp (the VMaaS export time the cached data corresponds to).
func (c *VmaasCache) Reset(ts *types.Rfc3339TimestampWithZ) {
	if c.enabled {
		c.data.Purge()
		c.validity = ts
		vmaasCacheGauge.Set(0)
	}
}

// CheckValidity polls every checkDuration, comparing the cache's validity
// timestamp with the last VMaaS export sync, and purges the cache when a
// newer export exists. Loops forever — presumably meant to run in its own
// goroutine; confirm against the caller.
func (c *VmaasCache) CheckValidity() {
	for range time.Tick(c.checkDuration) {
		lastModifiedTS := vmaas_sync.GetLastSync(vmaas_sync.VmaasExported)
		if lastModifiedTS == nil || c.validity == nil || c.validity.Time().Before(*lastModifiedTS.Time()) {
			c.Reset(lastModifiedTS)
		}
	}
}
// int64ptr is a test helper returning a pointer to a copy of the given
// int64 value.
func int64ptr(x int64) *int64 {
	v := x
	return &v
}
_ "go.uber.org/automaxprocs" // automatically sets GOMAXPROCS based on the CPU limit 21 | ) 22 | 23 | func main() { 24 | base.HandleSignals() 25 | 26 | defer utils.LogPanics(true) 27 | if len(os.Args) > 1 { 28 | switch os.Args[1] { 29 | case "admin": 30 | turnpike.RunAdminAPI() 31 | return 32 | case "manager": 33 | manager.RunManager() 34 | return 35 | case "listener": 36 | listener.RunListener() 37 | return 38 | case "evaluator": 39 | evaluator.RunEvaluator() 40 | return 41 | case "migrate": 42 | database_admin.UpdateDB(os.Args[2]) 43 | return 44 | case "platform": 45 | platform.RunPlatformMock() 46 | return 47 | case "print_clowder_params": 48 | utils.PrintClowderParams() 49 | return 50 | case "check_upgraded": 51 | database_admin.CheckUpgraded(os.Args[2]) 52 | return 53 | case "job": 54 | runJob(os.Args[2]) 55 | return 56 | } 57 | } 58 | log.Panic("You need to provide a command") 59 | } 60 | 61 | func runJob(name string) { 62 | switch name { 63 | case "vmaas_sync": 64 | vmaas_sync.RunVmaasSync() 65 | case "system_culling": 66 | system_culling.RunSystemCulling() 67 | case "advisory_cache_refresh": 68 | caches.RunAdvisoryRefresh() 69 | case "delete_unused": 70 | cleaning.RunDeleteUnusedData() 71 | case "packages_cache_refresh": 72 | caches.RunPackageRefresh() 73 | case "repack": 74 | repack.RunRepack() 75 | case "clean_advisory_account_data": 76 | cleaning.RunCleanAdvisoryAccountData() 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /manager/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "app/base/utils" 5 | ) 6 | 7 | var ( 8 | // Use in-memory cache for /advisories/:id API 9 | EnableAdvisoryDetailCache = utils.PodConfig.GetBool("advisory_detail_cache", true) 10 | // Size of in-memory advisory cache 11 | AdvisoryDetailCacheSize = utils.PodConfig.GetInt("advisory_detail_cache_size", 100) 12 | // Load all advisories into 
cache at startup 13 | PreLoadCache = utils.PodConfig.GetBool("advisory_detail_cache_preload", true) 14 | // Use in-memory package cache 15 | EnabledPackageCache = utils.PodConfig.GetBool("package_cache", true) 16 | 17 | // Allow filtering by cyndi tags 18 | EnableCyndiTags = utils.PodConfig.GetBool("cyndi_tags", true) 19 | // Use precomputed system counts for advisories 20 | DisableCachedCounts = !utils.PodConfig.GetBool("cache_counts", true) 21 | // Satellite systems can't be assigned to baselines/templates 22 | EnableSatelliteFunctionality = utils.PodConfig.GetBool("satellite_functionality", true) 23 | 24 | // Send recalc message for systems which have been assigned to a different baseline 25 | EnableBaselineChangeEval = utils.PodConfig.GetBool("baseline_change_eval", true) 26 | // Send recalc message for systems which have been assigned to a different template 27 | EnableTemplateChangeEval = utils.PodConfig.GetBool("template_change_eval", true) 28 | // Honor rbac permissions (can be disabled for tests) 29 | EnableRBACCHeck = utils.PodConfig.GetBool("rbac", true) 30 | 31 | // Expose templates API (feature flag) 32 | EnableTemplates = utils.PodConfig.GetBool("templates_api", true) 33 | ) 34 | -------------------------------------------------------------------------------- /manager/controllers/advisory_systems_export_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/core" 5 | "net/http" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestAdvisorySystemsExportJSON(t *testing.T) { 13 | core.SetupTest(t) 14 | w := CreateRequestRouterWithPath("GET", "/:advisory_id", "RH-1", "", nil, "application/json", 15 | AdvisorySystemsExportHandler) 16 | 17 | var output []SystemDBLookupExtended 18 | CheckResponse(t, w, http.StatusOK, &output) 19 | assert.Equal(t, 6, len(output)) 20 | assert.Equal(t, output[0].ID, 
"00000000-0000-0000-0000-000000000001") 21 | assert.Equal(t, SystemTagsList{{"k1", "ns1", "val1"}, {"k2", "ns1", "val2"}}, output[0].SystemItemAttributes.Tags) 22 | } 23 | 24 | func TestAdvisorySystemsExportCSV(t *testing.T) { 25 | core.SetupTest(t) 26 | w := CreateRequestRouterWithPath("GET", "/:advisory_id", "RH-1", "", nil, "text/csv", AdvisorySystemsExportHandler) 27 | 28 | assert.Equal(t, http.StatusOK, w.Code) 29 | body := w.Body.String() 30 | lines := strings.Split(body, "\r\n") 31 | 32 | assert.Equal(t, 8, len(lines)) 33 | assert.Equal(t, 34 | "display_name,last_upload,stale,os,rhsm,stale_timestamp,stale_warning_timestamp,culled_timestamp,created,tags,"+ 35 | "groups,baseline_id,baseline_name,template_name,template_uuid,status,satellite_managed,built_pkgcache,id", lines[0]) 36 | 37 | assert.Equal(t, "00000000-0000-0000-0000-000000000001,2020-09-22T16:00:00Z,false,RHEL 8.10,8.10,2018-08-26T16:00:00Z,"+ 38 | "2018-09-02T16:00:00Z,2018-09-09T16:00:00Z,2018-08-26T16:00:00Z,\"[{'key':'k1','namespace':'ns1','value':'val1'},"+ 39 | "{'key':'k2','namespace':'ns1','value':'val2'}]\",\"[{'id':'inventory-group-1','name':'group1'}]\","+ 40 | "1,baseline_1-1,temp1-1,99900000-0000-0000-0000-000000000001,Installable,false,false,"+ 41 | "00000000-0000-0000-0000-000000000001", 42 | lines[1]) 43 | } 44 | -------------------------------------------------------------------------------- /manager/controllers/package_versions_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/core" 5 | "net/http" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestPackageVersions(t *testing.T) { 12 | core.SetupTest(t) 13 | w := CreateRequestRouterWithParams("GET", "/:package_name/versions", "firefox", "", nil, "", 14 | PackageVersionsListHandler, 3) 15 | 16 | var output PackageVersionsResponse 17 | assert.Greater(t, len(w.Body.Bytes()), 0) 18 | CheckResponse(t, w, 
http.StatusOK, &output) 19 | assert.Equal(t, 1, len(output.Data)) 20 | assert.Equal(t, "76.0.1-1.fc31.x86_64", output.Data[0].Evra) 21 | } 22 | 23 | func TestPackageVersionsInvalidName(t *testing.T) { 24 | core.SetupTest(t) 25 | w := CreateRequestRouterWithParams("GET", "/:package_name/versions", "not-existing", "", nil, "", 26 | PackageVersionsListHandler, 3) 27 | 28 | assert.Equal(t, http.StatusNotFound, w.Code) 29 | } 30 | -------------------------------------------------------------------------------- /manager/controllers/packages_export.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/utils" 5 | "app/manager/middlewares" 6 | 7 | "github.com/gin-gonic/gin" 8 | ) 9 | 10 | // nolint: lll 11 | // @Summary Show me all installed packages across my systems 12 | // @Description Show me all installed packages across my systems. Export endpoints are not paginated. 13 | // @ID exportPackages 14 | // @Security RhIdentity 15 | // @Accept json 16 | // @Produce json,text/csv 17 | // @Param sort query string false "Sort field" Enums(id,name,systems_installed,systems_installable,systems_applicable) 18 | // @Param search query string false "Find matching text" 19 | // @Param filter[name] query string false "Filter" 20 | // @Param filter[systems_installed] query string false "Filter" 21 | // @Param filter[systems_installable] query string false "Filter" 22 | // @Param filter[systems_applicable] query string false "Filter" 23 | // @Param filter[summary] query string false "Filter" 24 | // @Success 200 {array} PackageItem 25 | // @Failure 415 {object} utils.ErrorResponse 26 | // @Failure 500 {object} utils.ErrorResponse 27 | // @Router /export/packages [get] 28 | func PackagesExportHandler(c *gin.Context) { 29 | account := c.GetInt(utils.KeyAccount) 30 | groups := c.GetStringMapString(utils.KeyInventoryGroups) 31 | filters, err := ParseAllFilters(c, PackagesOpts) 32 | if err != nil { 33 | 
return 34 | } 35 | 36 | db := middlewares.DBFromContext(c) 37 | useCache := shouldUseCache(db, account, filters, groups) 38 | if !useCache { 39 | db.Exec("SET work_mem TO '?'", utils.CoreCfg.DBWorkMem) 40 | defer db.Exec("RESET work_mem") 41 | } 42 | query := packagesQuery(db, filters, account, groups, useCache) 43 | query, err = ExportListCommon(query, c, PackagesOpts) 44 | var data []PackageDBLookup 45 | 46 | if err != nil { 47 | return 48 | } // Error handled in method itself 49 | 50 | err = query.Find(&data).Error 51 | items, _ := PackageDBLookup2Item(data) 52 | if err != nil { 53 | LogAndRespError(c, err, "db error") 54 | return 55 | } 56 | 57 | OutputExportData(c, items) 58 | } 59 | -------------------------------------------------------------------------------- /manager/controllers/packages_export_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/core" 5 | "net/http" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestPackageExportJSON(t *testing.T) { 13 | core.SetupTest(t) 14 | w := CreateRequestRouterWithParams("GET", "/", "", "", nil, "application/json", PackagesExportHandler, 3) 15 | 16 | var output []PackageItem 17 | CheckResponse(t, w, http.StatusOK, &output) 18 | assert.Equal(t, 4, len(output)) 19 | assert.Equal(t, "kernel", output[0].Name) 20 | } 21 | 22 | func TestPackageExportCSV(t *testing.T) { 23 | core.SetupTest(t) 24 | w := CreateRequestRouterWithParams("GET", "/", "", "", nil, "text/csv", PackagesExportHandler, 3) 25 | 26 | assert.Equal(t, http.StatusOK, w.Code) 27 | body := w.Body.String() 28 | lines := strings.Split(body, "\r\n") 29 | 30 | assert.Equal(t, 6, len(lines)) 31 | assert.Equal(t, "name,summary,systems_installed,systems_installable,systems_applicable", lines[0]) 32 | 33 | assert.Equal(t, "kernel,The Linux kernel,3,2,2", lines[1]) 34 | } 35 | 
-------------------------------------------------------------------------------- /manager/controllers/paging.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | func CreateLinks(path string, offset, limit, total int, otherParams ...string) Links { 8 | var queryStr string 9 | 10 | for _, param := range otherParams { 11 | if len(param) > 0 { 12 | queryStr = fmt.Sprintf("%v&%v", queryStr, param) 13 | } 14 | } 15 | 16 | pager := pager{path, offset, limit, total, queryStr} 17 | links := Links{ 18 | First: pager.createLink(0), 19 | Last: pager.createLastLink(), 20 | Next: pager.createNextLink(), 21 | Previous: pager.createPreviousLink(), 22 | } 23 | 24 | return links 25 | } 26 | 27 | type pager struct { 28 | path string 29 | offset int 30 | limit int 31 | total int 32 | otherParams string 33 | } 34 | 35 | func (p pager) createLink(linkOffset int) string { 36 | link := fmt.Sprintf("%s?offset=%d&limit=%d%s", 37 | p.path, linkOffset, p.limit, p.otherParams) 38 | return link 39 | } 40 | 41 | func (p pager) createLastLink() string { 42 | lastOffset := ((p.total / p.limit) - 1) * p.limit 43 | if lastOffset < 0 { 44 | lastOffset = 0 45 | } 46 | 47 | return p.createLink(lastOffset) 48 | } 49 | 50 | func (p pager) createNextLink() *string { 51 | if p.total <= p.offset+p.limit { 52 | return nil 53 | } 54 | 55 | next := p.createLink(p.offset + p.limit) 56 | return &next 57 | } 58 | 59 | func (p pager) createPreviousLink() *string { 60 | if p.offset == 0 { 61 | return nil 62 | } 63 | curPage := p.offset / p.limit 64 | prevOffset := 0 65 | if curPage > 0 { 66 | prevOffset = (curPage - 1) * p.limit 67 | } 68 | link := p.createLink(prevOffset) 69 | return &link 70 | } 71 | -------------------------------------------------------------------------------- /manager/controllers/paging_test.go: -------------------------------------------------------------------------------- 1 | package 
controllers 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestFirst(t *testing.T) { 9 | pager := pager{"/", 0, 10, 1000, ""} 10 | assert.Equal(t, "/?offset=10&limit=10", *pager.createNextLink()) 11 | assert.Equal(t, "/?offset=990&limit=10", pager.createLastLink()) 12 | assert.Nil(t, pager.createPreviousLink()) 13 | } 14 | 15 | func TestMiddle(t *testing.T) { 16 | pager := pager{"/", 20, 10, 1000, ""} 17 | assert.Equal(t, "/?offset=30&limit=10", *pager.createNextLink()) 18 | assert.Equal(t, "/?offset=990&limit=10", pager.createLastLink()) 19 | assert.Equal(t, "/?offset=10&limit=10", *pager.createPreviousLink()) 20 | } 21 | 22 | func TestLast(t *testing.T) { 23 | pager := pager{"/", 990, 10, 1000, ""} 24 | assert.Nil(t, pager.createNextLink()) 25 | assert.Equal(t, "/?offset=990&limit=10", pager.createLastLink()) 26 | assert.Equal(t, "/?offset=980&limit=10", *pager.createPreviousLink()) 27 | } 28 | 29 | func TestFewItems(t *testing.T) { 30 | pager := pager{"/", 0, 10, 8, ""} 31 | assert.Nil(t, pager.createNextLink()) 32 | assert.Equal(t, "/?offset=0&limit=10", pager.createLastLink()) 33 | assert.Nil(t, pager.createPreviousLink()) 34 | } 35 | -------------------------------------------------------------------------------- /manager/controllers/status.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/database" 5 | "net/http" 6 | 7 | "github.com/gin-gonic/gin" 8 | ) 9 | 10 | // @Summary Status endpoint 11 | // @Success 200 {int} http.StatusOK 12 | // @Failure 503 {object} utils.ErrorResponse 13 | func Status(c *gin.Context) { 14 | sqlDB, _ := database.DB.DB() 15 | if err := sqlDB.Ping(); err != nil { 16 | LogAndRespStatusError(c, http.StatusServiceUnavailable, err, "Database not connected") 17 | } else { 18 | c.Status(http.StatusOK) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- 
/manager/controllers/structures.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | type Links struct { 4 | First string `json:"first" example:"/link/to/the/first"` 5 | Last string `json:"last" example:"/link/to/the/last"` 6 | Next *string `json:"next" example:"/link/to/the/next"` 7 | Previous *string `json:"previous" example:"/link/to/the/previous"` 8 | } 9 | 10 | type ListMeta struct { 11 | // Used response limit (page size) - pagination 12 | Limit int `json:"limit" example:"20"` 13 | 14 | // Used response offset - pagination 15 | Offset int `json:"offset" example:"0"` 16 | 17 | // Used sorting fields 18 | Sort []string `json:"sort,omitempty" example:"name"` 19 | 20 | // Used search terms 21 | Search string `json:"search,omitempty" example:"kernel"` 22 | 23 | // Used filters 24 | Filter map[string]FilterData `json:"filter"` 25 | 26 | // Total items count to return 27 | TotalItems int `json:"total_items" example:"1000"` 28 | 29 | // Some subtotals used by some endpoints 30 | SubTotals map[string]int `json:"subtotals,omitempty"` 31 | 32 | // Show whether customer has some registered systems 33 | HasSystems *bool `json:"has_systems,omitempty"` 34 | } 35 | 36 | type IDPlain struct { 37 | ID string `json:"id"` 38 | } 39 | 40 | type IDStatus struct { 41 | ID string `json:"id"` 42 | Status string `json:"status"` 43 | } 44 | 45 | type IDSatelliteManaged struct { 46 | ID string `json:"id"` 47 | SatelliteManaged bool `json:"satellite_managed"` 48 | } 49 | 50 | type IDsResponseCommon struct { 51 | IDs []string `json:"ids"` 52 | } 53 | 54 | type IDsPlainResponse struct { 55 | Data []IDPlain `json:"data"` 56 | IDsResponseCommon 57 | } 58 | 59 | type IDsStatusResponse struct { 60 | Data []IDStatus `json:"data"` 61 | // backward compatibility 62 | // TODO: delete later once UI is using only the new `data` field 63 | IDsResponseCommon 64 | } 65 | 66 | type IDsSatelliteManagedResponse struct { 67 | Data 
[]IDSatelliteManaged `json:"data"` 68 | // backward compatibility 69 | // TODO: delete later once UI is using only the new `data` field 70 | IDsResponseCommon 71 | } 72 | 73 | type SystemGroup struct { 74 | ID string `json:"id"` 75 | Name string `json:"name"` 76 | } 77 | -------------------------------------------------------------------------------- /manager/controllers/system_advisories_export_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/core" 5 | "net/http" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestSystemAdvisoriesExportJSON(t *testing.T) { 13 | core.SetupTest(t) 14 | w := CreateRequestRouterWithPath("GET", "/:inventory_id", "00000000-0000-0000-0000-000000000001", "", nil, 15 | "application/json", SystemAdvisoriesExportHandler) 16 | 17 | var output []AdvisoriesDBLookup 18 | CheckResponse(t, w, http.StatusOK, &output) 19 | assert.Equal(t, 8, len(output)) 20 | assert.Equal(t, output[0].Description, "adv-1-des") 21 | } 22 | 23 | func TestSystemAdvisoriesExportCSV(t *testing.T) { 24 | core.SetupTest(t) 25 | w := CreateRequestRouterWithPath("GET", "/:inventory_id", "00000000-0000-0000-0000-000000000001", "", nil, 26 | "text/csv", SystemAdvisoriesExportHandler) 27 | 28 | assert.Equal(t, http.StatusOK, w.Code) 29 | body := w.Body.String() 30 | lines := strings.Split(body, "\r\n") 31 | 32 | assert.Equal(t, 10, len(lines)) 33 | assert.Equal(t, "id,description,public_date,synopsis,advisory_type_name,severity,cve_count,"+ 34 | "reboot_required,release_versions,status", lines[0]) 35 | assert.Equal(t, "RH-1,adv-1-des,2016-09-22T16:00:00Z,adv-1-syn,enhancement,,0,"+ 36 | "false,\"7.0,7Server\",Installable", lines[1]) 37 | } 38 | 39 | func TestUnknownSystemAdvisoriesExport(t *testing.T) { 40 | core.SetupTest(t) 41 | w := CreateRequestRouterWithPath("GET", "/:inventory_id", "unknownsystem", "", nil, "text/csv", 42 | 
SystemAdvisoriesExportHandler) 43 | 44 | assert.Equal(t, http.StatusBadRequest, w.Code) 45 | } 46 | -------------------------------------------------------------------------------- /manager/controllers/system_delete.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/utils" 5 | "app/manager/middlewares" 6 | "errors" 7 | "net/http" 8 | 9 | "github.com/gin-gonic/gin" 10 | ) 11 | 12 | // @Summary Delete system by inventory id 13 | // @Description Delete system by inventory id 14 | // @ID deletesystem 15 | // @Security RhIdentity 16 | // @Accept json 17 | // @Produce json 18 | // @Param inventory_id path string true "Inventory ID" 19 | // @Success 200 20 | // @Failure 400 {object} utils.ErrorResponse 21 | // @Failure 404 {object} utils.ErrorResponse 22 | // @Failure 500 {object} utils.ErrorResponse 23 | // @Router /systems/{inventory_id} [delete] 24 | func SystemDeleteHandler(c *gin.Context) { 25 | account := c.GetInt(utils.KeyAccount) 26 | 27 | inventoryID := c.Param("inventory_id") 28 | if inventoryID == "" { 29 | c.JSON(http.StatusBadRequest, utils.ErrorResponse{Error: "inventory_id param not found"}) 30 | return 31 | } 32 | 33 | if !utils.IsValidUUID(inventoryID) { 34 | LogAndRespBadRequest(c, errors.New("bad request"), "incorrect inventory_id format") 35 | return 36 | } 37 | 38 | var systemInventoryID []string 39 | db := middlewares.DBFromContext(c) 40 | tx := db.Begin() 41 | 42 | defer tx.Rollback() 43 | 44 | err := tx.Set("gorm:query_option", "FOR UPDATE OF system_platform"). 45 | Table("system_platform"). 46 | Where("rh_account_id = ?", account). 47 | Where("inventory_id = ?::uuid", inventoryID). 
48 | Pluck("inventory_id", &systemInventoryID).Error 49 | 50 | if err != nil { 51 | LogAndRespError(c, err, "could not query database for system") 52 | return 53 | } 54 | 55 | if len(systemInventoryID) == 0 { 56 | LogAndRespNotFound(c, errors.New("no rows returned"), "system not found") 57 | return 58 | } 59 | 60 | query := tx.Exec("select deleted_inventory_id from delete_system(?::uuid)", systemInventoryID[0]) 61 | 62 | if query.Error != nil { 63 | LogAndRespError(c, err, "Could not delete system") 64 | return 65 | } 66 | 67 | if tx.Commit().Error != nil { 68 | LogAndRespError(c, err, "Could not delete system") 69 | return 70 | } 71 | 72 | if query.RowsAffected > 0 { 73 | c.Status(http.StatusOK) 74 | } else { 75 | LogAndRespNotFound(c, errors.New("no rows returned"), "system not found") 76 | return 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /manager/controllers/system_delete_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/database" 6 | "app/base/models" 7 | "app/base/utils" 8 | "net/http" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | const del = "99c0ffee-0000-0000-0000-000000000de1" 15 | 16 | func TestInitDelete(t *testing.T) { 17 | utils.TestLoadEnv("conf/test.env") 18 | core.SetupTest(t) 19 | 20 | assert.NoError(t, database.DB.Create(&models.SystemPlatform{ 21 | InventoryID: del, 22 | RhAccountID: 1, 23 | DisplayName: del, 24 | }).Error) 25 | utils.TestLoadEnv("conf/manager.env") 26 | } 27 | 28 | func TestSystemDelete(t *testing.T) { 29 | core.SetupTest(t) 30 | w := CreateRequestRouterWithParams("DELETE", "/:inventory_id", del, "", nil, "", SystemDeleteHandler, 1) 31 | 32 | assert.Equal(t, http.StatusOK, w.Code) 33 | } 34 | 35 | func TestSystemDeleteWrongAccount(t *testing.T) { 36 | core.SetupTest(t) 37 | w := CreateRequestRouterWithParams("DELETE", 
"/:inventory_id", del, "", nil, "", SystemDeleteHandler, 2) 38 | 39 | assert.Equal(t, http.StatusNotFound, w.Code) 40 | } 41 | 42 | func TestSystemDeleteNotFound(t *testing.T) { 43 | core.SetupTest(t) 44 | w := CreateRequestRouterWithParams("DELETE", "/:inventory_id", del, "", nil, "", SystemDeleteHandler, 1) 45 | 46 | assert.Equal(t, http.StatusNotFound, w.Code) 47 | } 48 | 49 | func TestSystemDeleteUnknown(t *testing.T) { 50 | core.SetupTest(t) 51 | w := CreateRequestRouterWithParams("DELETE", "/:inventory_id", "unknownsystem", "", nil, "", SystemDeleteHandler, 1) 52 | 53 | assert.Equal(t, http.StatusBadRequest, w.Code) 54 | } 55 | -------------------------------------------------------------------------------- /manager/controllers/system_packages_export_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/core" 5 | "net/http" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestSystemPackagesExportHandlerJSON(t *testing.T) { 13 | core.SetupTest(t) 14 | w := CreateRequestRouterWithParams("GET", "/:inventory_id/packages", "00000000-0000-0000-0000-000000000013", "", 15 | nil, "application/json", SystemPackagesExportHandler, 3) 16 | 17 | var output []SystemPackageInline 18 | CheckResponse(t, w, http.StatusOK, &output) 19 | assert.Equal(t, 4, len(output)) 20 | assert.Equal(t, output[0].Name, "kernel") 21 | assert.Equal(t, output[0].EVRA, "5.6.13-200.fc31.x86_64") 22 | assert.Equal(t, output[0].LatestInstallable, "5.6.13-200.fc31.x86_64") 23 | assert.Equal(t, output[0].LatestApplicable, "5.6.13-200.fc31.x86_64") 24 | assert.Equal(t, output[0].Summary, "The Linux kernel") 25 | } 26 | 27 | func TestSystemPackagesExportHandlerCSV(t *testing.T) { 28 | core.SetupTest(t) 29 | w := CreateRequestRouterWithParams("GET", "/:inventory_id/packages", "00000000-0000-0000-0000-000000000013", "", 30 | nil, "text/csv", SystemPackagesExportHandler, 
3) 31 | 32 | assert.Equal(t, http.StatusOK, w.Code) 33 | body := w.Body.String() 34 | lines := strings.Split(body, "\r\n") 35 | 36 | assert.Equal(t, 6, len(lines)) 37 | assert.Equal(t, "name,evra,summary,description,updatable,update_status,latest_installable,latest_applicable", lines[0]) 38 | 39 | assert.Equal(t, "kernel,5.6.13-200.fc31.x86_64,The Linux kernel,The kernel meta package,false,"+ 40 | "None,5.6.13-200.fc31.x86_64,5.6.13-200.fc31.x86_64", lines[1]) 41 | assert.Equal(t, "firefox,76.0.1-1.fc31.x86_64,Mozilla Firefox Web browser,Mozilla Firefox is an "+ 42 | "open-source web browser...,true,Installable,76.0.1-2.fc31.x86_64,77.0.1-1.fc31.x86_64", lines[2]) 43 | } 44 | 45 | func TestSystemPackagesExportUnknown(t *testing.T) { 46 | core.SetupTest(t) 47 | w := CreateRequestRouterWithParams("GET", "/:inventory_id/packages", "unknownsystem", "", nil, "text/csv", 48 | SystemPackagesExportHandler, 3) 49 | 50 | assert.Equal(t, http.StatusBadRequest, w.Code) 51 | } 52 | -------------------------------------------------------------------------------- /manager/controllers/systems_auth_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/core" 5 | "net/http" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func testAccountSystemCounts(t *testing.T, acc int, count int) { 12 | core.SetupTest(t) 13 | var output SystemsResponse 14 | w := CreateRequestRouterWithAccount("GET", "/", "", "", nil, "", SystemsListHandler, acc) 15 | CheckResponse(t, w, http.StatusOK, &output) 16 | // data 17 | assert.Equal(t, count, len(output.Data)) 18 | } 19 | 20 | func TestMissingAccount(t *testing.T) { 21 | testAccountSystemCounts(t, 0, 0) 22 | testAccountSystemCounts(t, 1, 10) 23 | testAccountSystemCounts(t, 2, 3) 24 | testAccountSystemCounts(t, 3, 5) 25 | testAccountSystemCounts(t, 4, 0) 26 | } 27 | 
-------------------------------------------------------------------------------- /manager/controllers/template_systems_delete.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/utils" 5 | "app/manager/config" 6 | "app/manager/kafka" 7 | "app/manager/middlewares" 8 | "net/http" 9 | 10 | "github.com/gin-gonic/gin" 11 | ) 12 | 13 | // @Summary Remove systems from template 14 | // @Description Remove systems from template 15 | // @ID removeTemplateSystems 16 | // @Security RhIdentity 17 | // @Accept json 18 | // @Produce json 19 | // @Param body body TemplateSystemsUpdateRequest true "Request body" 20 | // @Success 200 21 | // @Failure 400 {object} utils.ErrorResponse 22 | // @Failure 404 {object} utils.ErrorResponse 23 | // @Failure 500 {object} utils.ErrorResponse 24 | // @Router /templates/systems [DELETE] 25 | func TemplateSystemsDeleteHandler(c *gin.Context) { 26 | account := c.GetInt(utils.KeyAccount) 27 | groups := c.GetStringMapString(utils.KeyInventoryGroups) 28 | 29 | var req TemplateSystemsUpdateRequest 30 | if err := c.ShouldBindJSON(&req); err != nil { 31 | LogAndRespBadRequest(c, err, "Invalid template delete request "+err.Error()) 32 | return 33 | } 34 | 35 | db := middlewares.DBFromContext(c) 36 | 37 | err := checkTemplateSystems(c, db, account, nil, req.Systems, groups) 38 | if err != nil { 39 | return 40 | } 41 | 42 | modified, err := assignCandlepinEnvironment(c, db, account, nil, req.Systems, groups) 43 | if err != nil { 44 | return 45 | } 46 | 47 | // unassign system from template => assign NULL as template_id 48 | err = assignTemplateSystems(c, db, account, nil, modified) 49 | if err != nil { 50 | return 51 | } 52 | 53 | // re-evaluate systems removed from templates 54 | if config.EnableTemplateChangeEval { 55 | inventoryAIDs := kafka.InventoryIDs2InventoryAIDs(account, req.Systems) 56 | kafka.EvaluateBaselineSystems(inventoryAIDs) 57 | } 58 | 
c.Status(http.StatusOK) 59 | } 60 | -------------------------------------------------------------------------------- /manager/controllers/template_systems_delete_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/database" 6 | "bytes" 7 | "net/http" 8 | "testing" 9 | 10 | "github.com/bytedance/sonic" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func testTemplateSystemsDelete(t *testing.T, body TemplateSystemsUpdateRequest, status int) { 15 | bodyJSON, err := sonic.Marshal(&body) 16 | if err != nil { 17 | panic(err) 18 | } 19 | 20 | w := CreateRequestRouterWithParams("POST", "/systems", "", "", bytes.NewBuffer(bodyJSON), "", 21 | TemplateSystemsDeleteHandler, templateAccount) 22 | 23 | assert.Equal(t, status, w.Code) 24 | } 25 | 26 | func TestTemplateSystemsDeleteDefault(t *testing.T) { 27 | core.SetupTest(t) 28 | 29 | database.CreateTemplate(t, templateAccount, templateUUID, templateSystems) 30 | template2 := "99999999-9999-8888-8888-888888888888" 31 | templateSystems2 := []string{ 32 | "00000000-0000-0000-0000-000000000005", 33 | } 34 | database.CreateTemplate(t, templateAccount, template2, templateSystems2) 35 | 36 | database.CheckTemplateSystems(t, templateAccount, templateUUID, templateSystems) 37 | database.CheckTemplateSystems(t, templateAccount, template2, templateSystems2) 38 | 39 | req := TemplateSystemsUpdateRequest{ 40 | Systems: append(templateSystems, templateSystems2...), 41 | } 42 | 43 | testTemplateSystemsDelete(t, req, http.StatusOK) 44 | 45 | database.CheckTemplateSystems(t, templateAccount, templateUUID, []string{}) 46 | database.CheckTemplateSystems(t, templateAccount, template2, []string{}) 47 | database.DeleteTemplate(t, templateAccount, templateUUID) 48 | database.DeleteTemplate(t, templateAccount, template2) 49 | } 50 | 51 | func TestTemplateSystemsDeleteInvalid(t *testing.T) { 52 | core.SetupTest(t) 53 | 54 | for 
_, req := range []TemplateSystemsUpdateRequest{ 55 | {}, 56 | {Systems: []string{}}} { 57 | testTemplateSystemsDelete(t, req, http.StatusBadRequest) 58 | } 59 | 60 | testTemplateSystemsDelete(t, TemplateSystemsUpdateRequest{ 61 | Systems: []string{"c0ffeec0-ffee-c0ff-eec0-ffeec0ffee00"}}, http.StatusNotFound) 62 | } 63 | -------------------------------------------------------------------------------- /manager/controllers/test_utils.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | const InvalidContentTypeErr = `{"error":"Invalid content type 'test-format', use 'application/json' or 'text/csv'"}` 11 | 12 | func ParseResponseBody(t *testing.T, bytes []byte, out interface{}) { 13 | // don't use sonic.Unmarshal as some tests receive empty output 14 | err := json.Unmarshal(bytes, &out) 15 | assert.Nil(t, err, string(bytes)) 16 | } 17 | -------------------------------------------------------------------------------- /manager/controllers/utils_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "app/base/database" 5 | "app/base/utils" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | 10 | "github.com/gin-gonic/gin" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestGroupNameFilter(t *testing.T) { 15 | utils.SkipWithoutDB(t) 16 | database.Configure() 17 | 18 | c, _ := gin.CreateTestContext(httptest.NewRecorder()) 19 | c.Request, _ = http.NewRequest("GET", "/?filter[group_name]=group2", nil) 20 | 21 | filters, err := ParseAllFilters(c, ListOpts{}) 22 | assert.Nil(t, err) 23 | 24 | var systems []SystemsID 25 | groups := map[string]string{ 26 | utils.KeyGrouped: `{"[{\"id\":\"inventory-group-1\"}]","[{\"id\":\"inventory-group-2\"}]"}`, 27 | } 28 | tx := database.Systems(database.DB, 1, groups) 29 | tx, _ 
= ApplyInventoryFilter(filters, tx, "sp.inventory_id") 30 | tx.Scan(&systems) 31 | 32 | assert.Equal(t, 2, len(systems)) // 2 systems with `group2` in test_data 33 | assert.Equal(t, "00000000-0000-0000-0000-000000000007", systems[0].ID) 34 | assert.Equal(t, "00000000-0000-0000-0000-000000000008", systems[1].ID) 35 | } 36 | 37 | func TestGroupNameFilter2(t *testing.T) { 38 | utils.SkipWithoutDB(t) 39 | database.Configure() 40 | 41 | c, _ := gin.CreateTestContext(httptest.NewRecorder()) 42 | c.Request, _ = http.NewRequest("GET", "/?filter[group_name]=group1,group2", nil) 43 | 44 | filters, err := ParseAllFilters(c, ListOpts{}) 45 | assert.Nil(t, err) 46 | 47 | var systems []SystemsID 48 | groups := map[string]string{ 49 | utils.KeyGrouped: `{"[{\"id\":\"inventory-group-1\"}]","[{\"id\":\"inventory-group-2\"}]"}`, 50 | } 51 | tx := database.Systems(database.DB, 1, groups) 52 | tx, _ = ApplyInventoryFilter(filters, tx, "sp.inventory_id") 53 | tx.Scan(&systems) 54 | 55 | assert.Equal(t, 9, len(systems)) // 2 systems with `group2`, 6 with `group1` in test_data 56 | } 57 | -------------------------------------------------------------------------------- /manager/middlewares/db.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "app/base/database" 5 | "app/base/utils" 6 | "net/http" 7 | 8 | "github.com/gin-gonic/gin" 9 | "gorm.io/gorm" 10 | ) 11 | 12 | const DBKey = "DB" 13 | const DBReadReplicaKey = "DBReadReplica" 14 | 15 | // Apply gin context to database so queries within context are canceled when request is aborted 16 | func DatabaseWithContext() gin.HandlerFunc { 17 | return func(c *gin.Context) { 18 | c.Set(DBKey, database.DB.WithContext(c)) 19 | if database.DBReadReplica != nil { 20 | c.Set(DBReadReplicaKey, database.DBReadReplica.WithContext(c)) 21 | } 22 | c.Next() 23 | } 24 | } 25 | 26 | // DB handler stored in request context 27 | func DBFromContext(c *gin.Context) *gorm.DB { 28 | if 
useReadReplica(c) { 29 | return c.MustGet(DBReadReplicaKey).(*gorm.DB) 30 | } 31 | return c.MustGet(DBKey).(*gorm.DB) 32 | } 33 | 34 | func useReadReplica(c *gin.Context) bool { 35 | if utils.CoreCfg.DBReadReplicaEnabled && c.Request.Method == http.MethodGet { 36 | // if Host or Port is not set, don't use read replica 37 | return database.ReadReplicaConfigured() 38 | } 39 | return false 40 | } 41 | -------------------------------------------------------------------------------- /manager/middlewares/deprecations.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "app/base/deprecations" 5 | 6 | "github.com/gin-gonic/gin" 7 | ) 8 | 9 | func Deprecate(options ...deprecations.Deprecation) gin.HandlerFunc { 10 | return func(c *gin.Context) { 11 | for _, o := range options { 12 | o.Deprecate(c) 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /manager/middlewares/limits.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "app/base/utils" 5 | "net/http" 6 | 7 | "github.com/gin-gonic/gin" 8 | "go.uber.org/ratelimit" 9 | ) 10 | 11 | func LimitRequestBodySize(size int64) gin.HandlerFunc { 12 | return func(c *gin.Context) { 13 | if c.Request != nil && c.Request.Body != nil { 14 | c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, size) 15 | } 16 | c.Next() 17 | } 18 | } 19 | 20 | func LimitRequestHeaders(maxHeaderCount int) gin.HandlerFunc { 21 | return func(c *gin.Context) { 22 | if len(c.Request.Header) > maxHeaderCount { 23 | c.AbortWithStatusJSON(http.StatusRequestEntityTooLarge, utils.ErrorResponse{Error: "too many headers"}) 24 | } 25 | } 26 | } 27 | 28 | func MaxConnections(limit int) gin.HandlerFunc { 29 | conns := make(chan struct{}, limit) 30 | return func(c *gin.Context) { 31 | conns <- struct{}{} 32 | defer func() { <-conns }() 33 | c.Next() 
34 | } 35 | } 36 | 37 | func Ratelimit(limit int) gin.HandlerFunc { 38 | rl := ratelimit.New(limit) 39 | return func(c *gin.Context) { 40 | rl.Take() 41 | c.Next() 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /manager/middlewares/logger.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "app/base/utils" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/gin-gonic/gin" 9 | ) 10 | 11 | // setup logging middleware 12 | // ensures logging line after each http response with fields: 13 | // duration_ms, status, userAgent, method, remoteAddr, url, param_* 14 | func RequestResponseLogger() gin.HandlerFunc { 15 | return func(c *gin.Context) { 16 | tStart := time.Now() 17 | c.Next() 18 | var fields []interface{} 19 | 20 | duration := time.Since(tStart).Nanoseconds() / 1e6 21 | fields = append(fields, "durationMs", duration, 22 | "status_code", c.Writer.Status(), 23 | "user_agent", c.Request.UserAgent(), 24 | "method", c.Request.Method, 25 | "remote_addr", c.Request.RemoteAddr, 26 | "url", c.Request.URL.String(), 27 | "content_encoding", c.Writer.Header().Get("Content-Encoding"), 28 | "account", c.GetInt(utils.KeyAccount)) 29 | 30 | for _, param := range c.Params { 31 | fields = append(fields, "param_"+param.Key, param.Value) 32 | } 33 | fields = append(fields, "request") 34 | 35 | if c.Writer.Status() < http.StatusInternalServerError { 36 | utils.LogInfo(fields...) 37 | } else { 38 | utils.LogError(fields...) 39 | } 40 | 41 | utils.ObserveSecondsSince(tStart, requestDurations. 
42 | WithLabelValues(c.Request.Method+c.FullPath())) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /manager/middlewares/prometheus.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "app/base/utils" 5 | "strings" 6 | 7 | "github.com/gin-gonic/gin" 8 | "github.com/prometheus/client_golang/prometheus" 9 | ginprometheus "github.com/zsais/go-gin-prometheus" 10 | ) 11 | 12 | var serviceErrorCnt = prometheus.NewCounterVec(prometheus.CounterOpts{ 13 | Namespace: "patchman_engine", 14 | Subsystem: "manager", 15 | Name: "dependency_call", 16 | }, []string{"name", "status"}) 17 | 18 | var requestDurations = prometheus.NewHistogramVec(prometheus.HistogramOpts{ 19 | Help: "Request Durations", 20 | Namespace: "patchman_engine", 21 | Subsystem: "manager", 22 | Name: "request_durations", 23 | Buckets: []float64{1, 1.5, 1.75, 2, 2.5, 3, 3.5, 4}, 24 | }, []string{"endpoint"}) 25 | 26 | var callerSourceCnt = prometheus.NewCounterVec(prometheus.CounterOpts{ 27 | Namespace: "patchman_engine", 28 | Subsystem: "manager", 29 | Name: "caller_source", 30 | }, []string{"source", "account"}) 31 | 32 | var AdvisoryDetailCnt = prometheus.NewCounterVec(prometheus.CounterOpts{ 33 | Help: "How many advisories hit/miss cache", 34 | Namespace: "patchman_engine", 35 | Subsystem: "manager", 36 | Name: "advisory_detail_cache", 37 | }, []string{"type"}) 38 | 39 | var AdvisoryDetailGauge = prometheus.NewGauge(prometheus.GaugeOpts{ 40 | Help: "Advisory detail cache size", 41 | Namespace: "patchman_engine", 42 | Subsystem: "manager", 43 | Name: "advisory_detail_cache_size", 44 | }) 45 | 46 | // Create and configure Prometheus middleware to expose metrics 47 | func Prometheus() *ginprometheus.Prometheus { 48 | prometheus.MustRegister(serviceErrorCnt, requestDurations, callerSourceCnt, 49 | AdvisoryDetailCnt, AdvisoryDetailGauge) 50 | 51 | p := 
ginprometheus.NewPrometheus("patchman_engine") 52 | p.MetricsPath = utils.CoreCfg.MetricsPath 53 | unifyParametrizedUrlsCounters(p) 54 | return p 55 | } 56 | 57 | func unifyParametrizedUrlsCounters(p *ginprometheus.Prometheus) { 58 | p.ReqCntURLLabelMappingFn = func(c *gin.Context) string { 59 | url := c.Request.URL.Path 60 | for _, p := range c.Params { 61 | url = strings.Replace(url, "/"+p.Value, "/:"+p.Key, 1) 62 | } 63 | return url 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /manager/middlewares/swagger.go: -------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "app/base/utils" 5 | "app/docs" 6 | "regexp" 7 | "strconv" 8 | 9 | "github.com/gin-gonic/gin" 10 | 11 | ginSwagger "github.com/swaggo/gin-swagger" 12 | 13 | swaggerFiles "github.com/swaggo/files" 14 | ) 15 | 16 | var apiRegexp = regexp.MustCompile(`/v(\d)`) 17 | 18 | func apiver(path string) int { 19 | match := apiRegexp.FindStringSubmatch(path) 20 | if len(match) > 1 { 21 | i, err := strconv.Atoi(match[1]) 22 | if err == nil { 23 | return i 24 | } 25 | } 26 | return 1 27 | } 28 | 29 | func SetSwagger(app *gin.Engine, config docs.EndpointsConfig) { 30 | // Serving openapi docs 31 | openapiURL := docs.Init(app, config) 32 | 33 | url := ginSwagger.URL(openapiURL) 34 | app.GET("/openapi/*any", ginSwagger.WrapHandler(swaggerFiles.Handler, url)) 35 | } 36 | 37 | func SetAdminSwagger(app *gin.Engine) { 38 | oaURL := docs.InitAdminAPI((app)) 39 | 40 | url := ginSwagger.URL(oaURL) 41 | api := app.Group("/api/patch/admin") 42 | api.GET("/openapi/*any", ginSwagger.WrapHandler(swaggerFiles.Handler, url)) 43 | } 44 | 45 | func SetAPIVersion(basePath string) gin.HandlerFunc { 46 | return func(c *gin.Context) { 47 | c.Set(utils.KeyApiver, apiver(basePath)) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /manager/middlewares/timeout.go: 
-------------------------------------------------------------------------------- 1 | package middlewares 2 | 3 | import ( 4 | "app/base/utils" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/gin-contrib/timeout" 9 | "github.com/gin-gonic/gin" 10 | ) 11 | 12 | func WithTimeout(seconds time.Duration) gin.HandlerFunc { 13 | return timeout.New( 14 | timeout.WithTimeout(seconds*time.Second), 15 | timeout.WithHandler(func(c *gin.Context) { 16 | c.Next() 17 | }), 18 | timeout.WithResponse(func(c *gin.Context) { 19 | c.AbortWithStatusJSON(http.StatusRequestTimeout, utils.ErrorResponse{Error: "Request timeout"}) 20 | c.Done() 21 | }), 22 | ) 23 | } 24 | -------------------------------------------------------------------------------- /manager/models/models_test.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/database" 6 | "app/base/models" 7 | "app/base/utils" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | // test association SystemAdvisories.Advisory 14 | func TestSystemAdvisories(t *testing.T) { 15 | utils.SkipWithoutDB(t) 16 | core.SetupTestEnvironment() 17 | 18 | var systemAdvisories []models.SystemAdvisories 19 | err := database.DB.Model(models.SystemAdvisories{}).Preload("Advisory"). 
20 | Where("system_id = ?", 1).Find(&systemAdvisories).Error 21 | assert.Nil(t, err) 22 | assert.Equal(t, 8, len(systemAdvisories)) 23 | assert.Equal(t, "RH-1", systemAdvisories[0].Advisory.Name) 24 | } 25 | -------------------------------------------------------------------------------- /platform/candlepin.go: -------------------------------------------------------------------------------- 1 | package platform 2 | 3 | import ( 4 | "app/base/candlepin" 5 | "app/base/utils" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "strings" 10 | 11 | "github.com/gin-gonic/gin" 12 | ) 13 | 14 | func candlepinEnvHandler(c *gin.Context) { 15 | envID := c.Param("envid") 16 | /* 17 | jsonData, _ := io.ReadAll(c.Request.Body) 18 | json.Unmarshal(jsonData, &body) // nolint:errcheck 19 | if body.ReturnStatus > 200 { 20 | c.AbortWithStatus(body.ReturnStatus) 21 | return 22 | } 23 | */ 24 | data := fmt.Sprintf(`{ 25 | "environment": "%s" 26 | }`, envID) 27 | utils.LogInfo(data) 28 | if envID == "return_404" { 29 | c.Data(http.StatusNotFound, gin.MIMEJSON, []byte{}) 30 | return 31 | } 32 | c.Data(http.StatusOK, gin.MIMEJSON, []byte(data)) 33 | } 34 | 35 | func candlepinConsumersPutHandler(c *gin.Context) { 36 | consumer := c.Param("consumer") 37 | jsonData, _ := io.ReadAll(c.Request.Body) 38 | utils.LogInfo("PUT consumer", consumer, "body", string(jsonData)) 39 | if consumer == "return_404" { 40 | c.Data(http.StatusNotFound, gin.MIMEJSON, []byte{}) 41 | return 42 | } 43 | c.Data(http.StatusOK, gin.MIMEJSON, []byte{}) 44 | } 45 | 46 | func candlepinConsumersGetHandler(c *gin.Context) { 47 | consumer := c.Param("consumer") 48 | utils.LogInfo("GET consumer", consumer, "body") 49 | env := strings.ReplaceAll(consumer, "-", "") 50 | env = strings.Replace(env, "000", "999", 1) 51 | response := candlepin.ConsumersDetailResponse{ 52 | Environments: []candlepin.ConsumersEnvironment{ 53 | {ID: env}, 54 | }, 55 | } 56 | c.JSON(http.StatusOK, response) 57 | } 58 | 59 | func initCandlepin(app *gin.Engine) { 60 
| app.POST("/candlepin/environments/:envid/consumers", candlepinEnvHandler) 61 | app.PUT("/candlepin/consumers/:consumer", candlepinConsumersPutHandler) 62 | app.GET("/candlepin/consumers/:consumer", candlepinConsumersGetHandler) 63 | } 64 | -------------------------------------------------------------------------------- /platform/inventory.go: -------------------------------------------------------------------------------- 1 | package platform 2 | 3 | import ( 4 | "app/base/inventory" 5 | "app/base/utils" 6 | "crypto/rand" 7 | "math/big" 8 | ) 9 | 10 | var pkgs = []string{ 11 | "kernel-debug-devel-2.6.32-220.el6.i686", 12 | "bogl-debuginfo-0.1.18-11.2.1.el5.1.i386", 13 | "tetex-latex-3.0-33.13.el5.x86_64", 14 | "openssh-clients-5.3p1-20.el6_0.3.i686", 15 | "httpd-debuginfo-2.2.3-43.el5.i386", 16 | "openoffice.org-langpack-tn_ZA-1:3.2.1-19.6.el6_0.5.i686", 17 | "mod_nss-debuginfo-1.0.8-8.el5_10.i386", 18 | "java-1.5.0-ibm-demo-1:1.5.0.16.9-1jpp.1.el5.i386", 19 | "openoffice.org-calc-1:3.2.1-19.6.el6_0.5.i686", 20 | "rubygem-foreman_api-0.1.11-6.el6sat.noarch", 21 | "bluez-libs-debuginfo-3.7-1.1.i386", 22 | "java-1.6.0-sun-demo-1:1.6.0.27-1jpp.2.el5.x86_64", 23 | "thunderbird-debuginfo-2.0.0.24-6.el5.x86_64", 24 | "chkconfig-debuginfo-1.3.30.2-2.el5.i386", 25 | "PackageKit-device-rebind-0.5.8-20.el6.i686", 26 | "java-1.7.0-oracle-devel-1:1.7.0.25-1jpp.1.el5_9.i386", 27 | "xulrunner-debuginfo-1.9.0.7-3.el5.i386", 28 | "mysql-server-5.1.66-2.el6_3.i686", 29 | "iproute-2.6.18-13.el5.i386", 30 | "libbonobo-2.24.2-5.el6.i686"} 31 | 32 | // Create bare system profile 33 | func makeSystemProfile(id string, randomPkgs bool) inventory.SystemProfile { 34 | _pkgs := pkgs 35 | if id == "TEST-NO-PKGS" { 36 | _pkgs = []string{} 37 | } else if randomPkgs { 38 | nPkgs, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pkgs)))) 39 | _pkgs = pkgs[0:nPkgs.Int64()] 40 | } 41 | 42 | yumRepos := []inventory.YumRepo{ 43 | { 44 | ID: "repo1", 45 | Name: "Debug packages", 46 | Enabled: true, 
47 | }, 48 | } 49 | dnfModules := []inventory.DnfModule{ 50 | { 51 | Name: "firefox", 52 | Stream: "60", 53 | }, 54 | } 55 | return inventory.SystemProfile{ 56 | Arch: utils.PtrString("i686"), 57 | InstalledPackages: &_pkgs, 58 | YumRepos: &yumRepos, 59 | DnfModules: &dnfModules, 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /platform/rbac.go: -------------------------------------------------------------------------------- 1 | package platform 2 | 3 | import ( 4 | "app/base/rbac" 5 | "app/base/utils" 6 | "net/http" 7 | 8 | "github.com/gin-gonic/gin" 9 | ) 10 | 11 | var rbacPermissions = utils.PodConfig.GetString("rbac_permissions", "patch:*:read") 12 | 13 | var inventoryGroup = "inventory-group-1" 14 | 15 | func rbacHandler(c *gin.Context) { 16 | c.JSON(http.StatusOK, rbac.AccessPagination{ 17 | Data: []rbac.Access{ 18 | {Permission: rbacPermissions}, 19 | { 20 | Permission: "inventory:hosts:read", 21 | ResourceDefinitions: []rbac.ResourceDefinition{{ 22 | AttributeFilter: rbac.AttributeFilter{ 23 | Key: "group.id", 24 | Operation: "in", 25 | Value: []*string{&inventoryGroup, nil}, 26 | }, 27 | }}, 28 | }, 29 | }, 30 | }) 31 | } 32 | 33 | // InitInventory routes. 
#!/bin/sh
# Check that every ${VAR} placeholder referenced in the deploy template is
# also declared in its "parameters:" section, i.e. no deploy-time value is
# left undefined.

YAML=deploy/clowdapp.yaml

# Extract the names of all ${...} placeholders used anywhere in the template.
ENV_VARS=$(grep '\${.*}' $YAML | sed 's/.*\${\+\([^}]*\)}\+.*/\1/')

for i in $ENV_VARS ; do
    # awk exits 1 when the name appears after the "parameters:" marker
    # (declared => if-branch not taken); a 0 exit means the placeholder is
    # never declared and an error is reported.
    if awk "/parameters:/ {params=1} params && /$i/ { exit 1;}" $YAML ; then
        >&2 echo "Value of $i is not defined in $YAML"
        ERROR=1
    fi
done

# Fail the check if at least one placeholder was undeclared.
[[ -z $ERROR ]] || exit 1
#!/usr/bin/env bash
# Verify that the openapi.json documents committed under docs/ are up to date
# with what ./scripts/generate_docs.sh produces. Exits non-zero when any spec
# differs from its freshly generated counterpart.
#
# Fixes over the previous version: rc is initialized and incremented
# arithmetically (rc+=$? on an unset variable performs string concatenation
# in bash), and each file's message is driven by its own diff result instead
# of the accumulated exit code, which misreported a clean second file when
# the first one differed.

APIVERS="v3 admin"

# Snapshot the committed specs before regenerating them in place.
declare -A OPENAPI_COPY
for APIVER in $APIVERS; do
    OPENAPI_COPY["$APIVER"]=$(mktemp -t openapi.json.XXX)
    cp docs/$APIVER/openapi.json ${OPENAPI_COPY["$APIVER"]}
done

./scripts/generate_docs.sh

rc=0
for APIVER in ${!OPENAPI_COPY[@]}; do
    # Judge each file by its own diff result.
    if diff docs/$APIVER/openapi.json ${OPENAPI_COPY["$APIVER"]}; then
        echo "docs/$APIVER/openapi.json consistent with generated file."
    else
        echo "docs/$APIVER/openapi.json different from file generated with './scripts/generate_docs.sh'!"
        rc=1
    fi

    rm ${OPENAPI_COPY["$APIVER"]}
done
exit $rc
4 | # Example: echo This PASS, this FAIL | ./scripts/colorize.sh 5 | 6 | sed "s/PASS/\x1b[32mPASS\x1b[0m/ 7 | s/FAIL/\x1b[31mFAIL\x1b[0m/" 8 | -------------------------------------------------------------------------------- /scripts/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e -o pipefail # stop on error 4 | 5 | COMPONENT=$1 6 | shift 7 | # This script is launched inside the /go/src/app working directory 8 | echo "Running $COMPONENT in $(pwd) as $(id)" 9 | exec ${GORUN:+go run} ./main${GORUN:+.go} $COMPONENT $@ 10 | -------------------------------------------------------------------------------- /scripts/export_local_env.sh: -------------------------------------------------------------------------------- 1 | CONFIGS="conf/local.env" 2 | GITROOT=$(git rev-parse --show-toplevel) 3 | 4 | export $(grep -h '^[[:alpha:]]' $CONFIGS | xargs) 5 | 6 | export ACG_CONFIG=$GITROOT/$ACG_CONFIG 7 | export KAFKA_SSL_CERT=$GITROOT/$KAFKA_SSL_CERT 8 | [[ -n $DB_SSLROOTCERT ]] && export DB_SSLROOTCERT=$GITROOT/$DB_SSLROOTCERT 9 | 10 | -------------------------------------------------------------------------------- /scripts/feed_db.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "app/base/database" 5 | "os" 6 | ) 7 | 8 | func main() { 9 | database.InitDB() 10 | if len(os.Args) > 1 { 11 | switch os.Args[1] { 12 | case "inventory_hosts": 13 | createInventoryHosts() 14 | return 15 | case "feed": 16 | feed() 17 | return 18 | } 19 | } 20 | // create inventory.hosts and feed db 21 | createInventoryHosts() 22 | feed() 23 | } 24 | 25 | func createInventoryHosts() { 26 | database.DBWait("empty") 27 | query, err := os.ReadFile("./dev/create_inventory_hosts.sql") 28 | if err != nil { 29 | panic(err) 30 | } 31 | err = database.DB.Exec(string(query)).Error 32 | if err != nil { 33 | panic(err) 34 | } 35 | } 36 | 37 | func feed() { 38 | 
database.DBWait("full") 39 | query, err := os.ReadFile("./dev/test_data.sql") 40 | if err != nil { 41 | panic(err) 42 | } 43 | err = database.DB.Exec(string(query)).Error 44 | if err != nil { 45 | panic(err) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /scripts/generate_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Usage: 4 | # ./generate_docs.sh 5 | 6 | DOCS_TMP_DIR=/tmp 7 | CONVERT_URL="https://converter.swagger.io/api/convert" 8 | VERSION=$(cat VERSION) 9 | 10 | # Create temporary swagger 2.0 definition 11 | swag init --output $DOCS_TMP_DIR --exclude turnpike --generalInfo manager/manager.go 12 | swag init --output $DOCS_TMP_DIR/admin --dir turnpike --generalInfo admin_api.go 13 | 14 | convert_doc() { 15 | local in=$1 16 | local out=$2 17 | curl -X "POST" -H "accept: application/json" -H "Content-Type: application/json" \ 18 | -d @$DOCS_TMP_DIR/$in $CONVERT_URL \ 19 | | python3 -m json.tool | sed "s/{{.Version}}/$VERSION/" \ 20 | > $out 21 | } 22 | 23 | # Perform conversion 24 | convert_doc swagger.json docs/v3/openapi.json 25 | 26 | # Convert admin spec 27 | convert_doc admin/swagger.json docs/admin/openapi.json 28 | -------------------------------------------------------------------------------- /scripts/go_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o pipefail 4 | 5 | export TEST_WD=`pwd` 6 | 7 | TEST_DIRS=${1:-./...} 8 | 9 | # Run go test and colorize output (PASS - green, FAIL - red). 10 | # Set "-p 1" to run test sequentially to avoid parallel changes in testing database. 
11 | gotestsum --format=standard-verbose -- -v -p 1 -coverprofile=coverage.txt -covermode=atomic $TEST_DIRS 12 | -------------------------------------------------------------------------------- /scripts/go_test_db.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e -o pipefail 4 | MIGRATION_FILES=file://./database_admin/migrations 5 | 6 | go run ./scripts/feed_db.go inventory_hosts 7 | 8 | # Create database 9 | go run main.go migrate $MIGRATION_FILES 10 | 11 | # Run database test, destroys and recreates database 12 | gotestsum --format=standard-verbose -- -v app/database_admin 13 | 14 | # Fill database with testing data 15 | go run ./scripts/feed_db.go feed 16 | 17 | # Normal test run - everything except database schema test 18 | TEST_DIRS=$(go list -buildvcs=false ./... | grep -v "app/database_admin") 19 | ./scripts/go_test.sh "${TEST_DIRS}" 20 | -------------------------------------------------------------------------------- /scripts/go_test_on_ci.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e -o pipefail 4 | 5 | # Analyse generated docs/v3/openapi.json 6 | ./scripts/check-openapi-docs.sh 7 | 8 | # Check dockerfiles and docker-composes consistency 9 | ./scripts/check-dockercomposes.sh 10 | 11 | # Check if all env variables have defined value 12 | ./scripts/check-deploy-envs.sh 13 | 14 | # Analyse code using lint 15 | golangci-lint run --timeout 5m 16 | echo "Go code analysed successfully." 
#!/bin/bash
# Based on gist: https://gist.github.com/siddharthkrish/32072e6f97d7743b1a7c47d76d2cb06c#file-version-sh
# Increment a semantic version string.
# Usage: ./scripts/increment_version.sh v1.2.3 /major
# v2.0.0
# ./scripts/increment_version.sh v1.2.3 /minor
# v1.3.0
# ./scripts/increment_version.sh v1.2.3
# v1.2.4

version="$1"
RELEASE_TYPE=$2 # /major, /minor, /patch (default)

major=0
minor=0
build=0

# Break the version down into its numeric components. Dots are escaped so
# only genuine "major.minor.patch" strings match — the previous unescaped
# "." also accepted separators like "1a2b3".
regex="([0-9]+)\.([0-9]+)\.([0-9]+)"
if [[ $version =~ $regex ]]; then
    major="${BASH_REMATCH[1]}"
    minor="${BASH_REMATCH[2]}"
    build="${BASH_REMATCH[3]}"
fi

if [[ "${RELEASE_TYPE}" == "/major" ]]; then
    # Major release: bump major, reset the rest.
    ((major++))
    minor=0
    build=0
elif [[ "${RELEASE_TYPE}" == "/minor" ]]; then
    # Minor release: bump minor, reset patch.
    ((minor++))
    build=0
else
    # Default: patch release.
    ((build++))
fi

# echo the new version number
echo "v${major}.${minor}.${build}"
/scripts/re-calc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # script to manually call vmaas_sync - re-calc 4 | 5 | curl http://localhost:9999/re-calc 6 | -------------------------------------------------------------------------------- /scripts/sync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # script to manually call vmaas_sync - sync 4 | 5 | curl http://localhost:9999/sync 6 | -------------------------------------------------------------------------------- /scripts/try_export_clowder_params.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Load Clowder params from json and export them as a environment variables. 3 | 4 | # Use go app command to print Clowder params 5 | function print_clowder_params() { 6 | ${GORUN:+go run} ./main${GORUN:+.go} print_clowder_params 7 | } 8 | 9 | if [[ -n $ACG_CONFIG ]] ; then 10 | # clowder is enabled 11 | CLOWDER_PARAMS=$(print_clowder_params) 12 | 13 | # Enable to show Clowder vars in logs 14 | if [[ -n $SHOW_CLOWDER_VARS ]]; then 15 | echo $CLOWDER_PARAMS 16 | fi 17 | 18 | echo "Clowder params found, setting..." 19 | export $CLOWDER_PARAMS 20 | else 21 | echo "No Clowder params found" 22 | fi 23 | -------------------------------------------------------------------------------- /sonar-project.properties: -------------------------------------------------------------------------------- 1 | # must be unique in a given SonarQube instance 2 | sonar.projectKey=console.redhat.com:patchman-engine 3 | 4 | # --- optional properties --- 5 | 6 | # defaults to project key 7 | #sonar.projectName=My project 8 | # defaults to 'not provided' 9 | #sonar.projectVersion=1.0 10 | 11 | # Path is relative to the sonar-project.properties file. Defaults to . 12 | #sonar.sources=. 13 | 14 | # Encoding of the source code. 
Default is default system encoding 15 | #sonar.sourceEncoding=UTF-8 16 | 17 | sonar.exclusions=dev/** 18 | -------------------------------------------------------------------------------- /tasks/caches/caches.go: -------------------------------------------------------------------------------- 1 | package caches 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/utils" 6 | "app/tasks" 7 | ) 8 | 9 | var ( 10 | skipNAccountsRefresh int 11 | ) 12 | 13 | func configure() { 14 | core.ConfigureApp() 15 | } 16 | 17 | func RunAdvisoryRefresh() { 18 | tasks.HandleContextCancel(tasks.WaitAndExit) 19 | configure() 20 | utils.LogInfo("Refreshing advisory cache") 21 | RefreshAdvisoryCaches() 22 | } 23 | 24 | func RunPackageRefresh() { 25 | tasks.HandleContextCancel(tasks.WaitAndExit) 26 | configure() 27 | utils.LogInfo("Refreshing package cache") 28 | errRefresh := RefreshPackagesCaches(nil) 29 | if err := Metrics().Add(); err != nil { 30 | utils.LogInfo("err", err, "Could not push to pushgateway") 31 | } 32 | if errRefresh != nil { 33 | utils.LogError("err", errRefresh.Error(), "Refresh account packages caches") 34 | return 35 | } 36 | utils.LogInfo("Refreshed account packages caches") 37 | } 38 | -------------------------------------------------------------------------------- /tasks/caches/metrics.go: -------------------------------------------------------------------------------- 1 | package caches 2 | 3 | import ( 4 | "app/base/utils" 5 | 6 | "github.com/prometheus/client_golang/prometheus" 7 | "github.com/prometheus/client_golang/prometheus/push" 8 | ) 9 | 10 | var packageRefreshPartDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ 11 | Help: "How long it took particular package refresh part", 12 | Namespace: "patchman_engine", 13 | Subsystem: "caches", 14 | Name: "package_refresh_part_duration_seconds", 15 | }, []string{"part"}) 16 | 17 | func Metrics() *push.Pusher { 18 | registry := prometheus.NewRegistry() 19 | 
registry.MustRegister(packageRefreshPartDuration) 20 | 21 | return push.New(utils.CoreCfg.PrometheusPushGateway, "caches").Gatherer(registry) 22 | } 23 | -------------------------------------------------------------------------------- /tasks/caches/refresh_advisory_caches_test.go: -------------------------------------------------------------------------------- 1 | package caches 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/database" 6 | "app/base/models" 7 | "app/base/utils" 8 | "sync" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestRefreshAdvisoryCachesPerAccounts(t *testing.T) { 15 | utils.SkipWithoutDB(t) 16 | core.SetupTestEnvironment() 17 | configure() 18 | 19 | // set wrong numbers of caches 20 | assert.Nil(t, database.DB.Model(&models.AdvisoryAccountData{}). 21 | Where("advisory_id = 1 AND rh_account_id = 2").Update("systems_installable", 5).Error) 22 | assert.Nil(t, database.DB.Model(&models.AdvisoryAccountData{}). 23 | Where("advisory_id = 2 AND rh_account_id = 1").Update("systems_installable", 3).Error) 24 | assert.Nil(t, database.DB.Model(&models.AdvisoryAccountData{}). 25 | Where("advisory_id = 3 AND rh_account_id = 1").Update("systems_installable", 8).Error) 26 | 27 | var wg sync.WaitGroup 28 | refreshAdvisoryCachesPerAccounts(&wg) 29 | wg.Wait() 30 | 31 | assert.Equal(t, 1, database.PluckInt(database.DB.Table("advisory_account_data"). 32 | Where("advisory_id = 1 AND rh_account_id = 2"), "systems_installable")) 33 | assert.Equal(t, 0, database.PluckInt(database.DB.Table("advisory_account_data"). 34 | Where("advisory_id = 2 AND rh_account_id = 1"), "systems_installable")) 35 | assert.Equal(t, 1, database.PluckInt(database.DB.Table("advisory_account_data"). 
// save counts from getCounts as global for other tests; the tests below are
// order-dependent: TestGetCounts fills _counts, which the later tests consume.
var (
	_counts = make([]models.PackageAccountData, 0)
	// _acc is the rh_account id exercised by the cache tests.
	_acc = 3
)

// TestAccountsWithoutCache verifies how many accounts lack a valid package
// cache in the test database.
func TestAccountsWithoutCache(t *testing.T) {
	utils.SkipWithoutDB(t)
	core.SetupTestEnvironment()

	accs, err := accountsWithoutCache()
	assert.Nil(t, err)
	// there are only 4 account in test_data but other tests are creating new accounts
	assert.Equal(t, 10, len(accs))
}

// TestGetCounts loads package counts for account _acc into the shared
// _counts fixture used by the subsequent tests.
func TestGetCounts(t *testing.T) {
	utils.SkipWithoutDB(t)
	core.SetupTestEnvironment()

	err := getCounts(&_counts, &_acc)
	assert.Nil(t, err)
	assert.Equal(t, 4, len(_counts))
}

// TestUpdatePackageAccountData writes the previously loaded counts and then
// checks that stale cache rows can be removed without error.
func TestUpdatePackageAccountData(t *testing.T) {
	utils.SkipWithoutDB(t)
	core.SetupTestEnvironment()

	err := updatePackageAccountData(_counts)
	assert.Nil(t, err)

	// delete old cache data, just check it does not return error
	err = deleteOldCache(_counts, &_acc)
	assert.Nil(t, err)
}
60 | Update("valid_package_cache", false) 61 | } 62 | -------------------------------------------------------------------------------- /tasks/cleaning/clean_advisory_account_data.go: -------------------------------------------------------------------------------- 1 | package cleaning 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/models" 6 | "app/base/utils" 7 | "app/tasks" 8 | ) 9 | 10 | func RunCleanAdvisoryAccountData() { 11 | tasks.HandleContextCancel(tasks.WaitAndExit) 12 | core.ConfigureApp() 13 | defer utils.LogPanics(true) 14 | utils.LogInfo("Deleting advisory rows with 0 applicable systems from advisory_account_data") 15 | 16 | if err := CleanAdvisoryAccountData(); err != nil { 17 | utils.LogError("err", err.Error(), "Cleaning advisory account data") 18 | return 19 | } 20 | utils.LogInfo("CleanAdvisoryAccountData task performed successfully") 21 | } 22 | 23 | func CleanAdvisoryAccountData() error { 24 | tx := tasks.CancelableDB().Begin() 25 | defer tx.Rollback() 26 | 27 | err := tx.Delete(&models.AdvisoryAccountData{}, "systems_installable <= 0 AND systems_applicable <= 0").Error 28 | if err != nil { 29 | return err 30 | } 31 | 32 | tx.Commit() 33 | return nil 34 | } 35 | -------------------------------------------------------------------------------- /tasks/cleaning/clean_unused_data.go: -------------------------------------------------------------------------------- 1 | package cleaning 2 | 3 | import ( 4 | "app/base/models" 5 | "app/base/utils" 6 | "app/tasks" 7 | ) 8 | 9 | func RunDeleteUnusedData() { 10 | defer utils.LogPanics(true) 11 | utils.LogInfo("Deleting unused data") 12 | 13 | deleteUnusedPackages() 14 | deleteUnusedAdvisories() 15 | } 16 | 17 | func deleteUnusedPackages() { 18 | tx := tasks.CancelableDB().Begin() 19 | defer tx.Rollback() 20 | 21 | // remove unused packages not synced from vmaas 22 | // before changing the query below test its performance on big data otherwise it can lock database 23 | subq := 
tx.Select("id").Table("package p"). 24 | Where("synced = ?", false). 25 | Where("NOT EXISTS" + 26 | " (SELECT 1 FROM system_package2 sp WHERE" + 27 | " p.id = sp.package_id OR p.id = sp.installable_id OR p.id = sp.applicable_id)", 28 | ).Limit(tasks.DeleteUnusedDataLimit) 29 | 30 | err := tx.Delete(&models.Package{}, "id IN (?)", subq).Error 31 | 32 | if err != nil { 33 | utils.LogError("err", err.Error(), "DeleteUnusedPackages") 34 | return 35 | } 36 | 37 | tx.Commit() 38 | utils.LogInfo("DeleteUnusedPackages tasks performed successfully") 39 | } 40 | 41 | func deleteUnusedAdvisories() { 42 | tx := tasks.CancelableDB().Begin() 43 | defer tx.Rollback() 44 | 45 | // remove unused advisories not synced from vmaas 46 | // before changing the query below test its performance on big data otherwise it can lock database 47 | // Time: 18988.223 ms (00:18.988) for 50k advisories, 75M system_advisories, 1.6M package and 50k rh_account 48 | subq := tx.Select("id").Table("advisory_metadata am"). 49 | Where("am.synced = ?", false). 50 | Where("NOT EXISTS (SELECT 1 FROM system_advisories sa WHERE am.id = sa.advisory_id)"). 51 | Where("NOT EXISTS (SELECT 1 FROM package p WHERE am.id = p.advisory_id)"). 52 | Where("NOT EXISTS (SELECT 1 FROM advisory_account_data aad WHERE am.id = aad.advisory_id)"). 
53 | Limit(tasks.DeleteUnusedDataLimit) 54 | 55 | err := tx.Delete(&models.AdvisoryMetadata{}, "id IN (?)", subq).Error 56 | 57 | if err != nil { 58 | utils.LogError("err", err.Error(), "DeleteUnusedAdvisories") 59 | return 60 | } 61 | 62 | tx.Commit() 63 | utils.LogInfo("DeleteUnusedAdvisories tasks performed successfully") 64 | } 65 | -------------------------------------------------------------------------------- /tasks/common.go: -------------------------------------------------------------------------------- 1 | package tasks 2 | 3 | import ( 4 | "app/base" 5 | "app/base/database" 6 | "app/base/utils" 7 | "os" 8 | "time" 9 | 10 | "github.com/pkg/errors" 11 | "gorm.io/gorm" 12 | ) 13 | 14 | func HandleContextCancel(fn func()) { 15 | go func() { 16 | <-base.Context.Done() 17 | utils.LogInfo("stopping vmaas_sync") 18 | fn() 19 | }() 20 | } 21 | 22 | func WaitAndExit() { 23 | time.Sleep(time.Second) // give some time to close eventual db connections 24 | os.Exit(0) 25 | } 26 | 27 | // return database handler with base context 28 | // which will be properly cancled in case of service shutdown 29 | func CancelableDB() *gorm.DB { 30 | return database.DB.WithContext(base.Context) 31 | } 32 | 33 | // return read replica (if available) database handler with base context 34 | // which will be properly canceled in case of service shutdown 35 | func CancelableReadReplicaDB() *gorm.DB { 36 | if utils.CoreCfg.DBReadReplicaEnabled && database.ReadReplicaConfigured() { 37 | return database.DBReadReplica.WithContext(base.Context) 38 | } 39 | return database.DB.WithContext(base.Context) 40 | } 41 | 42 | func withTx(do func(db *gorm.DB) error, cancelableDB func() *gorm.DB) error { 43 | tx := cancelableDB().Begin() 44 | defer tx.Rollback() 45 | if err := do(tx); err != nil { 46 | return err 47 | } 48 | return errors.Wrap(tx.Commit().Error, "Commit") 49 | } 50 | 51 | // Need to run code within a function, because defer can't be used in loops 52 | func WithTx(do func(db *gorm.DB) 
var (
	// Skip first N accounts in advisory refresh job, e.g. after failure
	SkipNAccountsRefresh = utils.PodConfig.GetInt("skip_n_accounts_refresh", 0)
	// Remove only LIMIT rows in a run, useful to avoid complete wipe in case of error
	DeleteUnusedDataLimit = utils.PodConfig.GetInt("delete_unused_data_limit", 1000)
	// Remove only LIMIT systems in a run, useful to avoid complete wipe in case of error
	DeleteCulledSystemsLimit = utils.PodConfig.GetInt("delete_culled_systems_limit", 1000)
	// Toggle cyndi metrics reporting
	EnableCyndiMetrics = utils.PodConfig.GetBool("enable_cyndi_metrics", true)
	// True when trace-level logging is enabled for this pod
	UseTraceLevel = log.IsLevelEnabled(log.TraceLevel)
	// Toggle system reevaluation based on changed repos
	EnabledRepoBasedReeval = utils.PodConfig.GetBool("repo_based_re_evaluation", true)
	// Send recalc messages for systems with modified repos
	EnableRecalcMessagesSend = utils.PodConfig.GetBool("recalc_messages_send", true)
	// Toggle advisory sync in vmaas_sync
	EnableAdvisoriesSync = utils.PodConfig.GetBool("advisories_sync", true)
	// Toggle package sync in vmaas_sync
	EnablePackagesSync = utils.PodConfig.GetBool("packages_sync", true)
	// Toggle repo sync in vmaas_sync
	EnableReposSync = utils.PodConfig.GetBool("repos_sync", true)
	// Sync data in vmaas_sync based on the last-modified timestamp
	EnableModifiedSinceSync = utils.PodConfig.GetBool("modified_since_sync", true)
	// Page size for the /errata vmaas API call
	AdvisoryPageSize = utils.PodConfig.GetInt("errata_page_size", 500)
	// Page size for the /packages vmaas API call
	PackagesPageSize = utils.PodConfig.GetInt("packages_page_size", 5)
	// Number of retries for vmaas API calls, 0 - retry forever
	VmaasCallMaxRetries = utils.PodConfig.GetInt("vmaas_call_max_retries", 8)
	// Use exponential retry timeouts, false - retry periodically
	VmaasCallExpRetry = utils.PodConfig.GetBool("vmaas_call_exp_retry", true)
	// How often to run full vmaas sync, 7 days (24*7) by default
	FullSyncCadence = utils.PodConfig.GetInt("full_sync_cadence", 24*7)
	// NOTE(review): limit on changed packages per sync — confirm exact
	// semantics at the usage site in vmaas_sync.
	MaxChangedPackages = utils.PodConfig.GetInt("max_changed_packages", 30000)
)
deleted", 13 | Namespace: "patchman_engine", 14 | Subsystem: "vmaas_sync", 15 | Name: "deleted_culled_systems", 16 | }) 17 | staleSystemsMarkedCnt = prometheus.NewCounter(prometheus.CounterOpts{ 18 | Help: "How many systems were marked as stale", 19 | Namespace: "patchman_engine", 20 | Subsystem: "vmaas_sync", 21 | Name: "stale_systems_marked", 22 | }) 23 | ) 24 | 25 | func Metrics() *push.Pusher { 26 | registry := prometheus.NewRegistry() 27 | registry.MustRegister(deletedCulledSystemsCnt, staleSystemsMarkedCnt) 28 | pusher := push.New(utils.CoreCfg.PrometheusPushGateway, "system_culling").Gatherer(registry) 29 | return pusher 30 | } 31 | -------------------------------------------------------------------------------- /tasks/system_culling/system_culling.go: -------------------------------------------------------------------------------- 1 | package system_culling //nolint:revive,stylecheck 2 | 3 | import ( 4 | "app/base/utils" 5 | "app/tasks" 6 | 7 | "github.com/pkg/errors" 8 | "gorm.io/gorm" 9 | ) 10 | 11 | func runSystemCulling() { 12 | defer utils.LogPanics(true) 13 | 14 | err := tasks.WithTx(func(tx *gorm.DB) error { 15 | nDeleted, err := deleteCulledSystems(tx, tasks.DeleteCulledSystemsLimit) 16 | if err != nil { 17 | return errors.Wrap(err, "Delete culled") 18 | } 19 | utils.LogInfo("nDeleted", nDeleted, "Culled systems deleted") 20 | deletedCulledSystemsCnt.Add(float64(nDeleted)) 21 | 22 | // marking systems as "stale" 23 | nMarked, err := markSystemsStale(tx, tasks.DeleteCulledSystemsLimit) 24 | if err != nil { 25 | return errors.Wrap(err, "Mark stale") 26 | } 27 | utils.LogInfo("nMarked", nMarked, "Stale systems marked") 28 | staleSystemsMarkedCnt.Add(float64(nMarked)) 29 | 30 | return nil 31 | }) 32 | 33 | if err != nil { 34 | utils.LogError("err", err.Error(), "System culling") 35 | } else { 36 | utils.LogInfo("System culling tasks performed successfully") 37 | } 38 | } 39 | 40 | // https://github.com/go-gorm/gorm/issues/3722 41 | func 
deleteCulledSystems(tx *gorm.DB, limitDeleted int) (nDeleted int, err error) { 42 | var nDeletedArr []int 43 | err = tx.Raw("select delete_culled_systems(?)", limitDeleted). 44 | Find(&nDeletedArr).Error 45 | if len(nDeletedArr) > 0 { 46 | nDeleted = nDeletedArr[0] 47 | } 48 | 49 | return nDeleted, err 50 | } 51 | 52 | func markSystemsStale(tx *gorm.DB, markedLimit int) (nMarked int, err error) { 53 | var nMarkedArr []int 54 | err = tx.Raw("select mark_stale_systems(?)", markedLimit). 55 | Find(&nMarkedArr).Error 56 | if len(nMarkedArr) > 0 { 57 | nMarked = nMarkedArr[0] 58 | } 59 | 60 | return nMarked, err 61 | } 62 | -------------------------------------------------------------------------------- /tasks/vmaas_sync/dbchange.go: -------------------------------------------------------------------------------- 1 | package vmaas_sync //nolint:revive,stylecheck 2 | 3 | import ( 4 | "app/base" 5 | "app/base/types" 6 | "app/base/utils" 7 | "app/base/vmaas" 8 | "app/tasks" 9 | "net/http" 10 | 11 | "github.com/pkg/errors" 12 | ) 13 | 14 | func isSyncNeeded(dbExportedTS *types.Rfc3339TimestampWithZ, vmaasExportedTS *types.Rfc3339Timestamp) bool { 15 | if dbExportedTS == nil || vmaasExportedTS == nil { 16 | return true 17 | } 18 | utils.LogInfo("last sync", dbExportedTS.Time(), "dbchange.exported", vmaasExportedTS.Time()) 19 | return dbExportedTS.Time().Before(*vmaasExportedTS.Time()) 20 | } 21 | 22 | func vmaasDBChangeRequest() (*vmaas.DBChangeResponse, error) { 23 | if vmaasClient == nil { 24 | panic("VMaaS client is nil") 25 | } 26 | 27 | vmaasCallFunc := func() (interface{}, *http.Response, error) { 28 | response := vmaas.DBChangeResponse{} 29 | resp, err := vmaasClient.Request(&base.Context, http.MethodGet, vmaasDBChangeURL, nil, &response) 30 | return &response, resp, err 31 | } 32 | 33 | vmaasDataPtr, err := utils.HTTPCallRetry(vmaasCallFunc, tasks.VmaasCallExpRetry, tasks.VmaasCallMaxRetries) 34 | if err != nil { 35 | 
vmaasCallCnt.WithLabelValues("error-dbchange").Inc() 36 | return nil, errors.Wrap(err, "Checking DBChange") 37 | } 38 | vmaasCallCnt.WithLabelValues("success").Inc() 39 | return vmaasDataPtr.(*vmaas.DBChangeResponse), nil 40 | } 41 | 42 | func VmaasDBExported() *types.Rfc3339Timestamp { 43 | dbchange, err := vmaasDBChangeRequest() 44 | if err != nil { 45 | utils.LogError("err", err, "Couldn't query vmaas dbchange") 46 | return nil 47 | } 48 | return dbchange.GetExported() 49 | } 50 | -------------------------------------------------------------------------------- /tasks/vmaas_sync/metrics_cyndi_test.go: -------------------------------------------------------------------------------- 1 | package vmaas_sync //nolint:revive,stylecheck 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/utils" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestCyndiMetrics(t *testing.T) { 12 | utils.SkipWithoutDB(t) 13 | core.SetupTestEnvironment() 14 | 15 | tagCounts, systemCounts, err := getCyndiData() 16 | assert.Nil(t, err) 17 | assert.Equal(t, int64(0), systemCounts[lastUploadLast1D]) 18 | assert.Equal(t, int64(0), systemCounts[lastUploadLast7D]) 19 | assert.Equal(t, int64(0), systemCounts[lastUploadLast30D]) 20 | assert.Equal(t, int64(18), systemCounts[lastUploadAll]) 21 | assert.Equal(t, int64(18), tagCounts[allSystemCount]) 22 | assert.Equal(t, int64(15), tagCounts[systemsSapSystemCount]) 23 | assert.Equal(t, int64(16), tagCounts[systemsWithTagsCount]) 24 | } 25 | -------------------------------------------------------------------------------- /tasks/vmaas_sync/metrics_db_test.go: -------------------------------------------------------------------------------- 1 | package vmaas_sync //nolint:revive,stylecheck 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/utils" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestTableSizes(t *testing.T) { 12 | utils.SkipWithoutDB(t) 13 | core.SetupTestEnvironment() 14 | 
tableSizes := getTableSizes() 16 | uniqueTables := make(map[string]bool, len(tableSizes)) 17 | for _, item := range tableSizes { 18 | uniqueTables[item.Key] = true 19 | } 20 | assert.Equal(t, 230, len(tableSizes)) 21 | assert.Equal(t, 230, len(uniqueTables)) 22 | assert.True(t, uniqueTables["system_platform"]) // check whether table names were loaded 23 | assert.True(t, uniqueTables["package"]) 24 | assert.True(t, uniqueTables["repo"]) 25 | } 26 | 27 | func TestDatabaseSize(t *testing.T) { 28 | utils.SkipWithoutDB(t) 29 | core.SetupTestEnvironment() 30 | 31 | databaseSize := getDatabaseSize() 32 | 33 | assert.Equal(t, 1, len(databaseSize)) 34 | assert.Equal(t, "database", databaseSize[0].Key) 35 | assert.Greater(t, databaseSize[0].Value, 0.0) 36 | } 37 | 38 | func TestDatabaseProcCounts(t *testing.T) { 39 | utils.SkipWithoutDB(t) 40 | core.SetupTestEnvironment() 41 | 42 | processesInfo := getDatabaseProcesses() 43 | 44 | assert.Less(t, 0, len(processesInfo)) 45 | ndash := 0 46 | for _, info := range processesInfo { 47 | if info.Key == "-" { 48 | ndash++ 49 | } 50 | } 51 | assert.Less(t, 0, ndash) 52 | } 53 | -------------------------------------------------------------------------------- /tasks/vmaas_sync/package_sync_test.go: -------------------------------------------------------------------------------- 1 | package vmaas_sync //nolint:revive,stylecheck 2 | 3 | import ( 4 | "app/base/core" 5 | "app/base/database" 6 | "app/base/models" 7 | "app/base/utils" 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestPkgListSyncPackages(t *testing.T) { 15 | utils.SkipWithoutDB(t) 16 | core.SetupTestEnvironment() 17 | Configure() 18 | 19 | var oldNameCount, oldPkgCount, newNameCount, newPkgCount int 20 | var pkgNextval, nameNextval, pkgCurrval, nameCurrval int 21 | 22 | database.DB.Model(&models.PackageName{}).Select("count(*)").Find(&oldNameCount) 23 | database.DB.Model(&models.Package{}).Select("count(*)").Find(&oldPkgCount) 24 
| database.DB.Raw("select nextval('package_id_seq')").Find(&pkgNextval) 25 | database.DB.Raw("select nextval('package_name_id_seq')").Find(&nameNextval) 26 | 27 | err := syncPackages(time.Now(), nil) 28 | assert.NoError(t, err) 29 | 30 | // make sure we are not creating gaps in id sequences 31 | database.DB.Model(&models.PackageName{}).Select("count(*)").Find(&newNameCount) 32 | database.DB.Model(&models.Package{}).Select("count(*)").Find(&newPkgCount) 33 | database.DB.Raw("select currval('package_id_seq')").Find(&pkgCurrval) 34 | database.DB.Raw("select currval('package_name_id_seq')").Find(&nameCurrval) 35 | 36 | nameCountInc := newNameCount - oldNameCount 37 | nameMaxInc := nameCurrval - nameNextval 38 | pkgCountInc := newPkgCount - oldPkgCount 39 | pkgMaxInc := pkgCurrval - pkgNextval 40 | assert.Equal(t, nameCountInc, nameMaxInc) 41 | assert.Equal(t, pkgCountInc, pkgMaxInc) 42 | 43 | database.CheckPackagesNamesInDB(t, "", "bash", "curl") 44 | database.CheckPackagesNamesInDB(t, "summary like '% newest summary'", "bash", "curl") 45 | database.CheckEVRAsInDBSynced(t, 4, true, 46 | "77.0.1-1.fc31.src", "77.0.1-1.fc31.x86_64", // added firefox versions 47 | "5.7.13-200.fc31.src", "5.7.13-200.fc31.x86_64") // added kernel versions 48 | database.DeleteNewlyAddedPackages(t) 49 | } 50 | -------------------------------------------------------------------------------- /tasks/vmaas_sync/repo_sync.go: -------------------------------------------------------------------------------- 1 | package vmaas_sync //nolint:revive,stylecheck 2 | 3 | import ( 4 | "app/tasks" 5 | "time" 6 | 7 | "github.com/pkg/errors" 8 | ) 9 | 10 | func syncRepos(syncStart time.Time) error { 11 | // mark non-thirdparty repos known to vmaas 12 | thirdParty := false 13 | repoPackages, repoNoPackages, _, err := getUpdatedRepos(syncStart, nil, &thirdParty) 14 | if err != nil { 15 | return err 16 | } 17 | 18 | redhatRepos := repoNoPackages 19 | for _, repoPkg := range repoPackages { 20 | redhatRepos = 
append(redhatRepos, repoPkg[0]) 21 | } 22 | 23 | if len(redhatRepos) == 0 { 24 | return nil 25 | } 26 | 27 | err = tasks.CancelableDB().Exec("UPDATE repo SET third_party = false WHERE name in (?)", redhatRepos).Error 28 | if err != nil { 29 | return errors.WithMessage(err, "Updating repo third_party flag for redhat content") 30 | } 31 | 32 | err = tasks.CancelableDB().Exec("UPDATE repo SET third_party = true WHERE name NOT IN (?)", redhatRepos).Error 33 | if err != nil { 34 | return errors.WithMessage(err, "Updating repo third_party flag for third party content") 35 | } 36 | return nil 37 | } 38 | -------------------------------------------------------------------------------- /tasks/vmaas_sync/send_messages.go: -------------------------------------------------------------------------------- 1 | package vmaas_sync //nolint:revive,stylecheck 2 | 3 | import ( 4 | "app/base" 5 | "app/base/mqueue" 6 | "app/base/utils" 7 | "app/tasks" 8 | "time" 9 | ) 10 | 11 | func SendReevaluationMessages() error { 12 | if !tasks.EnableRecalcMessagesSend { 13 | utils.LogInfo("Recalc messages sending disabled, skipping...") 14 | return nil 15 | } 16 | 17 | var inventoryAIDs mqueue.EvalDataSlice 18 | var err error 19 | 20 | if tasks.EnabledRepoBasedReeval { 21 | inventoryAIDs, err = getCurrentRepoBasedInventoryIDs() 22 | } else { 23 | inventoryAIDs, err = getAllInventoryIDs() 24 | } 25 | if err != nil { 26 | return err 27 | } 28 | 29 | tStart := time.Now() 30 | defer utils.ObserveSecondsSince(tStart, messageSendDuration) 31 | err = mqueue.SendMessages(base.Context, evalWriter, &inventoryAIDs) 32 | if err != nil { 33 | utils.LogError("err", err.Error(), "sending to re-evaluate failed") 34 | } 35 | utils.LogInfo("count", len(inventoryAIDs), "systems sent to re-calc") 36 | return nil 37 | } 38 | 39 | func getAllInventoryIDs() ([]mqueue.EvalData, error) { 40 | var inventoryAIDs []mqueue.EvalData 41 | err := tasks.CancelableDB().Table("system_platform sp"). 
42 | Select("sp.inventory_id, sp.rh_account_id, ra.org_id"). 43 | Joins("JOIN rh_account ra on ra.id = sp.rh_account_id"). 44 | Order("ra.id"). 45 | Scan(&inventoryAIDs).Error 46 | if err != nil { 47 | return nil, err 48 | } 49 | return inventoryAIDs, nil 50 | } 51 | -------------------------------------------------------------------------------- /turnpike/admin_api.go: -------------------------------------------------------------------------------- 1 | package turnpike 2 | 3 | import ( 4 | "app/base" 5 | "app/base/core" 6 | "app/base/utils" 7 | "app/manager/middlewares" 8 | "app/manager/routes" 9 | 10 | "github.com/gin-gonic/gin" 11 | ) 12 | 13 | // @title Patch Admin API 14 | // @version {{.Version}} 15 | // @description Admin API of the Patch application on [internal.console.redhat.com](https://internal.console.redhat.com) 16 | 17 | // @license.name GPLv3 18 | // @license.url https://www.gnu.org/licenses/gpl-3.0.en.html 19 | 20 | // @query.collection.format multi 21 | // @securityDefinitions.apikey RhIdentity 22 | // @in header 23 | // @name x-rh-identity 24 | 25 | // @BasePath /api/patch/admin 26 | func RunAdminAPI() { 27 | core.ConfigureApp() 28 | 29 | // Toggle Turnpike authentication for internal API (manual sync, re-calc) 30 | enableTurnpikeAuth := utils.PodConfig.GetBool("turnpike_auth", true) 31 | 32 | utils.LogInfo("port", utils.CoreCfg.PublicPort, "Manager-admin starting") 33 | app := gin.New() 34 | app.Use(middlewares.RequestResponseLogger()) 35 | middlewares.SetAdminSwagger(app) 36 | 37 | core.InitProbes(app) 38 | routes.InitAdmin(app, enableTurnpikeAuth) 39 | 40 | err := utils.RunServer(base.Context, app, utils.CoreCfg.PublicPort) 41 | if err != nil { 42 | utils.LogError("err", err.Error()) 43 | panic(err) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /turnpike/auth/turnpike.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | 
"app/base/utils" 5 | "net/http" 6 | 7 | "github.com/gin-gonic/gin" 8 | ) 9 | 10 | func TurnpikeAuthenticator() gin.HandlerFunc { 11 | return func(c *gin.Context) { 12 | identStr := c.GetHeader("x-rh-identity") 13 | if identStr == "" { 14 | c.AbortWithStatusJSON(http.StatusUnauthorized, utils.ErrorResponse{Error: "Missing x-rh-identity header"}) 15 | return 16 | } 17 | utils.LogTrace("ident", identStr, "Identity retrieved") 18 | 19 | xrhid, err := utils.ParseXRHID(identStr) 20 | if err != nil { 21 | c.AbortWithStatusJSON(http.StatusUnauthorized, utils.ErrorResponse{Error: "Invalid x-rh-identity header"}) 22 | return 23 | } 24 | 25 | if xrhid.Identity.Type != "associate" { 26 | c.AbortWithStatusJSON(http.StatusUnauthorized, utils.ErrorResponse{Error: "Invalid x-rh-identity header"}) 27 | return 28 | } 29 | } 30 | } 31 | --------------------------------------------------------------------------------