├── .coveragerc ├── .flake8 ├── .github └── workflows │ ├── container-publish.yaml │ ├── db-docs.yml │ ├── pre-commit.yml │ ├── semantic-release.yml │ └── tests.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .tekton ├── vulnerability-engine-pull-request.yaml └── vulnerability-engine-push.yaml ├── CHANGELOG.md ├── Dockerfile ├── Dockerfile.dbdocs ├── Dockerfile.test ├── Jenkinsfile ├── LICENSE ├── README.md ├── build_deploy.sh ├── ci └── functions.sh ├── cluster ├── __init__.py └── cluster.py ├── common ├── __init__.py ├── bounded_executor.py ├── config.py ├── constants.py ├── database_handler.py ├── feature_flags.py ├── identity.py ├── logging.py ├── mqueue.py ├── peewee_conditions.py ├── peewee_database.py ├── peewee_model.py ├── status_app.py ├── strtobool.py ├── utils.py └── vmaas_client.py ├── conf ├── common.env ├── database.env ├── evaluator.env ├── evaluator_recalc.env ├── evaluator_upload.env ├── exploit-sync.env ├── grouper.env ├── listener.env ├── manager.env ├── manager_base.env ├── notificator.env ├── reevaluation.env ├── settings.local.yaml ├── taskomatic.env └── vmaas-sync.env ├── database ├── README.md ├── RPM-GPG-KEY-CENTOS ├── __init__.py ├── schema │ ├── local_init_db.sh │ ├── upgrade_scripts │ │ ├── 001-db-upgrade-support.sql │ │ ├── 002-satellite-managed-caching.sql │ │ ├── 003-add-vmaas-sync-user.sql │ │ ├── 004-cache_refresh.sql │ │ ├── 005-grant-listener-delete.sql │ │ ├── 006-fix-refresh-caches.sql │ │ ├── 007-avoid-deadlocks.sql │ │ ├── 008-improve-cache-refresh-procedures.sql │ │ ├── 009-deleted_systems.sql │ │ ├── 010-add-repo-tables.sql │ │ ├── 011-delete_system_manager.sql │ │ ├── 012-add-timestamp-table.sql │ │ ├── 013-satellite-managed-removal.sql │ │ ├── 014-delete-fix.sql │ │ ├── 015-serial_ids.sql │ │ ├── 016-add-last-upload.sql │ │ ├── 017-rh-acc-table.sql │ │ ├── 018-rh-acc-fg-keys.sql │ │ ├── 019-rh-acc-funs.sql │ │ ├── 020-business-risk.sql │ │ ├── 021-store-more-cve-metadata.sql │ │ ├── 022-cve_status.sql │ │ ├── 023-cve_system_status_text.sql │ │ ├── 024-vmaas-sync-permissions.sql │ │ ├── 025-migrate-affected-systems-to-account-data.sql │ │ ├── 026-cve_account_data_vmaas_sync.sql │ │ ├── 027-cve_status_divergent.sql │ │ ├── 028-bug-in-refresh-funcs.sql │ │ ├── 029-manager-inserting-accounts.sql │ │ ├── 030-fqdn.sql │ │ ├── 031-stale-dates.sql │ │ ├── 032-add-metrics-user.sql │ │ ├── 033-db-user-taskomatic.sql │ │ ├── 034-stale-flag.sql │ │ ├── 035-stale-timestamp-index.sql │ │ ├── 036-taskomatic-write-access.sql │ │ ├── 037-taskomatic-cve-cache.sql │ │ ├── 038-taskomatic-cve-cache-delete.sql │ │ ├── 039-advisor-listener.sql │ │ ├── 040-rules-table.sql │ │ ├── 041-advisor-listener-write.sql │ │ ├── 042-system-vulnerabilities.sql │ │ ├── 043-insights-rule-update.sql │ │ ├── 044-rule_id-constraint.sql │ │ ├── 045-cve_account_data-locking.sql │ │ ├── 046-opt_out_system_cache.sql │ │ ├── 047-refresh_counts.sql │ │ ├── 048-advisor_evaluated-column.sql │ │ ├── 049-advisor_evaluated-usage.sql │ │ ├── 050-cve_rule_mapping.sql │ │ ├── 051-inisghts_rule-additional-data.sql │ │ ├── 052-insights_rule-active.sql │ │ ├── 053-cve_rule_mapping-unique.sql │ │ ├── 054-active-rules-procedures.sql │ │ ├── 055-rule_hit_details.sql │ │ ├── 056-dont-refresh-stale.sql │ │ ├── 057-rules_more_info.sql │ │ ├── 058-system_platform-indexes.sql │ │ ├── 059-deleted_table_removal.sql │ │ ├── 060-taskomatic-system_platform-delete.sql │ │ ├── 061-trigger-once.sql │ │ ├── 062-mitigation_reason.sql │ │ ├── 063-cyndi_integration.sql │ │ ├── 064-set_cyndi_pwd.sql │ │ ├── 
065-mitigated_reason_procedures.sql │ │ ├── 066-rules_git_sync.sql │ │ ├── 067-partitioning.sql │ │ ├── 068-use_system_vulnerabilities_active.sql │ │ ├── 069-add_advisories_list.sql │ │ ├── 070-playbooks.sql │ │ ├── 071-manager_cyndi_grant.sql │ │ ├── 072-potential_deadlocks.sql │ │ ├── 073-taskomatic_cyndi_grant.sql │ │ ├── 074-manager-admin-delete-sys.sql │ │ ├── 075-rules_impact_table.sql │ │ ├── 076-partition-active-sys-vulns.sql │ │ ├── 077-use-account-id.sql │ │ ├── 078-system-platform-uuid.sql │ │ ├── 079-drop-account-cache.sql │ │ ├── 080-rule-publish-date.sql │ │ ├── 081-system_vulnerabilities_cve_index.sql │ │ ├── 082-add-exploits-field.sql │ │ ├── 083-drop-generic.sql │ │ ├── 084-rule-only-field.sql │ │ ├── 085-content_version.sql │ │ ├── 086-cve_name.sql │ │ ├── 087-bigserial-migration.sql │ │ ├── 088-cyndi-fedramp-fix.sql │ │ ├── 089-metrics-migration.sql │ │ ├── 090-vmaas-sync-rule-mapping.sql │ │ ├── 091-add-rule-checksum.sql │ │ ├── 092-add-system-cache.sql │ │ ├── 093-cve-account-cache.sql │ │ ├── 094-metrics-disable.sql │ │ ├── 095-add_host_type.sql │ │ ├── 096-grant-cyndi-reader-listeners.sql │ │ ├── 097-last-status-change.sql │ │ ├── 098-advisory-remediation-flags.sql │ │ ├── 099-vmaas-sync-delete-cache.sql │ │ ├── 100-account-cve-cache-keepalive.sql │ │ ├── 101-add-business-risk-critical.sql │ │ ├── 102-add-announcement.sql │ │ ├── 103-rules-account-cache.sql │ │ ├── 104-notificator.sql │ │ ├── 105-add-org-id.sql │ │ ├── 106-delete-notif-taskomatic.sql │ │ ├── 107-notif-accounts-index.sql │ │ ├── 108-system_vulnerabilities_rule_id_index.sql │ │ ├── 109-notificator-manager.sql │ │ ├── 110-account_number-null.sql │ │ ├── 111-exploit_data.sql │ │ ├── 112-add-insights_needs_reboot.sql │ │ ├── 113-rule_autoplaybook.sql │ │ ├── 114-add_advisories_column.sql │ │ ├── 115-truncate_inactive_partition.sql │ │ ├── 116-system_platform_rules.sql │ │ ├── 117-system_platform_indexes.sql │ │ ├── 118-system_vulnerabilities_indexes.sql │ │ ├── 119-system_vulnerabilities_state.sql │ │ ├── 120-evaluator_permissions.sql │ │ ├── 121-resolution_type.sql │ │ ├── 122-vulnerable_package_tables.sql │ │ ├── 123-unfixed_feature_flag.sql │ │ ├── 124-advisory_available_cache.sql │ │ ├── 125-delete_system_vuln_package.sql │ │ ├── 126-unpatched_cache.sql │ │ ├── 127-inventory_groups_cache.sql │ │ ├── 128-system_cve_data.sql │ │ ├── 129-migrate_system_cve_data.sql │ │ ├── 130-edge_cache.sql │ │ ├── 131-drop-systems_affected.sql │ │ ├── 132-add-vulnerable_package_cve-index.sql │ │ ├── 133-vulnerable_package-module.sql │ │ ├── 134-cve_account_data_idx.sql │ │ ├── 135-recalc-events.sql │ │ ├── 136-primary-keys.sql │ │ ├── 137-operating-system.sql │ │ ├── 138-os-lifecycle_phase.sql │ │ ├── 139-taskomatic-read-all-schemas.sql │ │ ├── 140-os-fk-system_platform.sql │ │ ├── 141-group_set.sql │ │ ├── 142-cve_account_granular_cache.sql │ │ └── 143-numeric-collation.sql │ ├── ve_db_dev_cyndi.sql │ ├── ve_db_dev_data.sql │ ├── ve_db_postgresql.sql │ └── ve_db_user_create_postgresql.sql └── upgrade │ ├── __init__.py │ ├── dbupgrade.sh │ └── upgrade.py ├── deploy └── clowdapp.yaml ├── develfeatureflags.json ├── doc ├── metrics.md ├── schema.md └── vulnerability_engine_diagram.svg ├── docker-compose-dbdocs.yml ├── docker-compose.devel.yml ├── docker-compose.test.yml ├── docker-compose.yml ├── entrypoint.sh ├── evaluator ├── README.md ├── __init__.py ├── common.py ├── evaluator.py ├── logic.py └── processor.py ├── examples ├── README.md ├── __init__.py ├── connexion_deep_object_filters.py └── 
connexion_deep_object_filters.yaml ├── exploit_sync ├── __init__.py └── exploit_sync.py ├── grouper ├── __init__.py ├── common.py ├── grouper.py └── queue.py ├── listener ├── README.md ├── __init__.py ├── advisor_processor.py ├── common.py ├── inventory_processor.py ├── listener.py └── processor.py ├── manager.admin.spec.yaml ├── manager.healthz.spec.yaml ├── manager.spec.yaml ├── manager ├── README.md ├── __init__.py ├── admin.py ├── admin_handler.py ├── announcement_handler.py ├── api_status_handler.py ├── base.py ├── cve_handler.py ├── dashbar_handler.py ├── dashboard_handler.py ├── feature_handler.py ├── filters.py ├── gunicorn_conf.py ├── list_view.py ├── main.py ├── middlewares.py ├── playbook_handler.py ├── rbac_filters.py ├── rbac_manager.py ├── report_handler.py ├── risk_handler.py ├── status_handler.py ├── system_handler.py ├── version_handler.py └── vulnerabilities_handler.py ├── monitoring ├── grafana │ ├── Dockerfile │ ├── dashboards.yml │ ├── dashboards │ │ ├── grafana-dashboard-clouddot-insights-vulnerability-engine-usage-metrics.configmap.yml │ │ └── grafana-dashboard-clouddot-insights-vulnerability-engine.configmap.yml │ └── datasources.yml └── prometheus │ └── prometheus.yml ├── notificator ├── README.md ├── __init__.py ├── app.py ├── notificator.py └── notificator_queue.py ├── platform_mock ├── Dockerfile ├── __init__.py ├── common ├── data │ ├── __init__.py │ ├── misc.py │ ├── packages.py │ ├── rules.py │ └── yum_repos.py ├── entrypoint.sh ├── platform_mock.py └── traffic_generator.py ├── poetry.lock ├── pull_request_template.md ├── pyproject.toml ├── renovate.json ├── run_tests.sh ├── scripts ├── 3scale-mock ├── README.md ├── __init__.py ├── check_init_py.sh ├── check_vars.sh ├── db_docs_generator.sh ├── db_upgrade_local.sh ├── devel-compose ├── extract_dashboard_configmap.py ├── gabi │ ├── __init__.py │ ├── cve_stats.py │ ├── risk_report.py │ └── utils.py ├── generate_insights_archive.py ├── openshift-common.sh ├── openshift-devel-container.sh ├── openshift-remove-required-resources.sh ├── openshift-rsync.sh ├── poetry-lock.sh ├── run_integration_tests.sh ├── schemaspy.properties ├── turnpike-mock └── validate_dashboards.py ├── security-scan.sh ├── taskomatic ├── README.md ├── __init__.py ├── jobs │ ├── __init__.py │ ├── cacheman.py │ ├── common.py │ ├── db_metrics.py │ ├── delete_notifications.py │ ├── delete_systems.py │ ├── missing_refs.py │ ├── rules_git_sync.py │ └── stale_systems.py └── taskomatic.py ├── tests ├── __init__.py ├── common_tests │ ├── __init__.py │ ├── conftest.py │ ├── test_bounded_executor.py │ ├── test_logging.py │ ├── test_status_app.py │ └── test_utils.py ├── conftest.py ├── data │ └── truncate_dev_data.sql ├── listener_tests │ ├── __init__.py │ ├── conftest.py │ └── test_common.py ├── manager_tests │ ├── __init__.py │ ├── conftest.py │ ├── schemas.py │ ├── test_admin_handler.py │ ├── test_apistatus_handler.py │ ├── test_business_risk.py │ ├── test_cve_handler.py │ ├── test_dashbar_handler.py │ ├── test_dashboard_handler.py │ ├── test_links.py │ ├── test_manager.py │ ├── test_models.py │ ├── test_rbac_manager.py │ ├── test_readonly.py │ ├── test_report_handler.py │ ├── test_status_handler.py │ ├── test_system_handler.py │ ├── test_vulnerabilities_handler.py │ └── vuln_testcase.py ├── notificator_tests │ ├── __init__.py │ ├── conftest.py │ ├── test_app.py │ ├── test_notificator.py │ ├── test_notificator_conditions.py │ └── test_notificator_queue.py ├── scripts │ ├── __init__.py │ └── vmaas_mock.py ├── taskomatic_tests │ ├── __init__.py │ ├── 
conftest.py │ ├── data │ │ ├── insights-content-vulnerability │ │ │ └── content │ │ │ │ ├── CVE_123_456 │ │ │ │ ├── CVE_123_456_DISABLED │ │ │ │ │ ├── metadata.yaml │ │ │ │ │ └── summary.md │ │ │ │ ├── CVE_123_456_ENABLED │ │ │ │ │ ├── metadata.yaml │ │ │ │ │ └── summary.md │ │ │ │ ├── more_info.md │ │ │ │ ├── plugin.yaml │ │ │ │ ├── reason.md │ │ │ │ └── resolution.md │ │ │ │ └── config.yaml │ │ └── insights-playbooks │ │ │ └── playbooks │ │ │ └── security │ │ │ └── CVE_123_456 │ │ │ ├── CVE_123_456_DISABLED │ │ │ ├── disable_fixit.yml │ │ │ └── upgrade_fixit.yml │ │ │ └── CVE_123_456_ENABLED │ │ │ └── upgrade_fixit.yml │ ├── test_db_metrics.py │ ├── test_delete_notifications.py │ ├── test_delete_systems.py │ ├── test_rules_git_sync.py │ ├── test_stale_systems.py │ └── test_taskomatic.py ├── utils.py ├── vmaas_sync_tests │ ├── __init__.py │ ├── conftest.py │ └── test_vmaas_sync.py └── zz_database_tests │ ├── __init__.py │ └── test_upgrade.py └── vmaas_sync ├── README.md ├── __init__.py └── vmaas_sync.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = */gunicorn_conf.py 3 | */common/config.py 4 | source = common, database, evaluator, listener, manager, taskomatic, vmaas_sync 5 | 6 | [report] 7 | exclude_lines = 8 | pragma: no cover 9 | if __name__ == .__main__.: 10 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 160 3 | ignore = W503,W504,E712 4 | -------------------------------------------------------------------------------- /.github/workflows/db-docs.yml: -------------------------------------------------------------------------------- 1 | name: DB documentation generator 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | - "stable" 8 | 9 | jobs: 10 | db-docs: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v2 15 | - name: Cancel previous builds 16 | uses: rokroskar/workflow-run-cleanup-action@master 17 | env: 18 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 19 | - name: Extract branch name 20 | run: | 21 | echo "branch=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT 22 | id: extract_branch 23 | - name: Generate DB docs 24 | run: | 25 | bash scripts/db_docs_generator.sh 26 | env: 27 | BRANCH_NAME: "${{ steps.extract_branch.outputs.branch }}" 28 | VULNERABILITY_DOCS_TOKEN: "${{ secrets.VULNERABILITY_DOCS_TOKEN }}" 29 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | name: Pre-Commit Check 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | - "stable" 8 | pull_request: 9 | branches: 10 | - "master" 11 | - "stable" 12 | 13 | jobs: 14 | pre-commit: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v2 19 | with: 20 | fetch-depth: 0 21 | - name: Setup Python 22 | uses: actions/setup-python@v3 23 | - name: Pre-Commit 24 | uses: pre-commit/action@v3.0.0 25 | -------------------------------------------------------------------------------- /.github/workflows/semantic-release.yml: -------------------------------------------------------------------------------- 1 | name: Semantic release 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | 8 | jobs: 9 | semantic-release: 10 | runs-on: ubuntu-latest 11 | strategy: 12 | matrix: 13 | 
python-version: [3.8] 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v2 17 | with: 18 | fetch-depth: 0 19 | persist-credentials: false # The auth token is persisted in the local git config. semantic-release picks this one instead of GH_TOKEN 20 | - name: Cancel previous builds 21 | uses: rokroskar/workflow-run-cleanup-action@master 22 | env: 23 | GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v2 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | - name: Run python-semantic-release 29 | run: | 30 | pip install python-semantic-release 31 | semantic-release version 32 | env: 33 | GH_TOKEN: "${{ secrets.GH_TOKEN }}" 34 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | - "stable" 8 | pull_request: 9 | branches: 10 | - "master" 11 | - "stable" 12 | 13 | jobs: 14 | tests: 15 | runs-on: ubuntu-latest 16 | strategy: 17 | matrix: 18 | python-version: [3.12] 19 | env: 20 | PGPORT: "5433" 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@v2 24 | with: 25 | fetch-depth: 0 26 | - name: Check environment variables 27 | run: | 28 | /bin/bash scripts/check_vars.sh 29 | - name: Set up Python ${{ matrix.python-version }} 30 | uses: actions/setup-python@v2 31 | with: 32 | python-version: ${{ matrix.python-version }} 33 | - name: Check init.py 34 | run: | 35 | /bin/bash scripts/check_init_py.sh 36 | - name: Install dependencies 37 | run: | 38 | python -m pip install --upgrade pip poetry~=2.0 poetry-plugin-export importlib-resources==1.5.0 39 | poetry export --with dev -f requirements.txt --output requirements.txt 40 | pip install -r requirements.txt 41 | - name: Validate Grafana dashboard 42 | run: | 43 | python3 scripts/validate_dashboards.py ./monitoring/grafana/dashboards/ 44 | - name: Run tests 45 | run: | 46 | /bin/bash run_tests.sh 47 | - name: Run codecov 48 | uses: codecov/codecov-action@v3.1.2 49 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # Jupyter Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # SageMath parsed files 79 | *.sage.py 80 | 81 | # virtualenv 82 | .venv 83 | venv/ 84 | ENV/ 85 | 86 | # Spyder project settings 87 | .spyderproject 88 | .spyproject 89 | 90 | # Rope project settings 91 | .ropeproject 92 | 93 | # mkdocs documentation 94 | /site 95 | 96 | # mypy 97 | .mypy_cache/ 98 | 99 | # JetBrains IDE 100 | .idea 101 | 102 | # VS Code 103 | .vscode 104 | 105 | # PyTest 106 | .pytest_cache 107 | 108 | # MacOS 109 | .DS_Store 110 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-ast 8 | - id: check-docstring-first 9 | - repo: https://github.com/psf/black 10 | rev: 25.1.0 11 | hooks: 12 | - id: black 13 | args: [--quiet] 14 | files: "common|database|grouper|listener|evaluator|notificator|vmaas_sync|taskomatic|scripts|exploit_sync|common|cluster|tests|platform_mock|system_handler|cve_handler" 15 | - repo: https://github.com/PyCQA/flake8 16 | rev: 7.2.0 17 | hooks: 18 | - id: flake8 19 | files: "common|database|grouper|listener|evaluator|notificator|manager|vmaas_sync|taskomatic|scripts|exploit_sync|common|cluster|tests|platform_mock" 20 | - repo: https://github.com/pycqa/isort 21 | rev: 6.0.1 22 | hooks: 23 | - id: isort 24 | name: isort 25 | args: [--force-single-line, --profile=black] 26 | -------------------------------------------------------------------------------- /Dockerfile.dbdocs: -------------------------------------------------------------------------------- 1 | FROM schemaspy/schemaspy 2 | 3 | ADD /scripts/schemaspy.properties . 
4 | -------------------------------------------------------------------------------- /Dockerfile.test: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi9/ubi-minimal 2 | 3 | RUN curl -o /etc/yum.repos.d/postgresql.repo \ 4 | https://copr.fedorainfracloud.org/coprs/g/insights/postgresql-16/repo/epel-9/group_insights-postgresql-16-epel-9.repo 5 | 6 | RUN microdnf install -y --setopt=install_weak_deps=0 --setopt=tsflags=nodocs \ 7 | python312 python3.12-pip python3.12-devel libpq-devel gcc git postgresql-server which findutils diffutils && \ 8 | microdnf clean all 9 | 10 | # missing pg_config, gcc, python3-devel needed for psycopg on aarch64 11 | RUN [ "$(uname -m)" == "aarch64" ] && \ 12 | microdnf install -y --setopt=install_weak_deps=0 --setopt=tsflags=nodocs \ 13 | gcc-c++ && \ 14 | microdnf clean all || true 15 | 16 | # for the testing.postgresql python package to find postgres commands 17 | RUN ln -s /usr/bin/initdb /usr/local/bin/initdb && \ 18 | ln -s /usr/bin/postgres /usr/local/bin/postgres 19 | 20 | RUN mkdir /engine && \ 21 | chown -R postgres:postgres /engine 22 | 23 | WORKDIR /engine 24 | 25 | ADD pyproject.toml /engine/ 26 | ADD poetry.lock /engine/ 27 | 28 | ENV LC_ALL=C.utf8 29 | ENV LANG=C.utf8 30 | RUN pip3.12 install --upgrade pip && \ 31 | pip3.12 install --upgrade poetry~=2.0 poetry-plugin-export 32 | RUN poetry export --with dev -f requirements.txt --output requirements.txt && \ 33 | pip3.12 install -r requirements.txt 34 | 35 | ADD . /engine 36 | 37 | RUN chown -R postgres:postgres /engine 38 | 39 | USER postgres 40 | 41 | # configure git, required by "test_upgrade" 42 | RUN git config --global user.email "test@test" && \ 43 | git config --global user.name "test" 44 | -------------------------------------------------------------------------------- /build_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -exv 4 | 5 | IMAGE="quay.io/cloudservices/vulnerability-engine-app" 6 | IMAGE_TAG=$(git rev-parse --short=7 HEAD) 7 | SECURITY_COMPLIANCE_TAG="sc-$(date +%Y%m%d)-$(git rev-parse --short=7 HEAD)" 8 | 9 | if [[ -z "$QUAY_USER" || -z "$QUAY_TOKEN" ]]; then 10 | echo "QUAY_USER and QUAY_TOKEN must be set" 11 | exit 1 12 | fi 13 | 14 | if [[ -z "$RH_REGISTRY_USER" || -z "$RH_REGISTRY_TOKEN" ]]; then 15 | echo "RH_REGISTRY_USER and RH_REGISTRY_TOKEN must be set" 16 | exit 1 17 | fi 18 | 19 | if [[ -z "$GIT_TOKEN" ]]; then 20 | echo "GIT_TOKEN must be set" 21 | exit 1 22 | fi 23 | 24 | AUTH_CONF_DIR="$(pwd)/.podman" 25 | mkdir -p $AUTH_CONF_DIR 26 | export REGISTRY_AUTH_FILE="$AUTH_CONF_DIR/auth.json" 27 | 28 | podman login -u="$QUAY_USER" -p="$QUAY_TOKEN" quay.io 29 | podman login -u="$RH_REGISTRY_USER" -p="$RH_REGISTRY_TOKEN" registry.redhat.io 30 | podman build --build-arg STATIC_ASSETS=1 --build-arg GIT_TOKEN="$GIT_TOKEN" --pull=true -f Dockerfile -t "${IMAGE}:${IMAGE_TAG}" . 
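# Illustrative outcome (hypothetical sha, not from this repo): with HEAD at abc1234, the build above
# yields quay.io/cloudservices/vulnerability-engine-app:abc1234; the branch check below then decides
# whether that image is pushed under a dated sc-* security-compliance tag or as the commit tag plus :latest.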
31 | 32 | if [[ $GIT_BRANCH == "origin/security-compliance" ]]; then 33 | podman tag "${IMAGE}:${IMAGE_TAG}" "${IMAGE}:${SECURITY_COMPLIANCE_TAG}" 34 | podman push "${IMAGE}:${SECURITY_COMPLIANCE_TAG}" 35 | else 36 | podman push "${IMAGE}:${IMAGE_TAG}" 37 | podman tag "${IMAGE}:${IMAGE_TAG}" "${IMAGE}:latest" 38 | podman push "${IMAGE}:latest" 39 | fi 40 | -------------------------------------------------------------------------------- /cluster/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/cluster/__init__.py -------------------------------------------------------------------------------- /common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/common/__init__.py -------------------------------------------------------------------------------- /common/bounded_executor.py: -------------------------------------------------------------------------------- 1 | """ 2 | ThreadPoolExecutor wrapper to limit the number of submitted items. 3 | """ 4 | 5 | from concurrent.futures import ThreadPoolExecutor 6 | from threading import BoundedSemaphore 7 | 8 | 9 | # pylint: disable=consider-using-with 10 | class BoundedExecutor: 11 | """ 12 | BoundedExecutor behaves as a ThreadPoolExecutor which will block on 13 | calls to submit() once max_queue_size work items are queued for 14 | execution. 15 | :param max_queue_size: Integer - the maximum number of items in the work queue 16 | :param max_workers: Integer - the size of the thread pool 17 | """ 18 | 19 | def __init__(self, max_queue_size, max_workers=None): 20 | self.semaphore = BoundedSemaphore(max_queue_size) 21 | self.executor = ThreadPoolExecutor(max_workers=max_workers) 22 | 23 | def submit(self, func, *args, **kwargs): 24 | """blocking submit method""" 25 | self.semaphore.acquire() 26 | try: 27 | future = self.executor.submit(func, *args, **kwargs) 28 | except: # noqa: E722 29 | self.semaphore.release() 30 | raise 31 | else: 32 | future.add_done_callback(lambda x: self.semaphore.release()) 33 | return future 34 | 35 | def shutdown(self, wait=True): 36 | """pass shutdown to executor""" 37 | self.executor.shutdown(wait=wait) 38 | -------------------------------------------------------------------------------- /common/constants.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common constants declaration module. 
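Illustrative example (the host prefix comes from Config; the CVE id is hypothetical):
format_vmaas_cve_endpoint("CVE-2021-3156") returns f"{CFG.vmaas_host}/api/vmaas/v3/cves/CVE-2021-3156".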
3 | """ 4 | 5 | from enum import Enum 6 | from enum import StrEnum 7 | 8 | from common.config import Config 9 | 10 | CFG = Config() 11 | 12 | APP_VERSION = "2.56.3" 13 | TIMESTAMP_LAST_REPO_BASED_EVAL = "last_eval_repo_based" 14 | VMAAS_CVES_ENDPOINT = f"{CFG.vmaas_host}/api/vmaas/v3/cves" 15 | VMAAS_REPOS_ENDPOINT = f"{CFG.vmaas_host}/api/vmaas/v3/repos" 16 | VMAAS_OS_ENDPOINT = f"{CFG.vmaas_host}/api/vmaas/v3/os/vulnerability/report" 17 | 18 | 19 | class remediation(Enum): # pylint: disable=invalid-name 20 | """Types of remediation""" 21 | 22 | NONE = 0 23 | MANUAL = 1 24 | PLAYBOOK = 2 25 | 26 | 27 | def format_vmaas_cve_endpoint(cve: str): 28 | """Format endpoint URI for single CVE endpoint in vmaas""" 29 | return VMAAS_CVES_ENDPOINT + "/" + cve 30 | 31 | 32 | class HostType(StrEnum): 33 | """Types of hosts""" 34 | 35 | EDGE = "edge" 36 | RPMDNF = "rpmdnf" 37 | NONE = "none" 38 | 39 | 40 | class EvaluatorMessageType(StrEnum): 41 | """Message types which can arrive at kafka""" 42 | 43 | EVALUATE_SYSTEM = "upload_new_file" 44 | RE_EVALUATE_SYSTEM = "re-evaluate_system" 45 | -------------------------------------------------------------------------------- /common/identity.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3scale authentication functions. 3 | """ 4 | 5 | import base64 6 | import json 7 | 8 | from common.config import Config 9 | from common.logging import get_logger 10 | 11 | LOGGER = get_logger(__name__) 12 | CFG = Config() 13 | 14 | 15 | def get_identity(x_rh_identity: str) -> dict: 16 | """Get identity from given b64 string.""" 17 | try: 18 | decoded_value = base64.b64decode(x_rh_identity).decode("utf-8") 19 | except Exception: # pylint: disable=broad-except 20 | LOGGER.warning("Error decoding b64 string: %s", x_rh_identity) 21 | decoded_value = "" 22 | else: 23 | LOGGER.debug("Identity decoded: %s", decoded_value) 24 | try: 25 | identity = json.loads(decoded_value) 26 | except json.decoder.JSONDecodeError: 27 | LOGGER.warning("Error parsing JSON identity: %s", decoded_value) 28 | identity = None 29 | return identity 30 | -------------------------------------------------------------------------------- /common/peewee_database.py: -------------------------------------------------------------------------------- 1 | """ 2 | Postgresql settings for peewee mappings. 
3 | """ 4 | 5 | from playhouse.postgres_ext import PostgresqlExtDatabase 6 | 7 | from common.config import Config 8 | from common.logging import get_logger 9 | 10 | LOGGER = get_logger(__name__) 11 | 12 | CFG = Config() 13 | 14 | DB = PostgresqlExtDatabase( 15 | CFG.db_name, 16 | user=CFG.db_user, 17 | password=CFG.db_pass, 18 | host=CFG.db_host, 19 | port=CFG.db_port, 20 | sslmode=CFG.db_ssl_mode, 21 | sslrootcert=CFG.db_ssl_root_cert_path, 22 | options=f"-c statement_timeout={CFG.db_statement_timeout}", 23 | ) 24 | 25 | DB_READ_REPLICA = None 26 | if CFG.db_read_replica_enabled: 27 | if not CFG.db_host_read_replica: 28 | LOGGER.error("db_host_read_replica not set!") 29 | elif not CFG.db_port_read_replica: 30 | LOGGER.error("db_port_read_replica not set!") 31 | else: 32 | DB_READ_REPLICA = PostgresqlExtDatabase( 33 | CFG.db_name, 34 | user=CFG.db_user, 35 | password=CFG.db_pass, 36 | host=CFG.db_host_read_replica, 37 | port=CFG.db_port_read_replica, 38 | sslmode=CFG.db_ssl_mode, 39 | sslrootcert=CFG.db_ssl_root_cert_path, 40 | options=f"-c statement_timeout={CFG.db_statement_timeout}", 41 | ) 42 | -------------------------------------------------------------------------------- /common/status_app.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module represents health endpoint + prometheus metrics as one async server 3 | called status. 4 | """ 5 | 6 | from aiohttp import web 7 | from prometheus_client import generate_latest 8 | 9 | 10 | async def metrics(_): 11 | """Metrics handler""" 12 | return web.Response(body=generate_latest()) 13 | 14 | 15 | def create_status_app(logger): 16 | """Create instance of status applications""" 17 | app = web.Application(logger=logger) 18 | app.router.add_get("/metrics", metrics) 19 | return app 20 | 21 | 22 | def create_status_runner(app, port, logger, loop): 23 | """Create AIOHTTP runner to run server async""" 24 | runner = web.AppRunner(app, logger=logger) 25 | 26 | loop.run_until_complete(runner.setup()) 27 | 28 | site = web.TCPSite(runner, "0.0.0.0", port) 29 | return runner, site 30 | -------------------------------------------------------------------------------- /common/strtobool.py: -------------------------------------------------------------------------------- 1 | """Module providing `strtobool` function as replacement of distutils.strtobool.""" 2 | 3 | 4 | def strtobool(val: str) -> bool: 5 | """Convert a string representation of truth to bool. 6 | 7 | True values are y, yes, t, true, on and 1; false values are n, no, f, false, off and 0. 8 | Raises TypeError if `val` is not string. 9 | Raises ValueError if `val` is anything else. 
10 | """ 11 | if not isinstance(val, str): 12 | raise TypeError(f"`{val}` is not of type str") 13 | trues = ("y", "yes", "t", "true", "on", "1") 14 | falses = ("n", "no", "f", "false", "off", "0") 15 | 16 | val = val.lower() 17 | if val in trues: 18 | return True 19 | if val in falses: 20 | return False 21 | raise ValueError(f"`{val}` not in {trues + falses}") 22 | -------------------------------------------------------------------------------- /common/vmaas_client.py: -------------------------------------------------------------------------------- 1 | """ 2 | Async VMaaS API client 3 | """ 4 | 5 | import asyncio 6 | import ssl 7 | 8 | import aiohttp 9 | 10 | from common.config import Config 11 | from common.logging import get_logger 12 | from common.utils import VMAAS_CNX_ERR 13 | from common.utils import VMAAS_RETURN_ERR 14 | 15 | LOGGER = get_logger(__name__) 16 | 17 | CFG = Config() 18 | 19 | 20 | async def vmaas_request(endpoint, data_json=None, method="POST"): 21 | """Sends request to VMAAS""" 22 | headers = {"Content-type": "application/json", "Accept": "application/json"} 23 | tries = 0 24 | while True: 25 | if tries >= CFG.request_retries: 26 | break 27 | ssl_ctx = ssl.create_default_context(cafile=CFG.tls_ca_path) 28 | try: 29 | async with aiohttp.ClientSession() as session: 30 | async with session.request(method, endpoint, json=data_json, headers=headers, ssl=ssl_ctx) as response: 31 | if response.status == 200: 32 | return await response.json() 33 | if response.status == 503: 34 | LOGGER.info("VMAAS temporarily unavailable, retrying...") 35 | await asyncio.sleep(1) 36 | else: 37 | tries += 1 38 | VMAAS_RETURN_ERR.inc() 39 | LOGGER.error( 40 | "Error during request to VMaaS endpoint %s: HTTP %s, %s", endpoint, response.status, await response.text() 41 | ) 42 | if data_json: 43 | LOGGER.debug("JSON: %s", str(data_json)) 44 | # Do not retry for 4xx HTTP codes 45 | if 400 <= response.status < 500: 46 | break 47 | except aiohttp.ClientError: 48 | tries += 1 49 | VMAAS_CNX_ERR.inc() 50 | LOGGER.exception("Error calling VMAAS: ") 51 | return None 52 | -------------------------------------------------------------------------------- /conf/common.env: -------------------------------------------------------------------------------- 1 | POSTGRES_DB=vulnerability 2 | POSTGRES_HOST=ve_database 3 | POSTGRES_PORT=5432 4 | POSTGRES_HOST_READ_REPLICA=ve_database 5 | POSTGRES_PORT_READ_REPLICA=5432 6 | POSTGRES_READ_REPLICA_ENABLED=FALSE 7 | POSTGRES_SSL_MODE=prefer 8 | POSTGRES_SSL_ROOT_CERT_PATH=/opt/rds-ca/rds-cacert 9 | POSTGRES_USER=ve_db_admin 10 | POSTGRES_PASSWORD=ve_db_admin_pwd 11 | KAFKA_HOST=platform_mock 12 | KAFKA_PORT=9092 13 | KAFKA_GROUP_ID=vulnerability 14 | KAFKA_SSL_ENABLED=FALSE 15 | KAFKA_CA_CERT=/opt/certs/kafka-cacert 16 | KAFKA_USERNAME= 17 | KAFKA_PASSWORD= 18 | RETRY_INTERVAL=5 19 | VMAAS_HOST=http://vmaas_webapp_go:8000 20 | LOGGING_TYPE=DEVEL 21 | LOGGING_LEVEL_APP=INFO 22 | LOGGING_LEVEL_LIBS=WARNING 23 | AWS_REGION=us-east-1 24 | CW_ENABLED=FALSE 25 | CW_LOG_GROUP=platform-dev 26 | # this option has different meaning in manager and listener 27 | MAX_QUEUE_SIZE=30 28 | PAYLOAD_TRACKER_TOPIC=platform.payload-status 29 | REMEDIATION_UPDATES_TOPIC=platform.remediation-updates.vulnerability 30 | REQUEST_RETRIES=10 31 | LISTENERS_VALID_SYSTEM_SECONDS=300 32 | EVALUATOR_RESULTS_TOPIC=vulnerability.evaluator.results 33 | GIT_TOKEN= 34 | IS_FEDRAMP=FALSE 35 | ALLOWED_REPORTERS=puptoo,rhsm-system-profile-bridge 36 | EVENTS_TOPIC=platform.inventory.events 37 | 
DISABLE_OPTIMISATION=False 38 | UNLEASH_CACHE_DIR=/tmp/unleash_cache 39 | UNLEASH_TOKEN= 40 | UNLEASH_URL= 41 | UNLEASH_BOOTSTRAP_FILE=develfeatureflags.json 42 | DB_POOL_TIMEOUT=300 43 | DB_STATEMENT_TIMEOUT=1800000 44 | DB_WORK_MEM=256000 45 | -------------------------------------------------------------------------------- /conf/database.env: -------------------------------------------------------------------------------- 1 | # The admin user is intended only for db setup and administration. 2 | # The postgresql database listens on port 5432 locally (ie. running 3 | # in the vulnerability-engine-database container) while anything 4 | # connecting from outside the container uses port 15432. Because 5 | # the POSTGRESQL_PORT environment variable is not actually used 6 | # by anything running in the database container, it is set to 7 | # 15432 to remind anyone looking at this configuration that the 8 | # database container maps external connections on port 15432 to 9 | # database's local 5432 port. 10 | PGUSER=ve_db_admin 11 | -------------------------------------------------------------------------------- /conf/evaluator.env: -------------------------------------------------------------------------------- 1 | VMAAS_VULNERABILITIES_API=/api/vmaas/v3/vulnerabilities 2 | PROMETHEUS_PORT=8085 3 | DB_MIN_POOL_SIZE=10 4 | DB_MAX_POOL_SIZE=30 5 | MAX_LOADED_EVALUATOR_MSGS=20 6 | USE_VMAAS_GO=true 7 | -------------------------------------------------------------------------------- /conf/evaluator_recalc.env: -------------------------------------------------------------------------------- 1 | EVALUATOR_TOPIC=vulnerability.evaluator.recalc 2 | -------------------------------------------------------------------------------- /conf/evaluator_upload.env: -------------------------------------------------------------------------------- 1 | EVALUATOR_TOPIC=vulnerability.evaluator.upload 2 | -------------------------------------------------------------------------------- /conf/exploit-sync.env: -------------------------------------------------------------------------------- 1 | EXPLOIT_FILE_URL=http://platform_mock:8000/api/v1/exploits 2 | -------------------------------------------------------------------------------- /conf/grouper.env: -------------------------------------------------------------------------------- 1 | KAFKA_GROUP_ID=vulnerability-grouper 2 | GROUPER_MESSAGES_TIMEOUT_SECS=10 3 | MAX_LOADED_GROUPER_MSGS=4000 4 | GROUPER_INVENTORY_TOPIC=vulnerability.grouper.inventory.upload 5 | GROUPER_ADVISOR_TOPIC=vulnerability.grouper.advisor.upload 6 | -------------------------------------------------------------------------------- /conf/listener.env: -------------------------------------------------------------------------------- 1 | PROMETHEUS_PORT=8089 2 | KAFKA_GROUP_ID=vulnerability-listener2 3 | MAX_LOADED_LISTENER_MSGS=50 4 | ADVISOR_RESULTS_TOPIC=platform.engine.results 5 | MESSAGE_TOPIC=vulnerability.evaluator.upload 6 | ALLOWED_OSES=RHEL 7 | -------------------------------------------------------------------------------- /conf/manager.env: -------------------------------------------------------------------------------- 1 | DISABLE_RBAC=FALSE 2 | GRANULAR_RBAC=FALSE 3 | MAX_REQUEST_SIZE_MB=2 4 | MAXIMUM_PAGE_SIZE=1000 5 | API_MAX_RPS=100 6 | -------------------------------------------------------------------------------- /conf/manager_base.env: -------------------------------------------------------------------------------- 1 | APP_NAME=vulnerability 2 | PATH_PREFIX=/api 3 | GUNICORN_WORKERS=4 
4 | READ_ONLY_MODE=FALSE 5 | RBAC_URL=http://platform_mock:8000 6 | RBAC_TIMEOUT=15 7 | API_VERSION=v1 8 | TASKOMATIC_HOST=http://ve_taskomatic:8000 9 | NOTIFICATOR_HOST=http://ve_notificator:8000 10 | DASHBOARD_RULES_AGE=30 11 | -------------------------------------------------------------------------------- /conf/notificator.env: -------------------------------------------------------------------------------- 1 | PROMETHEUS_PORT=8088 2 | CVE_FRESHNESS_THRESHOLD_DAYS=3 3 | NOTIFICATIONS_TOPIC=platform.notifications.ingress 4 | -------------------------------------------------------------------------------- /conf/reevaluation.env: -------------------------------------------------------------------------------- 1 | MESSAGE_TOPIC=vulnerability.evaluator.recalc 2 | RE_EVALUATION_KAFKA_BATCH_SIZE=10000 3 | RE_EVALUATION_KAFKA_BATCHES=10 4 | -------------------------------------------------------------------------------- /conf/settings.local.yaml: -------------------------------------------------------------------------------- 1 | default: 2 | JIRA: 3 | # JIRA credentials for skipping tests failing due to existing bug 4 | username: jira_username 5 | password: jira_password 6 | VULNERABILITY: 7 | rest: 8 | # internal service hostname 9 | vuln_hostname: localhost 10 | USERS: 11 | primary_user: 12 | identity: 13 | account_number: "6089719" 14 | type: "User" 15 | user: 16 | username: "jdoe" 17 | email: "jdoe@acme.com" 18 | first_name: "John" 19 | last_name: "Doe" 20 | is_active: true 21 | is_org_admin: false 22 | is_internal: false 23 | locale: "en_US" 24 | internal: 25 | org_id: "3340851" 26 | auth_type: "basic-auth" 27 | auth_time: 6300 28 | entitlements: 29 | hybrid_cloud: 30 | is_entitled: true 31 | insights: 32 | is_entitled: true 33 | openshift: 34 | is_entitled: true 35 | smart_management: 36 | is_entitled: true 37 | no_entitlement: 38 | username: username 39 | password: password 40 | invalid: 41 | username: invalid 42 | password: invalid 43 | dev: 44 | main: 45 | default_user: primary_user 46 | hostname: localhost:8300 47 | path: / 48 | api_path: api 49 | scheme: http 50 | ssl_verify: false 51 | http: 52 | default_auth_type: identity 53 | cacert_path: False 54 | -------------------------------------------------------------------------------- /conf/taskomatic.env: -------------------------------------------------------------------------------- 1 | JOBS=stale_systems:5,delete_systems:30,rules_git_sync:240,db_metrics:15,cacheman:5,delete_notifications:720 2 | JOBS_STARTUP=db_metrics,cacheman 3 | SYSTEM_DELETION_THRESHOLD=1 4 | CONTENT_GIT_REPO= 5 | PLAYBOOKS_GIT_REPO= 6 | ACCOUNTS_BLACKLIST= 7 | CACHE_MINIMAL_ACCOUNT_SYSTEMS=400 8 | -------------------------------------------------------------------------------- /conf/vmaas-sync.env: -------------------------------------------------------------------------------- 1 | PROMETHEUS_PORT=8087 2 | ENABLE_RE_EVALUATION=YES 3 | DEFAULT_PAGE_SIZE=5000 4 | DEFAULT_REPO_PAGE_SIZE=200 5 | -------------------------------------------------------------------------------- /database/RPM-GPG-KEY-CENTOS: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v2.0.22 (GNU/Linux) 3 | 4 | mQINBFzMWxkBEADHrskpBgN9OphmhRkc7P/YrsAGSvvl7kfu+e9KAaU6f5MeAVyn 5 | rIoM43syyGkgFyWgjZM8/rur7EMPY2yt+2q/1ZfLVCRn9856JqTIq0XRpDUe4nKQ 6 | 8BlA7wDVZoSDxUZkSuTIyExbDf0cpw89Tcf62Mxmi8jh74vRlPy1PgjWL5494b3X 7 | 5fxDidH4bqPZyxTBqPrUFuo+EfUVEqiGF94Ppq6ZUvrBGOVo1V1+Ifm9CGEK597c 8 | 
aevcGc1RFlgxIgN84UpuDjPR9/zSndwJ7XsXYvZ6HXcKGagRKsfYDWGPkA5cOL/e 9 | f+yObOnC43yPUvpggQ4KaNJ6+SMTZOKikM8yciyBwLqwrjo8FlJgkv8Vfag/2UR7 10 | JINbyqHHoLUhQ2m6HXSwK4YjtwidF9EUkaBZWrrskYR3IRZLXlWqeOi/+ezYOW0m 11 | vufrkcvsh+TKlVVnuwmEPjJ8mwUSpsLdfPJo1DHsd8FS03SCKPaXFdD7ePfEjiYk 12 | nHpQaKE01aWVSLUiygn7F7rYemGqV9Vt7tBw5pz0vqSC72a5E3zFzIIuHx6aANry 13 | Gat3aqU3qtBXOrA/dPkX9cWE+UR5wo/A2UdKJZLlGhM2WRJ3ltmGT48V9CeS6N9Y 14 | m4CKdzvg7EWjlTlFrd/8WJ2KoqOE9leDPeXRPncubJfJ6LLIHyG09h9kKQARAQAB 15 | tDpDZW50T1MgKENlbnRPUyBPZmZpY2lhbCBTaWduaW5nIEtleSkgPHNlY3VyaXR5 16 | QGNlbnRvcy5vcmc+iQI3BBMBAgAhBQJczFsZAhsDBgsJCAcDAgYVCAIJCgsDFgIB 17 | Ah4BAheAAAoJEAW1VbOEg8ZdjOsP/2ygSxH9jqffOU9SKyJDlraL2gIutqZ3B8pl 18 | Gy/Qnb9QD1EJVb4ZxOEhcY2W9VJfIpnf3yBuAto7zvKe/G1nxH4Bt6WTJQCkUjcs 19 | N3qPWsx1VslsAEz7bXGiHym6Ay4xF28bQ9XYIokIQXd0T2rD3/lNGxNtORZ2bKjD 20 | vOzYzvh2idUIY1DgGWJ11gtHFIA9CvHcW+SMPEhkcKZJAO51ayFBqTSSpiorVwTq 21 | a0cB+cgmCQOI4/MY+kIvzoexfG7xhkUqe0wxmph9RQQxlTbNQDCdaxSgwbF2T+gw 22 | byaDvkS4xtR6Soj7BKjKAmcnf5fn4C5Or0KLUqMzBtDMbfQQihn62iZJN6ZZ/4dg 23 | q4HTqyVpyuzMXsFpJ9L/FqH2DJ4exGGpBv00ba/Zauy7GsqOc5PnNBsYaHCply0X 24 | 407DRx51t9YwYI/ttValuehq9+gRJpOTTKp6AjZn/a5Yt3h6jDgpNfM/EyLFIY9z 25 | V6CXqQQ/8JRvaik/JsGCf+eeLZOw4koIjZGEAg04iuyNTjhx0e/QHEVcYAqNLhXG 26 | rCTTbCn3NSUO9qxEXC+K/1m1kaXoCGA0UWlVGZ1JSifbbMx0yxq/brpEZPUYm+32 27 | o8XfbocBWljFUJ+6aljTvZ3LQLKTSPW7TFO+GXycAOmCGhlXh2tlc6iTc41PACqy 28 | yy+mHmSv 29 | =kkH7 30 | -----END PGP PUBLIC KEY BLOCK----- 31 | -------------------------------------------------------------------------------- /database/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/database/__init__.py -------------------------------------------------------------------------------- /database/schema/local_init_db.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | # Initialize local development database with data, cyndi inventory schema and more. 4 | # Needs to be run on an initialized vulnerability DB (usually by running dbupgrade.sh) 5 | 6 | function psql_exec { 7 | PGPASSWORD="${POSTGRES_PASSWORD}" psql --no-password -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -t -f "$1" 8 | } 9 | 10 | # wait for postgres to be up 11 | until pg_isready -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -U "${POSTGRES_USER}" -d "${POSTGRES_DB}"; 12 | do sleep 2; 13 | done 14 | 15 | # Try to initialize the local schema if there is no cyndi schema yet 16 | EXISTING_TABLES=$(echo "select 1 from information_schema.views where table_name = 'hosts' and table_schema = 'inventory'" | psql_exec - | sed 's/[[:space:]]//g') 17 | RETVAL=$? 18 | if [[ "$RETVAL" == "0" && "$EXISTING_TABLES" != "1" ]]; then 19 | echo "Initializing cyndi." 20 | psql_exec ./database/schema/ve_db_dev_cyndi.sql 21 | 22 | echo "Inserting mock data." 23 | psql_exec ./database/schema/ve_db_dev_data.sql 24 | fi 25 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/001-db-upgrade-support.sql: -------------------------------------------------------------------------------- 1 | -- ---------------------------------------------------------------------------- 2 | -- db upgrade support 3 | -- 4 | -- This upgrade script adds the tables required for db upgrade support. 
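-- Example check after an upgrade (illustrative; the exact key name stored in db_version is an
-- assumption here, not taken from this repo): SELECT version FROM db_version WHERE name = 'schema_version';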
5 | -- ---------------------------------------------------------------------------- 6 | 7 | -- create db_version table 8 | CREATE TABLE db_version ( 9 | name TEXT NOT NULL, 10 | version INT NOT NULL, 11 | PRIMARY KEY (name) 12 | ) TABLESPACE pg_default; 13 | 14 | -- create db_upgrade_log table 15 | CREATE TABLE db_upgrade_log ( 16 | id SERIAL, 17 | version INT NOT NULL, 18 | status TEXT NOT NULL, 19 | script TEXT, 20 | returncode INT, 21 | stdout TEXT, 22 | stderr TEXT, 23 | last_updated TIMESTAMP WITH TIME ZONE NOT NULL 24 | ) TABLESPACE pg_default; 25 | 26 | CREATE INDEX ON db_upgrade_log(version); 27 | 28 | CREATE TRIGGER db_upgrade_log_set_last_updated 29 | BEFORE INSERT OR UPDATE ON db_upgrade_log 30 | FOR EACH ROW EXECUTE PROCEDURE set_last_updated(); 31 | 32 | 33 | -- user for evaluator component 34 | GRANT SELECT ON db_version TO ve_db_user_evaluator; 35 | GRANT SELECT ON db_upgrade_log TO ve_db_user_evaluator; 36 | GRANT USAGE, SELECT ON db_upgrade_log_id_seq TO ve_db_user_evaluator; 37 | 38 | -- user for listener component 39 | GRANT SELECT ON db_version TO ve_db_user_listener; 40 | GRANT SELECT ON db_upgrade_log TO ve_db_user_listener; 41 | GRANT USAGE, SELECT ON db_upgrade_log_id_seq TO ve_db_user_listener; 42 | 43 | -- user for UI manager component 44 | GRANT SELECT ON db_version TO ve_db_user_manager; 45 | GRANT SELECT ON db_upgrade_log TO ve_db_user_manager; 46 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/003-add-vmaas-sync-user.sql: -------------------------------------------------------------------------------- 1 | -- Add VMaaS sync user 2 | 3 | CREATE USER ve_db_user_vmaas_sync; 4 | ALTER USER ve_db_user_vmaas_sync WITH PASSWORD 've_db_user_vmaas_sync_pwd'; 5 | 6 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_vmaas_sync; 7 | GRANT SELECT, INSERT, UPDATE, DELETE ON cve_metadata TO ve_db_user_vmaas_sync; 8 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/005-grant-listener-delete.sql: -------------------------------------------------------------------------------- 1 | GRANT DELETE ON system_vulnerabilities TO ve_db_user_listener; 2 | REVOKE UPDATE (direct_systems_affected) ON cve_affected_systems_cache FROM ve_db_user_listener; 3 | GRANT SELECT, INSERT, UPDATE, DELETE ON cve_affected_systems_cache TO ve_db_user_listener; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/006-fix-refresh-caches.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION refresh_account_cached_counts(rh_account_in varchar) 2 | RETURNS void AS 3 | $refresh_account_cached_counts$ 4 | BEGIN 5 | UPDATE system_platform sp SET cve_count_cache = ( 6 | SELECT COUNT(cve) FROM system_vulnerabilities sv 7 | WHERE sp.rh_account = rh_account_in AND sv.inventory_id = sp.inventory_id AND sv.when_mitigated IS NULL 8 | ) WHERE sp.rh_account = rh_account_in; 9 | DELETE FROM cve_affected_systems_cache where rh_account = rh_account_in; 10 | INSERT INTO cve_affected_systems_cache (cve, rh_account, systems_affected, direct_systems_affected) 11 | SELECT sv.cve, sp.rh_account, count(sv.inventory_id), count(CASE WHEN sp.satellite_managed THEN NULL ELSE 1 END) 12 | FROM system_vulnerabilities sv INNER JOIN 13 | system_platform sp USING (inventory_id) 14 | WHERE sp.rh_account = rh_account_in AND 15 | sp.last_evaluation IS NOT NULL AND 16 | 
sp.opt_out = FALSE AND 17 | sv.when_mitigated IS NULL 18 | GROUP BY sv.cve, sp.rh_account; 19 | END; 20 | $refresh_account_cached_counts$ 21 | LANGUAGE 'plpgsql'; 22 | 23 | CREATE OR REPLACE FUNCTION refresh_system_cached_counts(inventory_id_in varchar) 24 | RETURNS void AS 25 | $refresh_system_cached_counts$ 26 | BEGIN 27 | UPDATE system_platform sp SET cve_count_cache = ( 28 | SELECT COUNT(cve) FROM system_vulnerabilities sv 29 | WHERE sp.inventory_id = inventory_id_in AND sv.inventory_id = sp.inventory_id AND sv.when_mitigated IS NULL 30 | ) WHERE sp.inventory_id = inventory_id_in; 31 | END; 32 | $refresh_system_cached_counts$ 33 | LANGUAGE 'plpgsql'; 34 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/009-deleted_systems.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS deleted_systems ( 2 | inventory_id TEXT NOT NULL, CHECK (NOT empty(inventory_id)), 3 | when_deleted TIMESTAMP WITH TIME ZONE NOT NULL, 4 | UNIQUE (inventory_id) 5 | ) TABLESPACE pg_default; 6 | 7 | CREATE INDEX ON deleted_systems(when_deleted); 8 | 9 | GRANT SELECT, INSERT, UPDATE, DELETE ON deleted_systems TO ve_db_user_listener; 10 | 11 | GRANT SELECT ON TABLE deleted_systems TO ve_db_user_evaluator; 12 | GRANT SELECT ON TABLE deleted_systems TO ve_db_user_manager; 13 | GRANT SELECT ON TABLE deleted_systems TO ve_db_user_vmaas_sync; 14 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/010-add-repo-tables.sql: -------------------------------------------------------------------------------- 1 | -- repo 2 | CREATE TABLE repo ( 3 | id SERIAL, 4 | name TEXT NOT NULL UNIQUE, CHECK (NOT empty(name)), 5 | PRIMARY KEY (id) 6 | ) TABLESPACE pg_default; 7 | 8 | GRANT SELECT, INSERT, UPDATE, DELETE ON repo TO ve_db_user_listener; 9 | 10 | -- system_repo 11 | CREATE TABLE system_repo ( 12 | inventory_id TEXT NOT NULL, CHECK (NOT empty(inventory_id)), 13 | repo_id INT NOT NULL, 14 | UNIQUE (inventory_id, repo_id), 15 | CONSTRAINT inventory_id 16 | FOREIGN KEY (inventory_id) 17 | REFERENCES system_platform (inventory_id), 18 | CONSTRAINT repo_id 19 | FOREIGN KEY (repo_id) 20 | REFERENCES repo (id) 21 | ) TABLESPACE pg_default; 22 | 23 | CREATE INDEX ON system_repo(inventory_id); 24 | CREATE INDEX ON system_repo(repo_id); 25 | 26 | GRANT SELECT, INSERT, UPDATE, DELETE ON system_repo TO ve_db_user_listener; 27 | 28 | -- user for evaluator component 29 | GRANT SELECT ON repo TO ve_db_user_evaluator; 30 | GRANT SELECT ON system_repo TO ve_db_user_evaluator; 31 | GRANT USAGE, SELECT ON repo_id_seq TO ve_db_user_evaluator; 32 | 33 | -- user for listener component 34 | GRANT SELECT ON repo TO ve_db_user_listener; 35 | GRANT SELECT ON system_repo TO ve_db_user_listener; 36 | GRANT USAGE, SELECT ON repo_id_seq TO ve_db_user_listener; 37 | 38 | -- user for UI manager component 39 | GRANT SELECT ON repo TO ve_db_user_manager; 40 | GRANT SELECT ON system_repo TO ve_db_user_manager; 41 | 42 | GRANT SELECT ON repo TO ve_db_user_vmaas_sync; 43 | GRANT SELECT ON system_repo TO ve_db_user_vmaas_sync; 44 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/011-delete_system_manager.sql: -------------------------------------------------------------------------------- 1 | -- manager needs to be able to update things like 'status' on a sysid/cve combination, also needs to delete 2 | GRANT 
UPDATE, DELETE ON system_vulnerabilities TO ve_db_user_manager; 3 | 4 | -- manager needs to update cache and delete systems 5 | GRANT UPDATE (cve_count_cache), DELETE ON system_platform TO ve_db_user_manager; 6 | 7 | GRANT SELECT, INSERT, UPDATE, DELETE ON deleted_systems TO ve_db_user_manager; 8 | 9 | GRANT DELETE ON system_repo TO ve_db_user_manager; 10 | 11 | CREATE OR REPLACE FUNCTION delete_system(inventory_id_in varchar) 12 | RETURNS TABLE (deleted_inventory_id TEXT) AS 13 | $delete_system$ 14 | BEGIN 15 | -- register deleted system 16 | INSERT INTO deleted_systems (inventory_id, when_deleted) 17 | VALUES (inventory_id_in, CURRENT_TIMESTAMP) 18 | ON CONFLICT (inventory_id) DO UPDATE SET 19 | when_deleted = EXCLUDED.when_deleted; 20 | 21 | -- opt out to refresh cache and then delete 22 | WITH locked_row AS ( 23 | SELECT inventory_id 24 | FROM system_platform 25 | WHERE inventory_id = inventory_id_in 26 | FOR UPDATE 27 | ) 28 | UPDATE system_platform SET opt_out = true 29 | WHERE inventory_id = inventory_id_in; 30 | DELETE FROM system_vulnerabilities 31 | WHERE inventory_id = inventory_id_in; 32 | DELETE FROM system_repo 33 | WHERE inventory_id = inventory_id_in; 34 | RETURN QUERY DELETE FROM system_platform 35 | WHERE inventory_id = inventory_id_in 36 | RETURNING inventory_id; 37 | END; 38 | $delete_system$ 39 | LANGUAGE 'plpgsql'; 40 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/012-add-timestamp-table.sql: -------------------------------------------------------------------------------- 1 | -- timestamp_kv 2 | CREATE TABLE IF NOT EXISTS timestamp_kv ( 3 | name TEXT NOT NULL UNIQUE, CHECK (NOT empty(name)), 4 | value TIMESTAMP WITH TIME ZONE NOT NULL 5 | ) TABLESPACE pg_default; 6 | 7 | GRANT SELECT, INSERT, UPDATE, DELETE ON timestamp_kv TO ve_db_user_vmaas_sync; 8 | 9 | -- allow select on table for other components 10 | GRANT SELECT ON timestamp_kv TO ve_db_user_evaluator; 11 | GRANT SELECT ON timestamp_kv TO ve_db_user_listener; 12 | GRANT SELECT ON timestamp_kv TO ve_db_user_manager; 13 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/014-delete-fix.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION delete_system(inventory_id_in varchar) 2 | RETURNS TABLE (deleted_inventory_id TEXT) AS 3 | $delete_system$ 4 | BEGIN 5 | -- opt out to refresh cache and then delete 6 | WITH locked_row AS ( 7 | SELECT inventory_id 8 | FROM system_platform 9 | WHERE inventory_id = inventory_id_in 10 | FOR UPDATE 11 | ) 12 | UPDATE system_platform SET opt_out = true 13 | WHERE inventory_id = inventory_id_in; 14 | DELETE FROM system_vulnerabilities 15 | WHERE inventory_id = inventory_id_in; 16 | DELETE FROM system_repo 17 | WHERE inventory_id = inventory_id_in; 18 | RETURN QUERY DELETE FROM system_platform 19 | WHERE inventory_id = inventory_id_in 20 | RETURNING inventory_id; 21 | END; 22 | $delete_system$ 23 | LANGUAGE 'plpgsql'; 24 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/016-add-last-upload.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD COLUMN last_upload TIMESTAMP WITH TIME ZONE; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/017-rh-acc-table.sql: 
-------------------------------------------------------------------------------- 1 | -- rh_account table 2 | CREATE TABLE IF NOT EXISTS rh_account ( 3 | id SERIAL, 4 | name TEXT NOT NULL UNIQUE, CHECK (NOT empty(name)), 5 | PRIMARY KEY (id) 6 | ) TABLESPACE pg_default; 7 | 8 | GRANT SELECT, INSERT, UPDATE, DELETE ON rh_account TO ve_db_user_listener; 9 | GRANT SELECT, DELETE ON rh_account TO ve_db_user_manager; 10 | GRANT SELECT ON rh_account TO ve_db_user_vmaas_sync; 11 | GRANT SELECT ON rh_account TO ve_db_user_evaluator; 12 | 13 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 14 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 15 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 16 | 17 | -- fill rh_account table 18 | INSERT INTO rh_account(name) SELECT rh_account FROM system_platform UNION SELECT rh_account FROM cve_affected_systems_cache; 19 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/018-rh-acc-fg-keys.sql: -------------------------------------------------------------------------------- 1 | -- system_platform table 2 | ALTER TABLE system_platform ADD COLUMN rh_account_id INT; 3 | UPDATE system_platform sp SET rh_account_id = sub.id FROM (SELECT id, name FROM rh_account) AS sub WHERE sp.rh_account = sub.name; 4 | ALTER TABLE system_platform ALTER rh_account_id SET NOT NULL; 5 | ALTER TABLE system_platform ADD CONSTRAINT rh_account_id FOREIGN KEY (rh_account_id) REFERENCES rh_account(id); 6 | CREATE INDEX ON system_platform(rh_account_id); 7 | 8 | -- cve_affected_systems_cache 9 | ALTER TABLE cve_affected_systems_cache ADD COLUMN rh_account_id INT; 10 | UPDATE cve_affected_systems_cache casc SET rh_account_id = sub.id FROM (SELECT id, name FROM rh_account) AS sub WHERE casc.rh_account = sub.name; 11 | ALTER TABLE cve_affected_systems_cache ALTER rh_account_id SET NOT NULL; 12 | ALTER TABLE cve_affected_systems_cache ADD CONSTRAINT rh_account_id FOREIGN KEY (rh_account_id) REFERENCES rh_account(id); 13 | ALTER TABLE cve_affected_systems_cache ADD CONSTRAINT cve_affected_systems_cache_cve_id_rh_account_id_key UNIQUE (cve_id, rh_account_id); 14 | CREATE INDEX ON cve_affected_systems_cache(rh_account_id); 15 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/020-business-risk.sql: -------------------------------------------------------------------------------- 1 | -- business_risk table 2 | CREATE TABLE IF NOT EXISTS business_risk ( 3 | id INT NOT NULL, 4 | name VARCHAR NOT NULL UNIQUE, 5 | CHECK (NOT empty(name)), 6 | PRIMARY KEY (id) 7 | ) TABLESPACE pg_default; 8 | 9 | INSERT INTO business_risk (id, name) VALUES 10 | (0, 'Not Defined'), (1, 'Low'), (2, 'Medium'), (3, 'High'); 11 | 12 | -- cve_preferences 13 | CREATE TABLE IF NOT EXISTS cve_account_data ( 14 | cve_id INT NOT NULL, 15 | rh_account_id INT NOT NULL, 16 | business_risk_id INT NOT NULL DEFAULT 0, 17 | business_risk_text TEXT, 18 | CONSTRAINT cve_id 19 | FOREIGN KEY (cve_id) 20 | REFERENCES cve_metadata (id), 21 | CONSTRAINT rh_account_id 22 | FOREIGN KEY (rh_account_id) 23 | REFERENCES rh_account (id), 24 | CONSTRAINT business_risk_id 25 | FOREIGN KEY (business_risk_id) 26 | REFERENCES business_risk (id), 27 | UNIQUE (cve_id, rh_account_id) 28 | ) TABLESPACE pg_default; 29 | 30 | GRANT SELECT, INSERT, UPDATE, DELETE ON cve_account_data TO ve_db_user_manager; 31 | 32 | GRANT SELECT ON ALL TABLES IN 
SCHEMA public TO ve_db_user_evaluator; 33 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 34 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 35 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_vmaas_sync; 36 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/021-store-more-cve-metadata.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_metadata ADD COLUMN redhat_url TEXT; 2 | ALTER TABLE cve_metadata ADD COLUMN secondary_url TEXT; 3 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/022-cve_status.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_account_data ADD COLUMN status_id INT NOT NULL DEFAULT 0; 2 | ALTER TABLE cve_account_data ADD COLUMN status_text TEXT; 3 | ALTER TABLE cve_account_data ADD CONSTRAINT status_id FOREIGN KEY (status_id) REFERENCES status (id); 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/023-cve_system_status_text.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_vulnerabilities ADD COLUMN status_text TEXT; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/024-vmaas-sync-permissions.sql: -------------------------------------------------------------------------------- 1 | -- vmaas_sync needs to delete from these tables to sync CVEs correctly 2 | GRANT DELETE ON system_vulnerabilities TO ve_db_user_vmaas_sync; 3 | GRANT DELETE ON cve_affected_systems_cache TO ve_db_user_vmaas_sync; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/026-cve_account_data_vmaas_sync.sql: -------------------------------------------------------------------------------- 1 | GRANT DELETE ON cve_account_data TO ve_db_user_vmaas_sync; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/029-manager-inserting-accounts.sql: -------------------------------------------------------------------------------- 1 | GRANT INSERT, UPDATE ON rh_account TO ve_db_user_manager; 2 | GRANT USAGE, SELECT ON rh_account_id_seq TO ve_db_user_manager; 3 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/030-fqdn.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD display_name TEXT; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/031-stale-dates.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD stale_timestamp TIMESTAMP WITH TIME ZONE; 2 | ALTER TABLE system_platform ADD stale_warning_timestamp TIMESTAMP WITH TIME ZONE; 3 | ALTER TABLE system_platform ADD culled_timestamp TIMESTAMP WITH TIME ZONE; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/032-add-metrics-user.sql: -------------------------------------------------------------------------------- 1 | -- Add metrics user 2 | 3 | CREATE USER ve_db_user_metrics; 4 | ALTER USER ve_db_user_metrics WITH PASSWORD
've_db_user_metrics_pwd'; 5 | 6 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_metrics; 7 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_metrics; 8 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/033-db-user-taskomatic.sql: -------------------------------------------------------------------------------- 1 | -- Add taskomatic user 2 | 3 | CREATE USER ve_db_user_taskomatic; 4 | ALTER USER ve_db_user_taskomatic WITH PASSWORD 've_db_user_taskomatic_pwd'; 5 | 6 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 7 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/035-stale-timestamp-index.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX ON system_platform(stale_timestamp); 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/036-taskomatic-write-access.sql: -------------------------------------------------------------------------------- 1 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 2 | 3 | GRANT SELECT, UPDATE ON system_platform TO ve_db_user_taskomatic; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/037-taskomatic-cve-cache.sql: -------------------------------------------------------------------------------- 1 | -- taskomatic user updates cve_account_data indirectly after a trigger fires 2 | 3 | GRANT SELECT, UPDATE ON cve_account_data TO ve_db_user_taskomatic; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/038-taskomatic-cve-cache-delete.sql: -------------------------------------------------------------------------------- 1 | -- taskomatic user updates cve_account_data indirectly after a trigger fires and sometimes deletes from that table 2 | 3 | GRANT SELECT, UPDATE, DELETE ON cve_account_data TO ve_db_user_taskomatic; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/039-advisor-listener.sql: -------------------------------------------------------------------------------- 1 | CREATE USER ve_db_user_advisor_listener; 2 | ALTER USER ve_db_user_advisor_listener WITH PASSWORD 've_db_user_advisor_listener_pwd'; 3 | 4 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 5 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 6 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/040-rules-table.sql: -------------------------------------------------------------------------------- 1 | -- insights rule table 2 | CREATE TABLE IF NOT EXISTS insights_rule ( 3 | id SERIAL, 4 | name TEXT NOT NULL UNIQUE, CHECK (NOT empty(name)), 5 | PRIMARY KEY (id) 6 | ) TABLESPACE pg_default; 7 | 8 | GRANT SELECT, INSERT ON insights_rule TO ve_db_user_advisor_listener; 9 | GRANT SELECT ON insights_rule TO ve_db_user_evaluator; 10 | GRANT SELECT ON insights_rule TO ve_db_user_listener; 11 | GRANT SELECT ON insights_rule TO ve_db_user_manager; 12 | GRANT SELECT ON insights_rule TO ve_db_user_vmaas_sync; 13 | GRANT SELECT ON insights_rule TO ve_db_user_metrics; 14 | GRANT SELECT ON insights_rule TO ve_db_user_taskomatic; 15 | 16 | GRANT USAGE, SELECT ON ALL
SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 17 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 18 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 19 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 20 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_metrics; 21 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 22 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 23 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/041-advisor-listener-write.sql: -------------------------------------------------------------------------------- 1 | GRANT SELECT, INSERT, UPDATE, DELETE ON rh_account TO ve_db_user_advisor_listener; 2 | GRANT SELECT, INSERT, UPDATE, DELETE ON system_platform TO ve_db_user_advisor_listener; 3 | GRANT SELECT, INSERT, UPDATE, DELETE ON cve_metadata TO ve_db_user_advisor_listener; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/042-system-vulnerabilities.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_vulnerabilities ADD rule_id INT; 2 | 3 | GRANT SELECT, INSERT, UPDATE, DELETE ON system_vulnerabilities TO ve_db_user_advisor_listener; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/043-insights-rule-update.sql: -------------------------------------------------------------------------------- 1 | GRANT UPDATE ON insights_rule TO ve_db_user_advisor_listener; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/044-rule_id-constraint.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_vulnerabilities ADD CONSTRAINT rule_id FOREIGN KEY (rule_id) REFERENCES insights_rule (id); 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/045-cve_account_data-locking.sql: -------------------------------------------------------------------------------- 1 | GRANT SELECT, INSERT, UPDATE, DELETE ON cve_account_data TO ve_db_user_advisor_listener; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/048-advisor_evaluated-column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD advisor_evaluated TIMESTAMP WITH TIME ZONE; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/050-cve_rule_mapping.sql: -------------------------------------------------------------------------------- 1 | -- cve_rule_mapping 2 | CREATE TABLE IF NOT EXISTS cve_rule_mapping ( 3 | cve_id INT NOT NULL, 4 | rule_id INT NOT NULL, 5 | CONSTRAINT cve_id 6 | FOREIGN KEY (cve_id) 7 | REFERENCES cve_metadata (id), 8 | CONSTRAINT rule_id 9 | FOREIGN KEY (rule_id) 10 | REFERENCES insights_rule (id) 11 | ) TABLESPACE pg_default; 12 | 13 | GRANT SELECT, INSERT, UPDATE, DELETE ON cve_rule_mapping TO ve_db_user_advisor_listener; 14 | GRANT SELECT ON cve_rule_mapping TO ve_db_user_evaluator; 15 | GRANT SELECT ON cve_rule_mapping TO ve_db_user_listener; 16 | GRANT SELECT ON cve_rule_mapping TO 
ve_db_user_manager; 17 | GRANT SELECT ON cve_rule_mapping TO ve_db_user_vmaas_sync; 18 | GRANT SELECT ON cve_rule_mapping TO ve_db_user_metrics; 19 | GRANT SELECT ON cve_rule_mapping TO ve_db_user_taskomatic; 20 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/051-inisghts_rule-additional-data.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE insights_rule ADD description_text TEXT; 2 | ALTER TABLE insights_rule ADD summary_text TEXT; 3 | ALTER TABLE insights_rule ADD generic_text TEXT; 4 | ALTER TABLE insights_rule ADD reboot_required BOOLEAN; 5 | ALTER TABLE insights_rule ADD playbook_count INT; 6 | ALTER TABLE insights_rule ADD change_risk INT; 7 | ALTER TABLE insights_rule ADD kbase_node_id INT; 8 | 9 | GRANT UPDATE (description_text, summary_text, generic_text, reboot_required, playbook_count, change_risk, kbase_node_id) ON insights_rule TO ve_db_user_taskomatic; 10 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/052-insights_rule-active.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE insights_rule ADD active BOOLEAN NOT NULL DEFAULT FALSE; 2 | 3 | GRANT UPDATE (active) ON insights_rule TO ve_db_user_taskomatic; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/053-cve_rule_mapping-unique.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_rule_mapping ADD CONSTRAINT cve_rule_mapping_cve_id_rule_id_key UNIQUE (cve_id, rule_id); 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/055-rule_hit_details.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_vulnerabilities ADD rule_hit_details TEXT; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/057-rules_more_info.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE insights_rule ADD reason_text TEXT; 2 | ALTER TABLE insights_rule ADD resolution_text TEXT; 3 | ALTER TABLE insights_rule ADD more_info_text TEXT; 4 | 5 | GRANT UPDATE (reason_text, resolution_text, more_info_text) ON insights_rule TO ve_db_user_taskomatic; 6 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/058-system_platform-indexes.sql: -------------------------------------------------------------------------------- 1 | DROP INDEX IF EXISTS system_platform_stale_timestamp_idx; 2 | 3 | CREATE INDEX ON system_platform(stale); 4 | 5 | CREATE INDEX ON system_platform(stale_warning_timestamp); 6 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/060-taskomatic-system_platform-delete.sql: -------------------------------------------------------------------------------- 1 | GRANT DELETE ON system_platform TO ve_db_user_taskomatic; 2 | GRANT DELETE ON system_vulnerabilities TO ve_db_user_taskomatic; 3 | GRANT DELETE ON system_repo TO ve_db_user_taskomatic; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/061-trigger-once.sql: 
-------------------------------------------------------------------------------- 1 | DROP TRIGGER system_platform_opt_out_cache ON system_platform; 2 | DROP TRIGGER system_platform_stale_cache ON system_platform; 3 | CREATE TRIGGER system_platform_cache 4 | AFTER UPDATE OF opt_out, stale ON system_platform 5 | FOR EACH ROW EXECUTE PROCEDURE opt_out_system_update_cache(); 6 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/062-mitigation_reason.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_vulnerabilities ADD COLUMN mitigation_reason TEXT; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/063-cyndi_integration.sql: -------------------------------------------------------------------------------- 1 | CREATE SCHEMA inventory; 2 | 3 | CREATE USER cyndi; 4 | 5 | CREATE ROLE cyndi_admin; 6 | GRANT ALL PRIVILEGES ON SCHEMA inventory TO cyndi_admin; 7 | 8 | CREATE ROLE cyndi_reader; 9 | GRANT USAGE ON SCHEMA inventory TO cyndi_reader; 10 | 11 | GRANT cyndi_admin TO cyndi; 12 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/064-set_cyndi_pwd.sql: -------------------------------------------------------------------------------- 1 | ALTER USER cyndi WITH PASSWORD 'cyndi_db_admin_pwd'; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/066-rules_git_sync.sql: -------------------------------------------------------------------------------- 1 | GRANT INSERT, UPDATE ON cve_metadata TO ve_db_user_taskomatic; 2 | REVOKE UPDATE (description_text, summary_text, generic_text, reason_text, resolution_text, more_info_text, reboot_required, playbook_count, change_risk, kbase_node_id, active) ON insights_rule FROM ve_db_user_taskomatic; 3 | GRANT INSERT, UPDATE, DELETE ON insights_rule TO ve_db_user_taskomatic; 4 | GRANT INSERT ON cve_rule_mapping TO ve_db_user_taskomatic; 5 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/069-add_advisories_list.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_metadata ADD COLUMN IF NOT EXISTS advisories_list JSONB; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/070-playbooks.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS playbook ( 2 | id SERIAL, 3 | rule_id INT, 4 | play TEXT, 5 | version TEXT, 6 | description TEXT, 7 | PRIMARY KEY (id), 8 | CONSTRAINT rule_id 9 | FOREIGN KEY (rule_id) 10 | REFERENCES insights_rule (id) 11 | ) TABLESPACE pg_default; 12 | 13 | GRANT INSERT, UPDATE, DELETE ON playbook TO ve_db_user_taskomatic; 14 | 15 | GRANT SELECT ON playbook TO ve_db_user_evaluator; 16 | GRANT SELECT ON playbook TO ve_db_user_listener; 17 | GRANT SELECT ON playbook TO ve_db_user_manager; 18 | GRANT SELECT ON playbook TO ve_db_user_vmaas_sync; 19 | GRANT SELECT ON playbook TO ve_db_user_metrics; 20 | GRANT SELECT ON playbook TO ve_db_user_taskomatic; 21 | GRANT SELECT ON playbook TO ve_db_user_advisor_listener; 22 | 23 | GRANT SELECT, USAGE ON SEQUENCE playbook_id_seq TO ve_db_user_advisor_listener; 24 | GRANT SELECT, USAGE ON SEQUENCE playbook_id_seq TO ve_db_user_evaluator; 25 | GRANT SELECT, 
USAGE ON SEQUENCE playbook_id_seq TO ve_db_user_listener; 26 | GRANT SELECT, USAGE ON SEQUENCE playbook_id_seq TO ve_db_user_manager; 27 | GRANT SELECT, USAGE ON SEQUENCE playbook_id_seq TO ve_db_user_metrics; 28 | GRANT SELECT, USAGE ON SEQUENCE playbook_id_seq TO ve_db_user_taskomatic; 29 | GRANT SELECT, USAGE ON SEQUENCE playbook_id_seq TO ve_db_user_vmaas_sync; 30 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/071-manager_cyndi_grant.sql: -------------------------------------------------------------------------------- 1 | GRANT cyndi_reader TO ve_db_user_manager; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/073-taskomatic_cyndi_grant.sql: -------------------------------------------------------------------------------- 1 | GRANT cyndi_reader TO ve_db_user_taskomatic; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/074-manager-admin-delete-sys.sql: -------------------------------------------------------------------------------- 1 | GRANT UPDATE (cve_count_cache, opt_out, stale, when_deleted), DELETE ON system_platform TO ve_db_user_manager; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/075-rules_impact_table.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE insights_rule ADD COLUMN IF NOT EXISTS rule_impact INT; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/079-drop-account-cache.sql: -------------------------------------------------------------------------------- 1 | DROP TRIGGER system_platform_cache ON system_platform; 2 | DROP FUNCTION opt_out_system_update_cache; 3 | DROP FUNCTION refresh_all_cached_counts; 4 | DROP FUNCTION refresh_account_cached_counts; 5 | DROP FUNCTION refresh_cve_cached_counts; 6 | DROP FUNCTION refresh_cve_account_cached_counts; 7 | DROP FUNCTION refresh_system_cached_counts; 8 | 9 | DELETE FROM cve_account_data WHERE business_risk_id = 0 AND business_risk_text IS NULL AND status_id = 0 AND status_text IS NULL; 10 | ALTER TABLE cve_account_data DROP COLUMN systems_affected; 11 | ALTER TABLE cve_account_data DROP COLUMN systems_status_divergent; 12 | 13 | ALTER TABLE system_platform DROP COLUMN cve_count_cache; 14 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/080-rule-publish-date.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE insights_rule ADD publish_date TIMESTAMP; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/081-system_vulnerabilities_cve_index.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX ON system_vulnerabilities_inactive(cve_id); 2 | 3 | CREATE OR REPLACE FUNCTION create_cve_index(parts INTEGER) 4 | RETURNS VOID AS 5 | $$ 6 | DECLARE 7 | I INTEGER; 8 | BEGIN 9 | I := 0; 10 | WHILE I < parts 11 | LOOP 12 | EXECUTE 'CREATE INDEX ON system_vulnerabilities_active_' || text(I) || '(cve_id);'; 13 | I = I + 1; 14 | END LOOP; 15 | END; 16 | $$ LANGUAGE plpgsql; 17 | 18 | SELECT create_cve_index(256); 19 | DROP FUNCTION create_cve_index; 20 | 
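Script 081 above creates the cve_id index on each of the 256 active partitions one by one, iterating over the system_vulnerabilities_active_N partition names inside a throwaway plpgsql function. A minimal spot-check, not part of the upgrade script itself and assuming the partition naming used in that loop, would list the per-partition indexes via pg_indexes:

    -- List the per-partition cve_id indexes the loop should have created.
    SELECT indexname, tablename
    FROM pg_indexes
    WHERE tablename LIKE 'system_vulnerabilities_active_%'
      AND indexdef LIKE '%(cve_id)%';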
-------------------------------------------------------------------------------- /database/schema/upgrade_scripts/082-add-exploits-field.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_metadata ADD COLUMN IF NOT EXISTS exploits BOOLEAN NOT NULL DEFAULT FALSE; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/083-drop-generic.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE insights_rule DROP generic_text; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/084-rule-only-field.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE insights_rule ADD COLUMN rule_only BOOLEAN NOT NULL DEFAULT FALSE; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/085-content_version.sql: -------------------------------------------------------------------------------- 1 | -- content version table 2 | 3 | CREATE TABLE IF NOT EXISTS content_version ( 4 | insights_content_vulnerability TEXT, 5 | insights_content_vulnerability_repo TEXT, 6 | insights_playbooks TEXT, 7 | insights_playbooks_repo TEXT 8 | ) TABLESPACE pg_default; 9 | 10 | INSERT INTO content_version VALUES (NULL, NULL, NULL, NULL); 11 | 12 | GRANT UPDATE ON content_version TO ve_db_user_taskomatic; 13 | 14 | GRANT SELECT ON content_version TO ve_db_user_evaluator; 15 | GRANT SELECT ON content_version TO ve_db_user_listener; 16 | GRANT SELECT ON content_version TO ve_db_user_manager; 17 | GRANT SELECT ON content_version TO ve_db_user_vmaas_sync; 18 | GRANT SELECT ON content_version TO ve_db_user_metrics; 19 | GRANT SELECT ON content_version TO ve_db_user_taskomatic; 20 | GRANT SELECT ON content_version TO ve_db_user_advisor_listener; 21 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/086-cve_name.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_metadata ADD celebrity_name TEXT; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/087-bigserial-migration.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ALTER COLUMN id TYPE BIGINT; 2 | ALTER SEQUENCE rh_account_id_seq AS BIGINT MAXVALUE 9223372036854775807; 3 | SELECT setval('rh_account_id_seq', (SELECT COALESCE(MAX(id), 0) + 1 FROM rh_account)); 4 | 5 | ALTER TABLE system_platform ALTER COLUMN id TYPE BIGINT; 6 | ALTER SEQUENCE system_platform_id_seq AS BIGINT MAXVALUE 9223372036854775807; 7 | SELECT setval('system_platform_id_seq', (SELECT COALESCE(MAX(id), 0) + 1 FROM system_platform)); 8 | 9 | ALTER TABLE cve_metadata ALTER COLUMN id TYPE BIGINT; 10 | ALTER SEQUENCE cve_metadata_id_seq AS BIGINT MAXVALUE 9223372036854775807; 11 | SELECT setval('cve_metadata_id_seq', (SELECT COALESCE(MAX(id), 0) + 1 FROM cve_metadata)); 12 | 13 | ALTER TABLE insights_rule ALTER COLUMN id TYPE BIGINT; 14 | ALTER SEQUENCE insights_rule_id_seq AS BIGINT MAXVALUE 9223372036854775807; 15 | SELECT setval('insights_rule_id_seq', (SELECT COALESCE(MAX(id), 0) + 1 FROM insights_rule)); 16 | 17 | ALTER TABLE playbook ALTER COLUMN id TYPE BIGINT; 18 | ALTER SEQUENCE playbook_id_seq AS BIGINT MAXVALUE 9223372036854775807; 19 | 
SELECT setval('playbook_id_seq', (SELECT COALESCE(MAX(id), 0) + 1 FROM playbook)); 20 | 21 | ALTER TABLE system_vulnerabilities ALTER COLUMN id TYPE BIGINT; 22 | ALTER SEQUENCE system_vulnerabilities_id_seq AS BIGINT MAXVALUE 9223372036854775807; 23 | SELECT setval('system_vulnerabilities_id_seq', (SELECT COALESCE(MAX(id), 0) + 1 FROM system_vulnerabilities)); 24 | 25 | ALTER TABLE repo ALTER COLUMN id TYPE BIGINT; 26 | ALTER SEQUENCE repo_id_seq AS BIGINT MAXVALUE 9223372036854775807; 27 | SELECT setval('repo_id_seq', (SELECT COALESCE(MAX(id), 0) + 1 FROM repo)); 28 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/088-cyndi-fedramp-fix.sql: -------------------------------------------------------------------------------- 1 | ALTER USER cyndi WITH PASSWORD 'cyndi_db_admin_pwd'; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/089-metrics-migration.sql: -------------------------------------------------------------------------------- 1 | -- usage statistics table 2 | CREATE TABLE IF NOT EXISTS usage_statistics ( 3 | id SERIAL, 4 | recorded DATE, 5 | name TEXT NOT NULL, CHECK (NOT empty(name)), 6 | cve_status_usage INT, 7 | system_cve_status_usage INT, 8 | cve_business_risk_usage INT, 9 | PRIMARY KEY (id) 10 | ) TABLESPACE pg_default; 11 | 12 | GRANT SELECT, INSERT, UPDATE, DELETE ON usage_statistics TO ve_db_user_metrics; 13 | GRANT SELECT, INSERT, UPDATE, DELETE ON usage_statistics TO ve_db_user_manager; 14 | 15 | GRANT SELECT ON TABLE usage_statistics TO ve_db_user_advisor_listener; 16 | GRANT SELECT ON TABLE usage_statistics TO ve_db_user_evaluator; 17 | GRANT SELECT ON TABLE usage_statistics TO ve_db_user_listener; 18 | GRANT SELECT ON TABLE usage_statistics TO ve_db_user_taskomatic; 19 | GRANT SELECT ON TABLE usage_statistics TO ve_db_user_vmaas_sync; 20 | 21 | GRANT SELECT, USAGE ON SEQUENCE usage_statistics_id_seq TO ve_db_user_metrics; 22 | GRANT SELECT, USAGE ON SEQUENCE usage_statistics_id_seq TO ve_db_user_manager; 23 | GRANT SELECT, USAGE ON SEQUENCE usage_statistics_id_seq TO ve_db_user_advisor_listener; 24 | GRANT SELECT, USAGE ON SEQUENCE usage_statistics_id_seq TO ve_db_user_evaluator; 25 | GRANT SELECT, USAGE ON SEQUENCE usage_statistics_id_seq TO ve_db_user_listener; 26 | GRANT SELECT, USAGE ON SEQUENCE usage_statistics_id_seq TO ve_db_user_taskomatic; 27 | GRANT SELECT, USAGE ON SEQUENCE usage_statistics_id_seq TO ve_db_user_vmaas_sync; 28 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/090-vmaas-sync-rule-mapping.sql: -------------------------------------------------------------------------------- 1 | GRANT DELETE ON cve_rule_mapping TO ve_db_user_vmaas_sync; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/091-add-rule-checksum.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD COLUMN advisor_checksum TEXT; 2 | ALTER TABLE system_platform ADD COLUMN advisor_unchanged_since TIMESTAMP WITH TIME ZONE NOT NULL default CURRENT_TIMESTAMP; 3 | ALTER TABLE system_platform ALTER COLUMN advisor_unchanged_since DROP DEFAULT; 4 | 5 | create or replace function check_unchanged() 6 | RETURNS TRIGGER AS 7 | $check_unchanged$ 8 | BEGIN 9 | IF (TG_OP = 'INSERT') THEN 10 | IF (NEW.unchanged_since IS NULL) THEN 11 | NEW.unchanged_since := 
CURRENT_TIMESTAMP; 12 | END IF; 13 | IF (NEW.advisor_unchanged_since IS NULL) THEN 14 | NEW.advisor_unchanged_since := CURRENT_TIMESTAMP; 15 | END IF; 16 | END IF; 17 | IF (TG_OP = 'UPDATE') THEN 18 | IF (NEW.json_checksum <> OLD.json_checksum) THEN 19 | NEW.unchanged_since := CURRENT_TIMESTAMP; 20 | END IF; 21 | IF (NEW.advisor_checksum <> OLD.advisor_checksum) THEN 22 | NEW.advisor_unchanged_since := CURRENT_TIMESTAMP; 23 | END IF; 24 | END IF; 25 | RETURN NEW; 26 | END; 27 | $check_unchanged$ 28 | LANGUAGE 'plpgsql'; 29 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/092-add-system-cache.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD COLUMN cve_count_cache INT NOT NULL DEFAULT 0; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/093-cve-account-cache.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ADD COLUMN cve_cache_from TIMESTAMP WITH TIME ZONE; 2 | 3 | GRANT SELECT, UPDATE ON rh_account TO ve_db_user_taskomatic; 4 | 5 | -- cve_account_cache 6 | CREATE TABLE IF NOT EXISTS cve_account_cache ( 7 | rh_account_id INT NOT NULL, 8 | cve_id INT NOT NULL, 9 | systems_affected INT NOT NULL, 10 | systems_status_divergent INT NOT NULL, 11 | CONSTRAINT rh_account_id 12 | FOREIGN KEY (rh_account_id) 13 | REFERENCES rh_account (id), 14 | CONSTRAINT cve_id 15 | FOREIGN KEY (cve_id) 16 | REFERENCES cve_metadata (id), 17 | UNIQUE (rh_account_id, cve_id) 18 | ) TABLESPACE pg_default; 19 | 20 | GRANT SELECT, INSERT, UPDATE, DELETE ON cve_account_cache TO ve_db_user_taskomatic; 21 | 22 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_evaluator; 23 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 24 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 25 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 26 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 27 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 28 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_vmaas_sync; 29 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 30 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_metrics; 31 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_metrics; 32 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 33 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 34 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 35 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 36 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/094-metrics-disable.sql: -------------------------------------------------------------------------------- 1 | REVOKE ALL PRIVILEGES ON TABLE usage_statistics FROM ve_db_user_manager; 2 | 3 | REVOKE ALL PRIVILEGES ON TABLE usage_statistics FROM ve_db_user_advisor_listener; 4 | REVOKE ALL PRIVILEGES ON TABLE usage_statistics FROM ve_db_user_evaluator; 5 | REVOKE ALL PRIVILEGES ON TABLE usage_statistics FROM ve_db_user_listener; 6 | REVOKE ALL PRIVILEGES ON TABLE usage_statistics FROM ve_db_user_taskomatic; 7 | REVOKE ALL PRIVILEGES ON TABLE 
usage_statistics FROM ve_db_user_vmaas_sync; 8 | 9 | REVOKE ALL PRIVILEGES ON SEQUENCE usage_statistics_id_seq FROM ve_db_user_manager; 10 | REVOKE ALL PRIVILEGES ON SEQUENCE usage_statistics_id_seq FROM ve_db_user_advisor_listener; 11 | REVOKE ALL PRIVILEGES ON SEQUENCE usage_statistics_id_seq FROM ve_db_user_evaluator; 12 | REVOKE ALL PRIVILEGES ON SEQUENCE usage_statistics_id_seq FROM ve_db_user_listener; 13 | REVOKE ALL PRIVILEGES ON SEQUENCE usage_statistics_id_seq FROM ve_db_user_taskomatic; 14 | REVOKE ALL PRIVILEGES ON SEQUENCE usage_statistics_id_seq FROM ve_db_user_vmaas_sync; 15 | 16 | REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM ve_db_user_metrics; 17 | REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM ve_db_user_metrics; 18 | 19 | DROP USER ve_db_user_metrics; 20 | 21 | DROP TABLE IF EXISTS usage_statistics; 22 | DROP SEQUENCE IF EXISTS usage_statistics_id_seq; 23 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/095-add_host_type.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD host_type TEXT; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/096-grant-cyndi-reader-listeners.sql: -------------------------------------------------------------------------------- 1 | GRANT cyndi_reader TO ve_db_user_advisor_listener; 2 | GRANT cyndi_reader TO ve_db_user_listener; 3 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/097-last-status-change.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ADD COLUMN last_status_change TIMESTAMP WITH TIME ZONE; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/098-advisory-remediation-flags.sql: -------------------------------------------------------------------------------- 1 | -- remediation_type 2 | CREATE TABLE IF NOT EXISTS remediation_type ( 3 | id INT NOT NULL, 4 | name TEXT NOT NULL UNIQUE, CHECK (NOT empty(name)), 5 | PRIMARY KEY (id) 6 | ) TABLESPACE pg_default; 7 | 8 | INSERT INTO remediation_type (id, name) VALUES 9 | (0, 'None'), (1, 'Manual'), (2, 'Playbook'); 10 | 11 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_evaluator; 12 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 13 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 14 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 15 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 16 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 17 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_vmaas_sync; 18 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 19 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 20 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 21 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 22 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 23 | 24 | ALTER TABLE system_vulnerabilities ADD COLUMN advisory_available BOOLEAN; 25 | ALTER TABLE system_vulnerabilities ADD COLUMN remediation_type_id INT; 26 | ALTER TABLE system_vulnerabilities ADD CONSTRAINT
remediation_type_id FOREIGN KEY (remediation_type_id) REFERENCES remediation_type (id); 27 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/099-vmaas-sync-delete-cache.sql: -------------------------------------------------------------------------------- 1 | GRANT DELETE ON cve_account_cache TO ve_db_user_vmaas_sync; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/100-account-cve-cache-keepalive.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ADD COLUMN cve_cache_keepalive TIMESTAMP WITH TIME ZONE; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/101-add-business-risk-critical.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO business_risk (id, name) VALUES (4, 'Critical'); 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/102-add-announcement.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS announcement ( 2 | id SERIAL, 3 | message TEXT NOT NULL, 4 | last_updated TIMESTAMP WITH TIME ZONE NOT NULL 5 | ); 6 | 7 | GRANT SELECT, INSERT, DELETE ON announcement TO ve_db_user_manager; 8 | 9 | GRANT SELECT ON announcement TO ve_db_user_evaluator; 10 | GRANT SELECT ON announcement TO ve_db_user_listener; 11 | GRANT SELECT ON announcement TO ve_db_user_manager; 12 | GRANT SELECT ON announcement TO ve_db_user_vmaas_sync; 13 | GRANT SELECT ON announcement TO ve_db_user_taskomatic; 14 | GRANT SELECT ON announcement TO ve_db_user_advisor_listener; 15 | 16 | GRANT SELECT, USAGE ON SEQUENCE announcement_id_seq TO ve_db_user_advisor_listener; 17 | GRANT SELECT, USAGE ON SEQUENCE announcement_id_seq TO ve_db_user_evaluator; 18 | GRANT SELECT, USAGE ON SEQUENCE announcement_id_seq TO ve_db_user_listener; 19 | GRANT SELECT, USAGE ON SEQUENCE announcement_id_seq TO ve_db_user_manager; 20 | GRANT SELECT, USAGE ON SEQUENCE announcement_id_seq TO ve_db_user_taskomatic; 21 | GRANT SELECT, USAGE ON SEQUENCE announcement_id_seq TO ve_db_user_vmaas_sync; 22 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/103-rules-account-cache.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS rule_account_cache ( 2 | rh_account_id INT NOT NULL, 3 | rule_id INT NOT NULL, 4 | systems_affected INT NOT NULL, 5 | CONSTRAINT rh_account_id 6 | FOREIGN KEY (rh_account_id) 7 | REFERENCES rh_account (id), 8 | CONSTRAINT rule_id 9 | FOREIGN KEY (rule_id) 10 | REFERENCES insights_rule (id), 11 | UNIQUE (rh_account_id, rule_id) 12 | ) TABLESPACE pg_default; 13 | 14 | GRANT SELECT, INSERT, UPDATE, DELETE ON rule_account_cache TO ve_db_user_taskomatic; 15 | 16 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_evaluator; 17 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 18 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 19 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 20 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 21 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 22 | GRANT SELECT ON ALL TABLES IN SCHEMA public 
TO ve_db_user_vmaas_sync; 23 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 24 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 25 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 26 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 27 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 28 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/104-notificator.sql: -------------------------------------------------------------------------------- 1 | -- Notifications integration with vulnerability 2 | CREATE USER ve_db_user_notificator; 3 | ALTER USER ve_db_user_notificator WITH PASSWORD 've_db_user_notificator_pwd'; 4 | 5 | CREATE TYPE notification AS enum('new-cve-cvss', 'new-cve-severity', 'new-cve-security-rule', 'any-cve-known-exploit'); 6 | 7 | CREATE TABLE IF NOT EXISTS notified_accounts ( 8 | rh_account_id INT NOT NULL, 9 | cve_id INT NOT NULL, 10 | notif_type notification NOT NULL, 11 | UNIQUE (rh_account_id, cve_id, notif_type), 12 | CONSTRAINT rh_account_id 13 | FOREIGN KEY (rh_account_id) 14 | REFERENCES rh_account (id), 15 | CONSTRAINT cve_id 16 | FOREIGN KEY (cve_id) 17 | REFERENCES cve_metadata (id) 18 | ) TABLESPACE pg_default; 19 | 20 | GRANT SELECT, INSERT, UPDATE, DELETE ON notified_accounts TO ve_db_user_notificator; 21 | 22 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_evaluator; 23 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 24 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 25 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 26 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 27 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 28 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_vmaas_sync; 29 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 30 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 31 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 32 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 33 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 34 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_notificator; 35 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_notificator; 36 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/105-add-org-id.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ADD COLUMN org_id TEXT UNIQUE CONSTRAINT rh_account_org_id_check CHECK (NOT empty(org_id)); 2 | ALTER TABLE rh_account RENAME COLUMN name TO account_number; 3 | 4 | ALTER TABLE rh_account RENAME CONSTRAINT rh_account_name_key TO rh_account_account_number_key; 5 | ALTER TABLE rh_account RENAME CONSTRAINT rh_account_name_check TO rh_account_account_number_check; 6 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/106-delete-notif-taskomatic.sql: -------------------------------------------------------------------------------- 1 | GRANT SELECT, INSERT, UPDATE, DELETE ON notified_accounts TO ve_db_user_taskomatic; 2 | 
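The UNIQUE (rh_account_id, cve_id, notif_type) key on notified_accounts (script 104 above) lets the notificator record each notification at most once per account, CVE, and type. A minimal sketch of such an idempotent insert, not taken from the repo; the numeric IDs are placeholders:

    -- Record that account 1 was notified about CVE row 42; do nothing on repeat delivery.
    INSERT INTO notified_accounts (rh_account_id, cve_id, notif_type)
    VALUES (1, 42, 'new-cve-cvss')
    ON CONFLICT (rh_account_id, cve_id, notif_type) DO NOTHING;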
-------------------------------------------------------------------------------- /database/schema/upgrade_scripts/107-notif-accounts-index.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX ON notified_accounts(rh_account_id); 2 | CREATE INDEX ON notified_accounts(cve_id); 3 | CREATE INDEX ON notified_accounts(notif_type); 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/108-system_vulnerabilities_rule_id_index.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS sv_rule_id ON system_vulnerabilities_active (rule_id); 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/109-notificator-manager.sql: -------------------------------------------------------------------------------- 1 | GRANT SELECT, INSERT, UPDATE, DELETE ON notified_accounts TO ve_db_user_manager; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/110-account_number-null.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ALTER COLUMN account_number DROP NOT NULL; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/111-exploit_data.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_metadata ADD exploit_data JSONB; 2 | 3 | ALTER TABLE cve_metadata DROP exploits; 4 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/112-add-insights_needs_reboot.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE playbook ADD COLUMN insights_needs_reboot BOOLEAN; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/113-rule_autoplaybook.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE insights_rule ADD COLUMN generate_autoplaybook BOOLEAN DEFAULT TRUE; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/114-add_advisories_column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_vulnerabilities ADD COLUMN advisories TEXT; 2 | CREATE INDEX IF NOT EXISTS advisories_idx ON system_vulnerabilities_active (advisories); 3 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/115-truncate_inactive_partition.sql: -------------------------------------------------------------------------------- 1 | TRUNCATE TABLE system_vulnerabilities_inactive; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/116-system_platform_rules.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE system_platform ADD COLUMN rule_results JSONB; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/117-system_platform_indexes.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX ON system_platform(opt_out); 2 | CREATE INDEX ON system_platform(host_type); 3 | 
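Script 117 above adds plain btree indexes so filters on opt_out and host_type avoid full scans of system_platform. A hedged way to confirm the planner picks them up; 'edge' is a placeholder host_type value:

    -- An Index Scan or Bitmap Index Scan in the plan indicates the new index is used.
    EXPLAIN SELECT id FROM system_platform WHERE host_type = 'edge';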
-------------------------------------------------------------------------------- /database/schema/upgrade_scripts/118-system_vulnerabilities_indexes.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS account_cve ON system_vulnerabilities_active(rh_account_id, cve_id); 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/119-system_vulnerabilities_state.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE VULNERABILITY_STATE AS ENUM ('NOT_VULNERABLE', 'VULNERABLE_BY_PACKAGE', 'VULNERABLE_BY_RULE', 'VULNERABLE_BY_RULE_AND_PACKAGE', 'VULNERABLE_BY_PACKAGE_NOT_RULE'); 2 | 3 | ALTER TABLE system_vulnerabilities ADD COLUMN state VULNERABILITY_STATE; 4 | 5 | CREATE INDEX ON system_vulnerabilities_active(state); 6 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/120-evaluator_permissions.sql: -------------------------------------------------------------------------------- 1 | GRANT SELECT, INSERT, UPDATE ON insights_rule TO ve_db_user_evaluator; 2 | GRANT SELECT, INSERT, UPDATE, DELETE ON cve_rule_mapping TO ve_db_user_evaluator; 3 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/121-resolution_type.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE playbook ADD COLUMN resolution_type TEXT; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/123-unfixed_feature_flag.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ADD COLUMN cves_without_errata BOOLEAN NOT NULL DEFAULT TRUE; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/124-advisory_available_cache.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_account_cache ADD COLUMN advisory_available BOOLEAN; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/125-delete_system_vuln_package.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE FUNCTION delete_system(inventory_id_in UUID) 2 | RETURNS TABLE (deleted_inventory_id UUID) AS 3 | $delete_system$ 4 | DECLARE 5 | system_id_in INT; 6 | rh_account_id_in INT; 7 | BEGIN 8 | -- opt out to refresh cache and then delete 9 | SELECT id, rh_account_id FROM system_platform WHERE inventory_id = inventory_id_in INTO system_id_in, rh_account_id_in FOR UPDATE; 10 | UPDATE system_platform SET opt_out = true WHERE id = system_id_in; 11 | DELETE FROM system_vulnerabilities WHERE system_id = system_id_in AND rh_account_id = rh_account_id_in; 12 | DELETE FROM system_vulnerable_package WHERE system_id = system_id_in AND rh_account_id = rh_account_id_in; 13 | DELETE FROM system_repo WHERE system_id = system_id_in; 14 | RETURN QUERY DELETE FROM system_platform WHERE id = system_id_in RETURNING inventory_id; 15 | END; 16 | $delete_system$ 17 | LANGUAGE 'plpgsql'; 18 | 19 | GRANT SELECT, DELETE ON system_vulnerable_package TO ve_db_user_taskomatic; 20 | GRANT SELECT, DELETE ON system_vulnerable_package TO ve_db_user_manager; 21 | 
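Per its own comment, delete_system in script 125 above first opts the system out to refresh caches, then removes its vulnerability, vulnerable-package, and repo rows before deleting the system_platform row and returning the inventory_id. A hypothetical invocation; the UUID is a placeholder:

    SELECT deleted_inventory_id
    FROM delete_system('00000000-0000-0000-0000-000000000000'::uuid);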
-------------------------------------------------------------------------------- /database/schema/upgrade_scripts/126-unpatched_cache.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_account_cache ADD COLUMN systems_affected_unpatched INT; 2 | ALTER TABLE cve_account_cache ADD COLUMN systems_status_divergent_unpatched INT; 3 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/127-inventory_groups_cache.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE rh_account ADD COLUMN cve_cache_groups JSONB; 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/129-migrate_system_cve_data.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO system_cve_data (system_id, cve_id, status_id, status_text) 2 | SELECT system_id, cve_id, status_id, status_text 3 | FROM system_vulnerabilities_active 4 | WHERE status_id != 0 OR status_text IS NOT NULL; 5 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/130-edge_cache.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_account_cache ADD COLUMN systems_affected_rpmdnf INT; 2 | ALTER TABLE cve_account_cache ADD COLUMN systems_affected_edge INT; 3 | ALTER TABLE cve_account_cache ADD COLUMN systems_affected_unpatched_rpmdnf INT; 4 | ALTER TABLE cve_account_cache ADD COLUMN systems_affected_unpatched_edge INT; 5 | 6 | UPDATE cve_account_cache SET systems_affected_rpmdnf = 0, systems_affected_edge = 0, systems_affected_unpatched_rpmdnf = 0, systems_affected_unpatched_edge = 0; 7 | 8 | ALTER TABLE cve_account_cache ALTER COLUMN systems_affected_rpmdnf SET NOT NULL; 9 | ALTER TABLE cve_account_cache ALTER COLUMN systems_affected_edge SET NOT NULL; 10 | ALTER TABLE cve_account_cache ALTER COLUMN systems_affected_unpatched_rpmdnf SET NOT NULL; 11 | ALTER TABLE cve_account_cache ALTER COLUMN systems_affected_unpatched_edge SET NOT NULL; 12 | 13 | UPDATE rh_account SET cve_cache_from = NULL, cve_cache_keepalive = NULL; 14 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/131-drop-systems_affected.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_account_cache DROP COLUMN systems_affected; 2 | ALTER TABLE cve_account_cache DROP COLUMN systems_affected_unpatched; 3 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/132-add-vulnerable_package_cve-index.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX ON vulnerable_package_cve(cve_id); 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/133-vulnerable_package-module.sql: -------------------------------------------------------------------------------- 1 | -- module table 2 | CREATE TABLE IF NOT EXISTS module ( 3 | id BIGSERIAL, 4 | name TEXT NOT NULL, CHECK (NOT empty(name)), 5 | stream TEXT NOT NULL, CHECK (NOT empty(stream)), 6 | UNIQUE (name, stream), 7 | PRIMARY KEY (id) 8 | ) TABLESPACE pg_default; 9 | 10 | GRANT SELECT, INSERT, UPDATE, DELETE ON module TO ve_db_user_evaluator; 11 | 12 | -- user for evaluator component 13 | 
GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_evaluator; 14 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 15 | 16 | -- user for listener component 17 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 18 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 19 | 20 | -- user for UI manager component 21 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 22 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 23 | GRANT INSERT, DELETE ON announcement TO ve_db_user_manager; 24 | 25 | -- user for VMaaS sync component 26 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_vmaas_sync; 27 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 28 | 29 | -- user for taskomatic service 30 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 31 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 32 | 33 | -- user for advisor listener 34 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 35 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 36 | 37 | -- user for notificator 38 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_notificator; 39 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_notificator; 40 | 41 | ALTER TABLE vulnerable_package ADD COLUMN module_id BIGINT; 42 | ALTER TABLE vulnerable_package DROP CONSTRAINT vulnerable_package_package_name_id_cpe_id_key; 43 | ALTER TABLE vulnerable_package ADD CONSTRAINT module_id FOREIGN KEY (module_id) REFERENCES module (id); 44 | 45 | CREATE UNIQUE INDEX ON vulnerable_package(package_name_id, cpe_id) WHERE module_id IS NULL; 46 | CREATE UNIQUE INDEX ON vulnerable_package(package_name_id, cpe_id, module_id) WHERE module_id IS NOT NULL; 47 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/134-cve_account_data_idx.sql: -------------------------------------------------------------------------------- 1 | CREATE INDEX IF NOT EXISTS cve_account_data_rh_account_id_idx ON cve_account_data (rh_account_id); 2 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/135-recalc-events.sql: -------------------------------------------------------------------------------- 1 | -- recalc_event table 2 | CREATE TABLE IF NOT EXISTS recalc_event ( 3 | id BIGSERIAL, 4 | created TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP, 5 | changed_packages JSONB NOT NULL, 6 | PRIMARY KEY (id) 7 | ) TABLESPACE pg_default; 8 | 9 | GRANT SELECT, INSERT, UPDATE, DELETE ON recalc_event TO ve_db_user_vmaas_sync; 10 | 11 | -- user for evaluator component 12 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_evaluator; 13 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 14 | 15 | -- user for listener component 16 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 17 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 18 | 19 | -- user for UI manager component 20 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 21 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 22 | GRANT INSERT, DELETE ON announcement TO ve_db_user_manager; 23 | 24 | -- user for VMaaS sync component 25 | GRANT SELECT ON ALL TABLES IN SCHEMA 
public TO ve_db_user_vmaas_sync; 26 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 27 | 28 | -- user for taskomatic service 29 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 30 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 31 | 32 | -- user for advisor listener 33 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 34 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 35 | 36 | -- user for notificator 37 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_notificator; 38 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_notificator; 39 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/136-primary-keys.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE cve_rule_mapping ADD PRIMARY KEY (cve_id, rule_id), 2 | DROP CONSTRAINT cve_rule_mapping_cve_id_rule_id_key; 3 | 4 | ALTER TABLE cve_account_data ADD PRIMARY KEY (cve_id, rh_account_id), 5 | DROP CONSTRAINT cve_account_data_cve_id_rh_account_id_key; 6 | 7 | ALTER TABLE cve_account_cache ADD PRIMARY KEY (rh_account_id, cve_id), 8 | DROP CONSTRAINT cve_account_cache_rh_account_id_cve_id_key; 9 | 10 | ALTER TABLE system_repo ADD PRIMARY KEY (system_id, repo_id), 11 | DROP CONSTRAINT system_repo_system_id_repo_id_key; 12 | 13 | ALTER TABLE timestamp_kv ADD PRIMARY KEY (name), 14 | DROP CONSTRAINT timestamp_kv_name_key; 15 | 16 | ALTER TABLE announcement ADD PRIMARY KEY (id); 17 | 18 | ALTER TABLE rule_account_cache ADD PRIMARY KEY (rh_account_id, rule_id), 19 | DROP CONSTRAINT rule_account_cache_rh_account_id_rule_id_key; 20 | 21 | ALTER TABLE notified_accounts ADD PRIMARY KEY (rh_account_id, cve_id, notif_type), 22 | DROP CONSTRAINT notified_accounts_rh_account_id_cve_id_notif_type_key; 23 | 24 | ALTER TABLE system_cve_data ADD PRIMARY KEY (system_id, cve_id), 25 | DROP CONSTRAINT system_cve_data_system_id_cve_id_key; 26 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/137-operating-system.sql: -------------------------------------------------------------------------------- 1 | -- operating_system table 2 | CREATE TABLE IF NOT EXISTS operating_system ( 3 | id SERIAL, 4 | name TEXT NOT NULL, CHECK (NOT empty(name)), 5 | major INT NOT NULL, 6 | minor INT NOT NULL, 7 | cves_critical INT NOT NULL, 8 | cves_important INT NOT NULL, 9 | cves_moderate INT NOT NULL, 10 | cves_low INT NOT NULL, 11 | cves_unpatched_critical INT NOT NULL, 12 | cves_unpatched_important INT NOT NULL, 13 | cves_unpatched_moderate INT NOT NULL, 14 | cves_unpatched_low INT NOT NULL, 15 | PRIMARY KEY (id), 16 | CONSTRAINT operating_system_name_major_minor_uq 17 | UNIQUE (name, major, minor) 18 | ) TABLESPACE pg_default; 19 | 20 | GRANT SELECT, INSERT, UPDATE, DELETE ON operating_system TO ve_db_user_vmaas_sync; 21 | 22 | -- user for evaluator component 23 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_evaluator; 24 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 25 | 26 | -- user for listener component 27 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 28 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 29 | 30 | -- user for UI manager component 31 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 32 
| GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 33 | GRANT INSERT, DELETE ON announcement TO ve_db_user_manager; 34 | 35 | -- user for VMaaS sync component 36 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_vmaas_sync; 37 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 38 | 39 | -- user for taskomatic service 40 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 41 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 42 | 43 | -- user for advisor listener 44 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 45 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 46 | 47 | -- user for notificator 48 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_notificator; 49 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_notificator; 50 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/138-os-lifecycle_phase.sql: -------------------------------------------------------------------------------- 1 | CREATE TYPE lp AS ENUM ('minor', 'eus', 'aus', 'e4s', 'els', 'tus'); 2 | 3 | ALTER TABLE operating_system ADD COLUMN lifecycle_phase lp; 4 | UPDATE operating_system SET lifecycle_phase = 'minor'; 5 | ALTER TABLE operating_system ALTER COLUMN lifecycle_phase SET NOT NULL; 6 | ALTER TABLE operating_system ADD CONSTRAINT operating_system_name_major_minor_lifecycle_phase_uq UNIQUE (name, major, minor, lifecycle_phase); 7 | ALTER TABLE operating_system DROP CONSTRAINT operating_system_name_major_minor_uq; 8 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/139-taskomatic-read-all-schemas.sql: -------------------------------------------------------------------------------- 1 | -- Ensure taskomatic user can do metrics for all schemas 2 | -- For existing 3 | DO $$ DECLARE 4 | r RECORD; 5 | BEGIN 6 | FOR r IN (SELECT nspname FROM pg_namespace WHERE nspname IN ('inventory', 'repack')) LOOP 7 | EXECUTE 'GRANT USAGE ON SCHEMA ' || r.nspname || ' TO ve_db_user_taskomatic;'; 8 | EXECUTE 'GRANT SELECT ON ALL TABLES IN SCHEMA ' || r.nspname || ' TO ve_db_user_taskomatic;'; 9 | END LOOP; 10 | END $$; 11 | -- For newly created 12 | ALTER DEFAULT PRIVILEGES GRANT USAGE ON SCHEMAS to ve_db_user_taskomatic; 13 | ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES to ve_db_user_taskomatic; 14 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/140-os-fk-system_platform.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE operating_system ADD COLUMN usable_for_report BOOLEAN; 2 | UPDATE operating_system SET usable_for_report = TRUE; 3 | ALTER TABLE operating_system ALTER COLUMN usable_for_report SET NOT NULL; 4 | 5 | GRANT SELECT, INSERT, UPDATE, DELETE ON operating_system TO ve_db_user_listener; 6 | 7 | ALTER TABLE system_platform ADD COLUMN operating_system_id INT; 8 | ALTER TABLE system_platform ADD CONSTRAINT operating_system_id FOREIGN KEY (operating_system_id) REFERENCES operating_system (id); 9 | CREATE INDEX ON system_platform(operating_system_id); 10 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/141-group_set.sql: -------------------------------------------------------------------------------- 1 | 
-- system_group_set table 2 | CREATE TABLE IF NOT EXISTS system_group_set ( 3 | id BIGSERIAL, 4 | groups JSONB NOT NULL, 5 | groups_checksum TEXT NOT NULL, 6 | PRIMARY KEY (id), 7 | UNIQUE (groups_checksum) 8 | ) TABLESPACE pg_default; 9 | 10 | CREATE INDEX ON system_group_set USING gin (groups jsonb_path_ops); 11 | 12 | GRANT SELECT, INSERT, UPDATE ON system_group_set TO ve_db_user_listener; 13 | 14 | ALTER TABLE system_platform ADD COLUMN group_set_id BIGINT; 15 | ALTER TABLE system_platform ADD CONSTRAINT group_set_id FOREIGN KEY (group_set_id) REFERENCES system_group_set (id); 16 | CREATE INDEX ON system_platform(group_set_id); 17 | 18 | -- user for evaluator component 19 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_evaluator; 20 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_evaluator; 21 | 22 | -- user for listener component 23 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_listener; 24 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_listener; 25 | 26 | -- user for UI manager component 27 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_manager; 28 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_manager; 29 | GRANT INSERT, DELETE ON announcement TO ve_db_user_manager; 30 | 31 | -- user for VMaaS sync component 32 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_vmaas_sync; 33 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_vmaas_sync; 34 | 35 | -- user for taskomatic service 36 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_taskomatic; 37 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_taskomatic; 38 | 39 | -- user for advisor listener 40 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_advisor_listener; 41 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_advisor_listener; 42 | 43 | -- user for notificator 44 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO ve_db_user_notificator; 45 | GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO ve_db_user_notificator; 46 | -------------------------------------------------------------------------------- /database/schema/upgrade_scripts/143-numeric-collation.sql: -------------------------------------------------------------------------------- 1 | CREATE COLLATION IF NOT EXISTS numeric (provider = icu, locale = 'en-u-kn-true'); 2 | -------------------------------------------------------------------------------- /database/schema/ve_db_user_create_postgresql.sql: -------------------------------------------------------------------------------- 1 | -- --------------------------------------------------------------------------- 2 | -- Vulnerability Engine DB Users 3 | -- --------------------------------------------------------------------------- 4 | 5 | -- user for evaluator component 6 | CREATE USER ve_db_user_evaluator; 7 | 8 | -- user for listener component 9 | CREATE USER ve_db_user_listener; 10 | 11 | -- user for UI manager component 12 | CREATE USER ve_db_user_manager; 13 | 14 | -- user for VMaaS CVE sync 15 | CREATE USER ve_db_user_vmaas_sync; 16 | 17 | -- user for taskomatic component 18 | CREATE USER ve_db_user_taskomatic; 19 | 20 | -- user for advisor listener 21 | CREATE USER ve_db_user_advisor_listener; 22 | 23 | -- user for notificator 24 | CREATE USER ve_db_user_notificator; 25 | 26 | -- user and roles for cyndi 27 | -- clowder deployments already have cyndi roles in DB 28 | DO $$ 29 | BEGIN 30 | CREATE ROLE cyndi_admin; 31 |
EXCEPTION WHEN DUPLICATE_OBJECT THEN 32 | RAISE NOTICE '`cyndi_admin` role already exists'; 33 | END 34 | $$; 35 | DO $$ 36 | BEGIN 37 | CREATE ROLE cyndi_reader; 38 | EXCEPTION WHEN DUPLICATE_OBJECT THEN 39 | RAISE NOTICE '`cyndi_reader` role already exists'; 40 | END 41 | $$; 42 | 43 | DO $$ 44 | BEGIN 45 | CREATE USER cyndi; 46 | EXCEPTION WHEN DUPLICATE_OBJECT THEN 47 | RAISE NOTICE 'user `cyndi` already exists'; 48 | END 49 | $$; 50 | 51 | GRANT cyndi_admin TO cyndi; 52 | -------------------------------------------------------------------------------- /database/upgrade/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/database/upgrade/__init__.py -------------------------------------------------------------------------------- /database/upgrade/dbupgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | function psql_exec { 4 | PGPASSWORD="${POSTGRES_PASSWORD}" psql --no-password -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -U "${POSTGRES_USER}" -d "${POSTGRES_DB}" -t -f "$1" 5 | } 6 | 7 | cd $(dirname $0) 8 | 9 | # wait for postgres to be up 10 | until pg_isready -h "${POSTGRES_HOST}" -p "${POSTGRES_PORT}" -U "${POSTGRES_USER}" -d "${POSTGRES_DB}"; 11 | do sleep 2; 12 | done 13 | 14 | # Try to initialize schema if there are no tables (typically first run in deployments without database container - RDS) 15 | EXISTING_TABLES=$(echo "select count(*) from pg_stat_user_tables where schemaname = 'public'" | psql_exec - | sed 's/[[:space:]]//g') 16 | RETVAL=$? 17 | if [[ "$RETVAL" == "0" && "$EXISTING_TABLES" == "0" ]]; then 18 | echo "Empty database, initializing..." 
19 | psql_exec ./database/schema/ve_db_user_create_postgresql.sql 20 | psql_exec ./database/schema/ve_db_postgresql.sql 21 | echo "ALTER USER ve_db_user_manager WITH PASSWORD '${VE_DB_USER_MANAGER_PASSWORD:-ve_db_user_manager_pwd}'" | psql_exec - 22 | echo "ALTER USER ve_db_user_evaluator WITH PASSWORD '${VE_DB_USER_EVALUATOR_PASSWORD:-ve_db_user_evaluator_pwd}'" | psql_exec - 23 | echo "ALTER USER ve_db_user_listener WITH PASSWORD '${VE_DB_USER_LISTENER_PASSWORD:-ve_db_user_listener_pwd}'" | psql_exec - 24 | echo "ALTER USER ve_db_user_vmaas_sync WITH PASSWORD '${VE_DB_USER_VMAAS_SYNC_PASSWORD:-ve_db_user_vmaas_sync_pwd}'" | psql_exec - 25 | echo "ALTER USER ve_db_user_taskomatic WITH PASSWORD '${VE_DB_USER_TASKOMATIC_PASSWORD:-ve_db_user_taskomatic_pwd}'" | psql_exec - 26 | echo "ALTER USER ve_db_user_advisor_listener WITH PASSWORD '${VE_DB_USER_ADVISOR_LISTENER_PASSWORD:-ve_db_user_advisor_listener_pwd}'" | psql_exec - 27 | echo "ALTER USER ve_db_user_notificator WITH PASSWORD '${VE_DB_USER_NOTIFICATOR_PASSWORD:-ve_db_user_notificator_pwd}'" | psql_exec - 28 | echo "ALTER USER cyndi WITH PASSWORD '${CYNDI_DB_ADMIN_PASSWORD:-cyndi_db_admin_pwd}'" | psql_exec - 29 | else 30 | python3.12 -m database.upgrade.upgrade 31 | fi 32 | -------------------------------------------------------------------------------- /develfeatureflags.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "features": [ 4 | { 5 | "name": "vulnerability.cves_without_errata", 6 | "type": "release", 7 | "enabled": true, 8 | "stale": false, 9 | "strategies": [ 10 | { 11 | "name": "default", 12 | "parameters": {} 13 | } 14 | ], 15 | "strategy": "default", 16 | "parameters": {} 17 | }, 18 | { 19 | "name": "vulnerability.inventory_groups", 20 | "type": "release", 21 | "enabled": true, 22 | "stale": false, 23 | "strategies": [ 24 | { 25 | "name": "default", 26 | "parameters": {} 27 | } 28 | ], 29 | "strategy": "default", 30 | "parameters": {} 31 | }, 32 | { 33 | "name": "vulnerability.account_cache", 34 | "type": "release", 35 | "enabled": true, 36 | "stale": false, 37 | "strategies": [ 38 | { 39 | "name": "default", 40 | "parameters": {} 41 | } 42 | ], 43 | "strategy": "default", 44 | "parameters": {} 45 | }, 46 | { 47 | "name": "vulnerability.os_exposure_report", 48 | "type": "release", 49 | "enabled": true, 50 | "stale": false, 51 | "strategies": [ 52 | { 53 | "name": "default", 54 | "parameters": {} 55 | } 56 | ], 57 | "strategy": "default", 58 | "parameters": {} 59 | }, 60 | { 61 | "name": "vulnerability.granular_caches", 62 | "type": "release", 63 | "enabled": true, 64 | "stale": false, 65 | "strategies": [ 66 | { 67 | "name": "default", 68 | "parameters": {} 69 | } 70 | ], 71 | "strategy": "default", 72 | "parameters": {} 73 | } 74 | ] 75 | } 76 | -------------------------------------------------------------------------------- /doc/metrics.md: -------------------------------------------------------------------------------- 1 | # Metrics 2 | Prometheus metrics are used to monitor components running. 3 | For development purposes both Prometheus and Grafana containers are included in docker-compose setup. 4 | When adding new metric, ensure it is exposed correctly using dev setup. 5 | 6 | ## Dev containers 7 | After running `docker-compose up --build`, you can access Prometheus and Grafana web interfaces: 8 | - Prometheus: 9 | - Grafana: , login: admin:passwd 10 | 11 | ## How to add new metric 12 | 1. 
Include new metric into the code (Counter, Gauge, Histogram etc.). 13 | 2. Visualize metric in dev Grafana board (add new chart). 14 | 3. Export new board version to json and save to `monitoring/grafana/provisioning/dashboards/vulnerability-dashboard.json`. 15 | 4. When new metric is in production, use dashboard import and copy new json to production Grafana. 16 | -------------------------------------------------------------------------------- /doc/schema.md: -------------------------------------------------------------------------------- 1 | ![engine](https://user-images.githubusercontent.com/6339153/120200721-eade9900-c224-11eb-85d7-0e4c4d765e43.jpg) 2 | -------------------------------------------------------------------------------- /docker-compose-dbdocs.yml: -------------------------------------------------------------------------------- 1 | services: 2 | vulnerability_database: 3 | container_name: vulnerability-engine-database 4 | build: 5 | context: ./ 6 | dockerfile: ./database/Dockerfile.centos 7 | image: vulnerability-engine/database:latest 8 | restart: unless-stopped 9 | env_file: 10 | - ./conf/common.env 11 | - ./conf/database.env 12 | ports: 13 | - 5432:5432 14 | 15 | schema_spy: 16 | container_name: schema-spy 17 | privileged: true 18 | build: 19 | context: . 20 | dockerfile: ./Dockerfile.dbdocs 21 | depends_on: 22 | - vulnerability_database 23 | volumes: 24 | - ./scripts/output:/output 25 | env_file: 26 | - ./conf/database.env 27 | command: java -jar schemaspy.jar 28 | -------------------------------------------------------------------------------- /docker-compose.devel.yml: -------------------------------------------------------------------------------- 1 | services: 2 | ve_database: 3 | volumes: 4 | - ./database:/git 5 | security_opt: 6 | - label=disable 7 | working_dir: /git 8 | 9 | ve_manager: 10 | volumes: 11 | - .:/git 12 | - ./database:/git/database 13 | security_opt: 14 | - label=disable 15 | working_dir: /git 16 | command: ["sleep", "infinity"] 17 | 18 | ve_manager_admin: 19 | volumes: 20 | - .:/git 21 | - ./database:/git/database 22 | security_opt: 23 | - label=disable 24 | working_dir: /git 25 | command: ["sleep", "infinity"] 26 | 27 | ve_taskomatic: 28 | volumes: 29 | - .:/git 30 | security_opt: 31 | - label=disable 32 | working_dir: /git 33 | command: ["sleep", "infinity"] 34 | 35 | ve_notificator: 36 | volumes: 37 | - .:/git 38 | security_opt: 39 | - label=disable 40 | working_dir: /git 41 | command: ["sleep", "infinity"] 42 | 43 | ve_grouper: 44 | volumes: 45 | - .:/git 46 | security_opt: 47 | - label=disable 48 | working_dir: /git 49 | command: ["sleep", "infinity"] 50 | 51 | ve_listener: 52 | volumes: 53 | - .:/git 54 | security_opt: 55 | - label=disable 56 | working_dir: /git 57 | command: ["sleep", "infinity"] 58 | 59 | ve_evaluator_recalc: 60 | volumes: 61 | - .:/git 62 | security_opt: 63 | - label=disable 64 | working_dir: /git 65 | command: ["sleep", "infinity"] 66 | 67 | ve_evaluator_upload: 68 | volumes: 69 | - .:/git 70 | security_opt: 71 | - label=disable 72 | working_dir: /git 73 | command: ["sleep", "infinity"] 74 | -------------------------------------------------------------------------------- /docker-compose.test.yml: -------------------------------------------------------------------------------- 1 | services: 2 | test: 3 | build: 4 | context: . 
5 | dockerfile: Dockerfile.test 6 | command: bash -c "cd /engine && ./run_tests.sh" 7 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | public_port () { 4 | python3.12 -c "import app_common_python as a;print(a.LoadedConfig.publicPort or 8000)" 5 | } 6 | 7 | metrics_port () { 8 | python3.12 -c "import app_common_python as a;print(a.LoadedConfig.metricsPort or 9000)" 9 | } 10 | 11 | cd $(dirname $0) 12 | 13 | if [[ ! -z $1 ]]; then 14 | if [[ "$1" == "vmaas-sync" ]]; then 15 | exec python3.12 -m vmaas_sync.vmaas_sync 16 | elif [[ "$1" == "manager" ]]; then 17 | exec gunicorn -c manager/gunicorn_conf.py -w ${GUNICORN_WORKERS:-4} --bind=0.0.0.0:$(public_port) --bind=0.0.0.0:$(metrics_port) --timeout=60 --limit-request-field_size=65535 manager.main 18 | elif [[ "$1" == "manager-dev" ]]; then 19 | exec gunicorn --reload -c manager/gunicorn_conf.py -w ${GUNICORN_WORKERS:-4} --bind=0.0.0.0:$(public_port) --bind=0.0.0.0:$(metrics_port) --timeout=60 --limit-request-field_size=65535 manager.main 20 | elif [[ "$1" == "manager-admin" ]]; then 21 | exec gunicorn -c manager/gunicorn_conf.py -w ${GUNICORN_WORKERS:-4} --bind=0.0.0.0:$(public_port) --bind=0.0.0.0:$(metrics_port) --timeout=60 --limit-request-field_size=65535 manager.admin 22 | elif [[ "$1" == "taskomatic" ]]; then 23 | exec python3.12 -m taskomatic.taskomatic 24 | elif [[ "$1" == "notificator" ]]; then 25 | exec python3.12 -m notificator.notificator 26 | elif [[ "$1" == "exploit-sync" ]]; then 27 | exec python3.12 -m exploit_sync.exploit_sync 28 | elif [[ "$1" == "grouper" ]]; then 29 | exec python3.12 -m grouper.grouper 30 | elif [[ "$1" == "listener" ]]; then 31 | exec python3.12 -m listener.listener 32 | elif [[ "$1" == "evaluator" ]]; then 33 | exec python3.12 -m evaluator.evaluator 34 | elif [[ "$1" == "cluster" ]]; then 35 | exec python3.12 -m cluster.cluster 36 | fi 37 | fi 38 | 39 | echo "Please specify service name as the first argument." 40 | -------------------------------------------------------------------------------- /evaluator/README.md: -------------------------------------------------------------------------------- 1 | # Vulnerability Engine Evaluator Service 2 | 3 | ## Overview 4 | The Evaluator service owns a topic with message events from the Listener service. It performs a vulnerability analysis on a system by querying VMaaS with the system's packages and stores the system vulnerabilities (system-CVE links) in the database. 5 | 6 | ## Design 7 | Evaluator runs as two instances: `upload` and `recalc`. 8 | The upload instance listens for events created by the Listener on `vulnerability.evaluator.upload` and performs the vulnerability analysis on systems freshly updated on the inventory side. 9 | The recalc instance listens for events from the vmaas-sync service on `vulnerability.evaluator.recalc` and recalculates systems already present in Vulnerability (e.g. a new CVE is detected and the affected systems need to be recalculated). 10 | The vulnerability analysis is based on the packages installed on a given system; for each system a VMaaS request is sent and processed. 11 | System CVEs returned from VMaaS are processed by the following logic: 12 | * CVE is present for the system in Vulnerability but not in the VMaaS response - the CVE is mitigated, mark it as such in the DB. 13 | * CVE is present for the system in Vulnerability and also in the VMaaS response - the CVE is still present, do not change it.
14 | * CVE is not present in Vulnerability but is present in the VMaaS response - insert the CVE-system link into the database. 15 | 16 | System-CVE links are stored in the `system_vulnerabilities` table. Evaluator also caches in the database the current number of CVEs each system is vulnerable to, which is later used by some endpoints. 17 | After each evaluation, the new changes are sent to the Notificator service via `vulnerability.evaluator.results`. 18 | 19 | ### Incoming message 20 | ``` 21 | { 22 | "type": <type>, 23 | "host": { 24 | "id": <inventory host id>, 25 | "account": <account number>, 26 | "org_id": <org id> 27 | }, 28 | "platform_metadata": { 29 | "request_id": <request id> 30 | }, 31 | "timestamp": <timestamp> 32 | } 33 | ``` 34 | -------------------------------------------------------------------------------- /evaluator/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/evaluator/__init__.py -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | This directory contains various examples of functionality provided by the frameworks we use. 4 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/examples/__init__.py -------------------------------------------------------------------------------- /examples/connexion_deep_object_filters.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | Deep object filtering done in connexion 4 | """ 5 | from collections import deque 6 | from copy import deepcopy 7 | 8 | import connexion 9 | 10 | 11 | def inplace_filter(condition, deck): 12 | """Filters items in place so all references are pointing to filtered list as opposed to filter() which creates new list() instance""" 13 | for _ in range(len(deck)): 14 | item = deck.popleft() 15 | if condition(item): 16 | deck.append(item) 17 | 18 | 19 | DATA = [ 20 | { 21 | 'id': 1, 22 | 'details': { 23 | 'sap': True, 24 | 'text': 'xxee' 25 | } 26 | }, 27 | { 28 | 'id': 2, 29 | 'details': { 30 | 'sap': True, 31 | 'text': 'eexx' 32 | } 33 | }, 34 | { 35 | 'id': 3, 36 | 'details': { 37 | 'sap': False, 38 | 'text': 'xxee' 39 | } 40 | }, 41 | { 42 | 'id': 4, 43 | 'details': { 44 | 'sap': False, 45 | 'text': 'eexx' 46 | } 47 | }, 48 | ] 49 | 50 | FILTER_DEFINITIONS = { 51 | 'id': lambda retval, filter_value: inplace_filter(lambda x: x['id'] == filter_value, retval), 52 | 'details': { 53 | 'sap': lambda retval, filter_value: inplace_filter(lambda x: x['details']['sap'] == filter_value, retval), 54 | 'text': lambda retval, filter_value: inplace_filter(lambda x: x['details']['text'] == filter_value, retval), 55 | } 56 | } 57 | 58 | 59 | def walk_filters(retval, node, path): 60 | """walk through filters dictionary""" 61 | for key, item in node.items(): 62 | if isinstance(item, dict): 63 | walk_filters(retval, item, path + [key]) 64 | else: 65 | filter_ = FILTER_DEFINITIONS 66 | for elem in path: 67 | filter_ = filter_[elem] 68 | filter_[key](retval, item) 69 | 70 | 71 | def getData(**kwargs): # pylint: disable=invalid-name 72 | """Return data""" 73 | retval = deque(deepcopy(DATA)) 74 |
walk_filters(retval, kwargs['filter'], []) 75 | return list(retval) 76 | 77 | 78 | def create_app(): 79 | """Creates an application object""" 80 | app = connexion.App('Connexion deep filtering', options={'swagger_ui': True}) 81 | app.add_api('connexion_deep_object_filters.yaml') 82 | app.app.url_map.strict_slashes = False 83 | 84 | return app 85 | 86 | 87 | application = create_app() # pylint: disable=invalid-name 88 | 89 | 90 | if __name__ == '__main__': 91 | application.run(host='0.0.0.0', port=8080) 92 | -------------------------------------------------------------------------------- /examples/connexion_deep_object_filters.yaml: -------------------------------------------------------------------------------- 1 | openapi: "3.0.0" 2 | 3 | info: 4 | title: Deep object filtering 5 | version: '1.0' 6 | 7 | paths: 8 | /data: 9 | get: 10 | summary: Get data. 11 | description: Return data (with applied filters) 12 | operationId: connexion_deep_object_filters.getData 13 | responses: 14 | 200: 15 | description: Data 16 | content: 17 | application/vnd.api+json: 18 | schema: 19 | type: array 20 | items: 21 | type: object 22 | properties: 23 | id: 24 | type: integer 25 | example: 1 26 | details: 27 | type: object 28 | properties: 29 | sap: 30 | type: boolean 31 | example: true 32 | text: 33 | type: string 34 | example: xxee 35 | required: 36 | - sap 37 | - text 38 | required: 39 | - details 40 | - id 41 | parameters: 42 | - in: query 43 | name: filter 44 | description: Filter by deep object 45 | schema: 46 | type: object 47 | properties: 48 | id: 49 | type: integer 50 | example: 1 51 | details: 52 | type: object 53 | properties: 54 | sap: 55 | type: boolean 56 | example: true 57 | text: 58 | type: string 59 | example: xxee 60 | style: deepObject 61 | explode: true 62 | -------------------------------------------------------------------------------- /exploit_sync/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/exploit_sync/__init__.py -------------------------------------------------------------------------------- /grouper/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/grouper/__init__.py -------------------------------------------------------------------------------- /grouper/common.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common grouper tools.
3 | """ 4 | 5 | import asyncio 6 | from dataclasses import dataclass 7 | from enum import Enum 8 | 9 | from prometheus_client import Counter 10 | from prometheus_client import Gauge 11 | 12 | from common.config import Config 13 | 14 | CFG = Config() 15 | 16 | QUEUE_SIZE = Gauge("ve_grouper_queue_size", "Number of items in queue") 17 | ADVISOR_QUEUE_SIZE = Gauge("ve_grouper_advisor_queue_size", "Number of advior items in queue") 18 | INVENTORY_QUEUE_SIZE = Gauge("ve_grouper_inventory_queue_size", "Number of inventory items in queue") 19 | PAIR_HIT = Counter("ve_grouper_pair_hits", "Number of pairs, which were matched") 20 | PAIR_MISS = Counter("ve_grouper_pair_miss", "Number of pairs, which were not matched") 21 | UNCHANGED_SYSTEM = Counter("ve_grouper_unchanged_system", "Number of unchanged systems, which doesn't need evaluation") 22 | CHANGED_SYSTEM = Counter("ve_grouper_changed_system", "Number of changed systems, which need evaluation") 23 | 24 | 25 | class BoundedSemaphorePrometheus: 26 | """Asyncio bounded semaphore, which reports status 27 | in specified prometheus metric gauge""" 28 | 29 | def __init__(self, size: int, metric: Gauge): 30 | """Constructor""" 31 | self._semaphore = asyncio.BoundedSemaphore(size) 32 | self._metric = metric 33 | 34 | async def acquire(self): 35 | """Acquire semaphore, inc gauge""" 36 | await self._semaphore.acquire() 37 | self._metric.inc() 38 | 39 | def release(self): 40 | """Release semaphore, dec gauge""" 41 | self._semaphore.release() 42 | self._metric.dec() 43 | 44 | 45 | class GrouperMessageType(Enum): 46 | """Message types which can arrive at kafka""" 47 | 48 | INVENTORY_UPLOAD = "inventory_upload" 49 | ADVISOR_UPLOAD = "advisor_upload" 50 | 51 | 52 | @dataclass 53 | class QueueItem: 54 | """Single item in Evaluator queue""" 55 | 56 | inventory_upload: bool 57 | inventory_changed: bool 58 | 59 | advisor_upload: bool 60 | advisor_changed: bool 61 | 62 | request_id: str 63 | 64 | second_upload_event: asyncio.Event = None 65 | 66 | def __post_init__(self): 67 | self.second_upload_event = asyncio.Event() 68 | -------------------------------------------------------------------------------- /listener/README.md: -------------------------------------------------------------------------------- 1 | # Vulnerability Engine Listener Service 2 | 3 | ## Overview 4 | Listener service is always consuming specific kafka topic where the inventory produces update messages for its systems. 5 | Listener updates the system information (does not do evaluation of vulnerabilities yet) inside the vulnerability database based on the message and passes the event to the evaluator component. 6 | 7 | ## Design 8 | Listener is consuming `platform.inventory.events` topic. 9 | Message type from inventory can be: 10 | * New system (created): New system was uploaded to the inventory. 11 | * Reupload of system (updated): Already existing system uploaded to inventory. 12 | * Deleted system (deleted): System got deleted from inventory. 13 | 14 | Listener processes one of this message. 15 | On new system, it creates a row with system info into vulnerability DB (`system_platform`, `repo`, `system_repo` table). 16 | On update it updates already existing system fields only if the package profile changed from the last created/updated event. 17 | On both of these operations, listener prepares a request for VMaaS with provided list of packages from the message and stores it into the db (`vmaas_json` field). 
18 | On delete, it marks the system as deleted (the system is actually deleted later as an optimization; `when_deleted` field). 19 | After each operation (besides delete), the system is passed to the evaluator for vulnerability analysis through another Kafka topic, `vulnerability.evaluator.upload`, with message type `upload_new_file`. 20 | 21 | ### Incoming message 22 | Details of the incoming Kafka message can be found [here](https://consoledot.pages.redhat.com/docs/dev/services/inventory.html). 23 | -------------------------------------------------------------------------------- /listener/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/listener/__init__.py -------------------------------------------------------------------------------- /manager.healthz.spec.yaml: -------------------------------------------------------------------------------- 1 | openapi: "3.0.0" 2 | 3 | info: 4 | title: Vulnerability Engine Manager 5 | version: {{ app_version }} 6 | 7 | 8 | paths: 9 | /healthz: 10 | get: 11 | summary: Health status of application 12 | description: Checks database availability and API response threshold time. 13 | operationId: manager.api_status_handler.GetApiStatus.get 14 | x-methodName: getApiStatus 15 | responses: 16 | 200: 17 | description: API is healthy 18 | 503: 19 | description: Database is unavailable 20 | /metrics: 21 | get: 22 | summary: Application metrics 23 | description: Return prometheus metrics 24 | operationId: manager.main.metrics 25 | x-methodName: getMetrics 26 | responses: 27 | 200: 28 | description: Application metrics 29 | content: 30 | text/plain: 31 | schema: 32 | type: string 33 | -------------------------------------------------------------------------------- /manager/README.md: -------------------------------------------------------------------------------- 1 | # Vulnerability Engine Manager Service 2 | 3 | ## Overview 4 | 5 | The vulnerability engine manager service provides the backend API for the UI. It is a connexion (Flask) application run inside a tornado WSGI container. 6 | 7 | ### API design 8 | 9 | The most important part of our API is the `manager.spec.yaml` file, which is our swagger documentation and which defines the API (that is how the connexion framework works). The most important part of the documentation is the `operationId` parameter, which defines which python function will be responsible for executing the API call. 10 | 11 | Existing API endpoints are all classmethods of {Get,Patch,Post}Request, a class which is full of class and static methods. That's because unlike tornado, Flask does not instantiate new objects each time an API call is processed, but executes a function, static method, or class method. 12 | 13 | ### Adding new API endpoints 14 | 15 | To add a new API, simply add its description to `manager.spec.yaml`. After that, create a new class which inherits from manager.base.{Get,Patch,Post}Request and fill in the `handle_{get,patch,post}` class method, as sketched below. 16 | The `operationId` in the manager spec should point to the `{get,patch,post}` method of your new class, as we're using inheritance to take care of error handling for us, e.g. `manager.$YOUR_NEW_MODULE.$YOUR_NEW_CLASS.get`. 17 | 18 | To access information like e.g. `rh_account_number`, simply `import connexion` and use `connexion.context['user']`. Similarly, you can access headers via `connexion.request.headers`.
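A minimal sketch of such an endpoint, following the pattern of the existing handlers (the module name, route, and returned fields here are hypothetical):

```python
"""
Module for a hypothetical /example API endpoint
"""
from connexion import context

from .base import GetRequest


class GetExample(GetRequest):
    """GET to /v1/example"""

    _endpoint_name = r"/v1/example"

    @classmethod
    def handle_get(cls, **kwargs):
        # identity parsed from the x-rh-identity header by the framework
        org_id = context.context["user"]["org_id"]
        return {"org_id": org_id}, 200
```

The matching `operationId` in `manager.spec.yaml` would then be `manager.example_handler.GetExample.get`.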
19 | 20 | All functions should accept `**kwargs` arguments, as all input (described by `manager.spec.yaml`) is stored there. This means that if you e.g. specify a required integer parameter `length` in the spec, it will be accessible via `kwargs['length']` and WILL always be there and WILL always be an integer. No additional checks required. 21 | 22 | To access the requestBody, use `kwargs['data']`. 23 | 24 | To return any data to the client, just `return` the data from `handle_{get,patch,post}`. There's no need to call `json.dumps()` as it's done automatically by connexion. If you want to return an error, use `cls.format_exception(message, status_code)`. 25 | -------------------------------------------------------------------------------- /manager/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/manager/__init__.py -------------------------------------------------------------------------------- /manager/admin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | vulnerability-manager-admin 4 | """ 5 | from common.config import Config 6 | from common.logging import get_logger 7 | from common.logging import init_logging 8 | 9 | from .main import create_app 10 | 11 | LOGGER = get_logger(__name__) 12 | CFG = Config() 13 | 14 | 15 | init_logging(num_servers=CFG.gunicorn_workers) 16 | 17 | # gunicorn expects an object called "application" hence the pylint disable 18 | application = create_app({CFG.default_route: "manager.admin.spec.yaml", # pylint: disable=invalid-name 19 | "": "manager.healthz.spec.yaml"}) 20 | -------------------------------------------------------------------------------- /manager/announcement_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for /announcement API endpoint 3 | """ 4 | import peewee 5 | 6 | from common.peewee_model import Announcement 7 | 8 | from .base import GetRequest 9 | 10 | 11 | class GetAnnouncement(GetRequest): 12 | """GET to /v1/announcement""" 13 | 14 | _endpoint_name = r"/v1/announcement" 15 | 16 | @classmethod 17 | def handle_get(cls, **kwargs): 18 | try: 19 | result = (Announcement.get()) 20 | except peewee.DoesNotExist: 21 | return {"message": "", "last_updated": ""}, 204 22 | 23 | return {"message": result.message, 24 | "last_updated": result.last_updated}, 200 25 | -------------------------------------------------------------------------------- /manager/api_status_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Redhat status page API 3 | """ 4 | from .base import GetRequest 5 | 6 | 7 | class GetApiStatus(GetRequest): 8 | """Class for handling the Api status requests""" 9 | 10 | _endpoint_name = r"/v1/apistatus" 11 | 12 | @classmethod 13 | def handle_get(cls, **kwargs): # pylint: disable=unused-argument 14 | """Measure the status of the API, db""" 15 | return 200 16 | -------------------------------------------------------------------------------- /manager/feature_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for /feature API endpoint 3 | """ 4 | from connexion import context 5 | 6 | from common.peewee_model import NotifiedAccounts 7 | from common.peewee_model import RHAccount 8 | from manager.base import ApplicationException 9 | from
manager.base import DeleteRequest 10 | from manager.base import PatchRequest 11 | from manager.base import get_account_data 12 | from manager.rbac_manager import RbacManager as RBAC 13 | from manager.rbac_manager import RbacRoutePermissions 14 | 15 | 16 | class PatchCvesWithoutErrata(PatchRequest): 17 | """PATCH to /feature/cves_without_errata""" 18 | 19 | _endpoint_name = r"/v1/feature/cves_without_errata" 20 | 21 | @classmethod 22 | @RBAC.need_permissions(RbacRoutePermissions.TOGGLE_CVES_WITHOUT_ERRATA) 23 | def handle_patch(cls, **kwargs): 24 | """Set cves_without_errata DB flag for account.""" 25 | org_id = context.context["user"]["org_id"] 26 | enable = kwargs["body"]["enable"] 27 | RHAccount.update(cves_without_errata=enable).where(RHAccount.org_id == org_id).execute() 28 | return {"updated": {"org_id": org_id, "cves_without_errata": {"enabled": enable}}} 29 | 30 | 31 | class DeleteNotifications(DeleteRequest): 32 | """DELETE to /notifications""" 33 | 34 | _endpoint_name = r"/v1/notifications" 35 | 36 | @classmethod 37 | def handle_delete(cls, **kwargs): 38 | """Deletes customer sent notifications""" 39 | rh_account_id = get_account_data(context.context["user"]).id 40 | if rh_account_id is None: 41 | raise ApplicationException("user does not exist", 403) 42 | 43 | deleted_cnt = NotifiedAccounts.delete().where(NotifiedAccounts.rh_account_id == rh_account_id).execute() 44 | return {"deleted": deleted_cnt} 45 | -------------------------------------------------------------------------------- /manager/middlewares.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom middlewares for connexion/starlette. 3 | """ 4 | import json 5 | 6 | from starlette.datastructures import MutableHeaders 7 | 8 | from common.logging import get_logger 9 | 10 | LOGGER = get_logger(__name__) 11 | 12 | 13 | class ErrorHandlerMiddleware: 14 | """Middleware to wrap error message into a list if it's not already (errors coming from framework).""" 15 | def __init__(self, app): 16 | self.app = app 17 | 18 | async def __call__(self, scope, receive, send): 19 | if scope["type"] != "http": 20 | await self.app(scope, receive, send) 21 | return 22 | 23 | start_message = {} 24 | 25 | async def send_with_formatted_error(message): 26 | nonlocal start_message 27 | if message["type"] == "http.response.start": 28 | start_message = message 29 | if start_message["status"] >= 400: 30 | headers = MutableHeaders(raw=start_message["headers"]) 31 | del headers["Content-Length"] 32 | await send(start_message) 33 | elif message["type"] == "http.response.body": 34 | if start_message["status"] >= 400 and message["body"]: 35 | try: 36 | err = json.loads(message["body"].decode("utf-8")) 37 | except json.JSONDecodeError: 38 | LOGGER.warning("Expected JSON error message: %s", message["body"].decode("utf-8")) 39 | else: 40 | if "errors" not in err and "detail" in err and "status" in err: 41 | message["body"] = json.dumps({"errors": [{"detail": err["detail"], "status": str(err["status"])}]}).encode("utf-8") 42 | await send(message) 43 | 44 | await self.app(scope, receive, send_with_formatted_error) 45 | -------------------------------------------------------------------------------- /manager/playbook_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for /playbook APIs 3 | """ 4 | from peewee import DoesNotExist 5 | from peewee import Value 6 | 7 | from common.peewee_model import InsightsRule 8 | from common.peewee_model import Playbook 
9 | 10 | from .base import GetRequest 11 | from .rbac_manager import RbacManager as RBAC 12 | from .rbac_manager import RbacRoutePermissions 13 | 14 | 15 | class GetTemplate(GetRequest): 16 | """GET to /playbooks/templates/{rule_id}""" 17 | 18 | _endpoint_name = r"/playbooks/templates/{rule_id}" 19 | 20 | @classmethod 21 | @RBAC.need_permissions(RbacRoutePermissions.REMEDIATIONS_READ, allow_system_auth=True) 22 | def handle_get(cls, **kwargs): 23 | try: 24 | rule = InsightsRule.select(InsightsRule.id, 25 | InsightsRule.reboot_required) \ 26 | .where(InsightsRule.name == kwargs["rule_id"]) \ 27 | .get() 28 | except DoesNotExist: 29 | return cls.format_exception("Rule %s does not exist" % kwargs["rule_id"], 404) 30 | data = Playbook.select(Playbook.description, 31 | Playbook.play, 32 | Playbook.version, 33 | Value(rule.reboot_required).alias("reboot_required"), 34 | Playbook.resolution_type) \ 35 | .where(Playbook.rule_id == rule.id) \ 36 | .dicts() 37 | return {"data": [playbook for playbook in data]} # pylint: disable=bad-option-value,unnecessary-comprehension 38 | -------------------------------------------------------------------------------- /manager/rbac_filters.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for RBAC based filters. 3 | """ 4 | from .rbac_manager import RbacPermission 5 | from .rbac_manager import RbacRoutePermissions 6 | 7 | 8 | def filter_excluded(perms: [RbacPermission]) -> dict: 9 | """RBAC filter excludes systems if user does not have opt_out read""" 10 | if RbacRoutePermissions.BASE_OPT_OUT_READ not in perms: 11 | return {"excluded": [False]} 12 | return None 13 | -------------------------------------------------------------------------------- /manager/risk_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for /business_risk API endpoint 3 | """ 4 | from common.peewee_model import BusinessRisk 5 | 6 | from .base import GetRequest 7 | 8 | 9 | class GetRisk(GetRequest): 10 | """GET to /v1/business_risk""" 11 | 12 | _endpoint_name = r"/v1/business_risk" 13 | 14 | @classmethod 15 | def handle_get(cls, **kwargs): # pylint: disable=unused-argument 16 | """Return the data from the business_risk table as JSON""" 17 | query = (BusinessRisk.select().order_by(BusinessRisk.id.asc()).dicts()) 18 | risk_list = [] 19 | for risk in query: 20 | risk_list.append(risk) 21 | return {"data": risk_list, "meta": {"total_items": len(risk_list)}}, 200 22 | -------------------------------------------------------------------------------- /manager/version_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module for /version API endpoint 3 | """ 4 | from peewee import DoesNotExist 5 | 6 | from common.constants import APP_VERSION 7 | from common.logging import get_logger 8 | from common.peewee_model import DbVersion 9 | 10 | from .base import GetRequest 11 | 12 | LOGGER = get_logger(__name__) 13 | 14 | 15 | class GetVersion(GetRequest): 16 | """GET to /v1/version""" 17 | 18 | _endpoint_name = r"/v1/version" 19 | 20 | @classmethod 21 | def handle_get(cls, **kwargs): # pylint: disable=unused-argument 22 | """Return the data from the db_version table as JSON""" 23 | try: 24 | schema_version = DbVersion.get(DbVersion.name == "schema_version").version 25 | except DoesNotExist: 26 | schema_version = "unknown" 27 | return {"application_version": APP_VERSION, "database_version": schema_version}, 200 28 | 
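The manager handlers above all return a payload (optionally with a status code) that connexion serializes to JSON. As a rough illustration of exercising one of them, here is a sketch of calling the version endpoint from Python; the host, port, path prefix, and identity fields are assumptions for a local development setup, not the deployed configuration:

```python
import base64
import json

import requests

# Hypothetical minimal identity payload for the x-rh-identity header
identity = {"identity": {"org_id": "123456"}}
encoded = base64.b64encode(json.dumps(identity).encode("utf-8")).decode("utf-8")

# Assumed local manager URL; the real path prefix may differ per deployment
resp = requests.get("http://localhost:8000/api/vulnerability/v1/version",
                    headers={"x-rh-identity": encoded})
print(resp.json())  # e.g. {"application_version": "...", "database_version": "..."}
```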
-------------------------------------------------------------------------------- /monitoring/grafana/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM grafana/grafana:12.0.0 2 | 3 | USER root 4 | RUN apk add --no-cache python3 py3-yaml 5 | USER grafana 6 | 7 | ENV GF_AUTH_ANONYMOUS_ENABLED=true 8 | ENV GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 9 | ENV GF_AUTH_DISABLE_LOGIN_FORM=true 10 | 11 | ADD /scripts/extract_dashboard_configmap.py /usr/local/bin 12 | ADD /monitoring/grafana/datasources.yml /etc/grafana/provisioning/datasources 13 | ADD /monitoring/grafana/dashboards.yml /etc/grafana/provisioning/dashboards 14 | 15 | ADD /monitoring/grafana/dashboards/grafana-dashboard-clouddot-insights-vulnerability-engine.configmap.yml /etc/grafana 16 | 17 | RUN extract_dashboard_configmap.py /etc/grafana/grafana-dashboard-clouddot-insights-vulnerability-engine.configmap.yml > /etc/grafana/provisioning/dashboards/grafana-dashboard-clouddot-insights-vulnerability-engine.json 18 | -------------------------------------------------------------------------------- /monitoring/grafana/dashboards.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'Prometheus' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: false 9 | editable: true 10 | options: 11 | path: /etc/grafana/provisioning/dashboards 12 | -------------------------------------------------------------------------------- /monitoring/grafana/datasources.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | deleteDatasources: 4 | - name: devel-prometheus 5 | orgId: 1 6 | 7 | datasources: 8 | - name: devel-prometheus 9 | type: prometheus 10 | access: proxy 11 | orgId: 1 12 | url: http://vulnerability-engine-prometheus:9090 13 | password: 14 | user: 15 | database: 16 | basicAuth: true 17 | basicAuthUser: admin 18 | basicAuthPassword: passwd 19 | withCredentials: 20 | isDefault: true 21 | jsonData: 22 | graphiteVersion: "1.1" 23 | tlsAuth: false 24 | tlsAuthWithCACert: false 25 | secureJsonData: 26 | tlsCACert: "..." 27 | tlsClientCert: "..." 28 | tlsClientKey: "..." 
29 | version: 1 30 | editable: true 31 | -------------------------------------------------------------------------------- /monitoring/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 5s 3 | evaluation_interval: 5s 4 | 5 | scrape_configs: 6 | - job_name: vulnerability-engine-manager-service 7 | static_configs: 8 | - targets: 9 | - ve_manager:8000 10 | metric_relabel_configs: 11 | - source_labels: [endpoint] 12 | target_label: exported_endpoint 13 | replacement: $1 14 | - source_labels: [job] 15 | target_label: pod 16 | replacement: $1 17 | - job_name: vulnerability-engine-manager-admin-service 18 | static_configs: 19 | - targets: 20 | - ve_manager_admin:8000 21 | metric_relabel_configs: 22 | - source_labels: [endpoint] 23 | target_label: exported_endpoint 24 | replacement: $1 25 | - source_labels: [job] 26 | target_label: pod 27 | replacement: $1 28 | - job_name: vulnerability-engine-grouper-service 29 | static_configs: 30 | - targets: 31 | - ve_grouper:8089 32 | metric_relabel_configs: 33 | - source_labels: [job] 34 | target_label: pod 35 | replacement: $1 36 | - job_name: vulnerability-engine-notificator-service 37 | static_configs: 38 | - targets: 39 | - ve_notificator:8088 40 | metric_relabel_configs: 41 | - source_labels: [job] 42 | target_label: pod 43 | replacement: $1 44 | - job_name: vulnerability-engine-taskomatic-service 45 | static_configs: 46 | - targets: 47 | - ve_taskomatic:8085 48 | metric_relabel_configs: 49 | - source_labels: [job] 50 | target_label: pod 51 | replacement: $1 52 | - job_name: vulnerability-engine-listener-service 53 | static_configs: 54 | - targets: 55 | - ve_listener:8089 56 | metric_relabel_configs: 57 | - source_labels: [job] 58 | target_label: pod 59 | replacement: $1 60 | - job_name: vulnerability-engine-evaluator-recalc-service 61 | static_configs: 62 | - targets: 63 | - ve_evaluator_recalc:8085 64 | metric_relabel_configs: 65 | - source_labels: [job] 66 | target_label: pod 67 | replacement: $1 68 | - job_name: vulnerability-engine-evaluator-upload-service 69 | static_configs: 70 | - targets: 71 | - ve_evaluator_upload:8085 72 | metric_relabel_configs: 73 | - source_labels: [job] 74 | target_label: pod 75 | replacement: $1 76 | -------------------------------------------------------------------------------- /notificator/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/notificator/__init__.py -------------------------------------------------------------------------------- /notificator/app.py: -------------------------------------------------------------------------------- 1 | """ 2 | Notificator api handlers 3 | """ 4 | 5 | from aiohttp import web 6 | 7 | from .notificator_queue import NotificatorQueue 8 | 9 | 10 | class CacheHandler(web.View): 11 | """Cache handler""" 12 | 13 | async def put(self): 14 | """Refresh notified accs from DB""" 15 | queue = NotificatorQueue(None, None, None, None) 16 | await queue.refresh_map() 17 | return web.Response(status=200) 18 | 19 | 20 | def create_notificator_app(logger, views): 21 | """Create instance of notificator app""" 22 | app = web.Application(logger=logger) 23 | for url, view in views: 24 | app.router.add_view(url, view) 25 | return app 26 | 27 | 28 | def create_notif_app_runner(app, port, logger, loop): 29 | """Create AIOHTTP runner""" 30 | runner = 
web.AppRunner(app, logger=logger) 31 | 32 | loop.run_until_complete(runner.setup()) 33 | 34 | site = web.TCPSite(runner, "0.0.0.0", port) 35 | return runner, site 36 | -------------------------------------------------------------------------------- /platform_mock/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi9/ubi-minimal 2 | 3 | RUN curl -o /etc/yum.repos.d/postgresql.repo \ 4 | https://copr.fedorainfracloud.org/coprs/g/insights/postgresql-16/repo/epel-9/group_insights-postgresql-16-epel-9.repo 5 | 6 | RUN microdnf install -y --setopt=install_weak_deps=0 --setopt=tsflags=nodocs \ 7 | python312 python3.12-pip python3.12-devel libpq-devel gcc which java-openjdk-headless shadow-utils tar gzip file vim systemd libicu postgresql && \ 8 | microdnf clean all 9 | 10 | RUN ln -s -f /usr/bin/python3.12 /usr/bin/python 11 | 12 | WORKDIR /platform_mock 13 | 14 | ADD pyproject.toml /platform_mock/ 15 | ADD poetry.lock /platform_mock/ 16 | 17 | ENV LC_ALL=C.utf8 18 | ENV LANG=C.utf8 19 | RUN pip3.12 install --upgrade pip && \ 20 | pip3.12 install --upgrade poetry~=2.0 poetry-plugin-export 21 | RUN poetry export --with dev -f requirements.txt --output requirements.txt && \ 22 | pip3.12 install -r requirements.txt 23 | 24 | RUN cd /platform_mock && \ 25 | mkdir kafka && \ 26 | curl -L https://downloads.apache.org/kafka/3.8.0/kafka_2.12-3.8.0.tgz \ 27 | | tar -xz --strip-components=1 -C kafka/ 28 | 29 | RUN adduser --gid 0 -d /platform_mock --no-create-home insights && \ 30 | chown -R insights /platform_mock 31 | 32 | USER insights 33 | 34 | EXPOSE 9092 8000 35 | 36 | ADD /platform_mock/*.sh /platform_mock/ 37 | ADD /platform_mock/__init__.py /platform_mock/platform_mock/ 38 | ADD /platform_mock/platform_mock.py /platform_mock/platform_mock/ 39 | ADD /platform_mock/traffic_generator.py /platform_mock/ 40 | ADD /platform_mock/data /platform_mock/data/ 41 | ADD /common/*.py /platform_mock/common/ 42 | 43 | CMD /platform_mock/entrypoint.sh 44 | -------------------------------------------------------------------------------- /platform_mock/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/platform_mock/__init__.py -------------------------------------------------------------------------------- /platform_mock/common: -------------------------------------------------------------------------------- 1 | ../common -------------------------------------------------------------------------------- /platform_mock/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/platform_mock/data/__init__.py -------------------------------------------------------------------------------- /platform_mock/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | DIR=$(dirname $0) 4 | 5 | cd $DIR 6 | 7 | # run zookeeper 8 | ./kafka/bin/zookeeper-server-start.sh -daemon kafka/config/zookeeper.properties 9 | 10 | # run kafka 11 | ./kafka/bin/kafka-server-start.sh -daemon kafka/config/server.properties --override auto.create.topics.enable=false 12 | 13 | # wait until kafka starts 14 | >&2 echo "Checking if Kafka server is up" 15 | until ./kafka/bin/kafka-topics.sh --list --bootstrap-server 
localhost:9092 &> /dev/null; do 16 | >&2 echo "Kafka server is unavailable - sleeping" 17 | sleep 1 18 | done 19 | 20 | # create topics with multiple partitions for scaling 21 | for topic in platform.inventory.events vulnerability.evaluator.upload \ 22 | vulnerability.evaluator.recalc platform.engine.results platform.remediation-updates.vulnerability \ 23 | vulnerability.evaluator.results platform.notifications.ingress platform.payload-status \ 24 | vulnerability.grouper.inventory.upload vulnerability.grouper.advisor.upload 25 | do 26 | ./kafka/bin/kafka-topics.sh --create --topic $topic --partitions 3 --bootstrap-server localhost:9092 --replication-factor 1 27 | done 28 | 29 | # run upload mock 30 | exec python3.12 -m platform_mock.platform_mock 31 | -------------------------------------------------------------------------------- /pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Secure Coding Practices Checklist GitHub Link 2 | - https://github.com/RedHatInsights/secure-coding-checklist 3 | 4 | ## Secure Coding Checklist 5 | - [x] Input Validation 6 | - [x] Output Encoding 7 | - [x] Authentication and Password Management 8 | - [x] Session Management 9 | - [x] Access Control 10 | - [x] Cryptographic Practices 11 | - [x] Error Handling and Logging 12 | - [x] Data Protection 13 | - [x] Communication Security 14 | - [x] System Configuration 15 | - [x] Database Security 16 | - [x] File Management 17 | - [x] Memory Management 18 | - [x] General Coding Practices 19 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 140 3 | target-version = ["py312"] 4 | 5 | [tool.semantic_release] 6 | version_variables = ["common/constants.py:APP_VERSION"] 7 | version_toml = ["pyproject.toml:tool.poetry.version"] 8 | commit_author = "vmaas-bot <40663028+vmaas-bot@users.noreply.github.com>" 9 | 10 | [tool.semantic_release.commit_parser_options] 11 | allowed_tags = [ 12 | "build", 13 | "chore", 14 | "ci", 15 | "docs", 16 | "feat", 17 | "fix", 18 | "perf", 19 | "style", 20 | "refactor", 21 | "test", 22 | ] 23 | minor_tags = ["feat"] 24 | patch_tags = ["build", "chore", "ci", "fix", "perf", "refactor"] 25 | 26 | [tool.isort] 27 | profile = "black" 28 | 29 | [tool.poetry] 30 | name = "vulnerability-engine" 31 | version = "2.56.3" 32 | description = "" 33 | authors = ["RH Insights "] 34 | readme = "README.md" 35 | packages = [] 36 | package-mode = false 37 | 38 | [tool.poetry.dependencies] 39 | python = "~3.12" 40 | python-dateutil = "^2.8.2" 41 | a2wsgi = "^1.10.4" 42 | aiohttp = "^3.10.11" 43 | aiokafka = "^0.12.0" 44 | app-common-python = "^0.2.7" 45 | apscheduler = "^3.10.4" 46 | asyncpg = "^0.29.0" 47 | boto3 = "^1.34.122" 48 | botocore = "^1.34.122" 49 | flask = "^3.0.2" 50 | gitpython = "^3.1.43" 51 | connexion = {version = "~=3.1.0", extras = ["swagger-ui", "flask"]} 52 | gunicorn = "^23.0.0" 53 | peewee = "^3.17.1" 54 | prometheus-client = "^0.20.0" 55 | psycopg2 = "^2.9.9" 56 | psycopg-pool = "^3.2.1" 57 | py-flags = "^1.1.4" 58 | pytz = "^2024.1" 59 | requests = "^2.32.3" 60 | watchtower = "^3.2.0" 61 | psycopg = {version = "^3.1.19", extras = ["pool"]} 62 | unleashclient = "^6.0.1" 63 | uvicorn = "^0.34.0" 64 | PyYAML = "^6.0.1" 65 | prometheus-async = "^25.0.0" 66 | 67 | [tool.poetry.group.dev.dependencies] 68 | pre-commit = "^4.0.0" 69 | pur = "^7.3.1" 70 | pytest = "^8.3.4" 71 
| pytest-asyncio = "^0.25.1" 72 | schema = "^0.7.4" 73 | testing-postgresql = "^1.3.0" 74 | python-box = "^7.0.0" 75 | insights-core = "^3.3.2" 76 | pytest-cov = "^6.0.0" 77 | pytest-aiohttp = "^1.0.5" 78 | 79 | [build-system] 80 | requires = ["poetry-core"] 81 | build-backend = "poetry.core.masonry.api" 82 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>konflux-ci/mintmaker-presets:group-python-poetry" 5 | ], 6 | "schedule": [ 7 | "on Monday after 3am and before 10am" 8 | ], 9 | "timezone": "Europe/Prague", 10 | "tekton": { 11 | "automerge": true, 12 | "automergeStrategy": "rebase", 13 | "automergeType": "pr", 14 | "enabled": true, 15 | "ignoreTests": true, 16 | "platformAutomerge": true 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rc=0 4 | 5 | # Check for python deps updates. 6 | CYAN='\033[0;36m' 7 | NO_COLOR='\033[0m' 8 | echo -e "${CYAN}Dependency updates checking...${NO_COLOR}" 9 | pip freeze > /tmp/req.txt 10 | pur -r /tmp/req.txt -o /dev/null | grep "Updated" | sed -e "s/Updated/You can update:/g" 11 | echo -e "${CYAN}------------------------------${NO_COLOR}" 12 | 13 | # Run unit tests 14 | echo "Running unit tests:" 15 | pytest -vvv -s --cov-report term --cov --color=yes --durations=1 16 | rc=$(($rc+$?)) 17 | 18 | exit $rc 19 | -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | # vulnerability-engine scripts 2 | 3 | Support scripts for the vulnerability-engine project 4 | 5 | ## 3scale-mock 6 | 7 | This is a command line tool for locally testing API calls. To use it, simply prefix the curl command you would use to hit the local system with the 3scale-mock script. For example: 8 | 9 | ```./3scale-mock curl -k -X GET https://localhost:8300/api/v1/cves/CVE-2014-0160/affectedsystems/ | python -m json.tool``` 10 | 11 | If ~/.3scale-mock exists, it will use its contents for the identity it uses for the various commands. If it does not exist, it uses the identity defined in the 3scale-mock script itself. 12 | 13 | A few values in the identity can be overridden on the command line for easier testing. 3scale-mock help displays the args that modify values in the identity. 14 | 15 | ```./3scale-mock --help``` 16 | 17 | 3scale-mock supports 3 commands: 'curl', 'save' and 'print' 18 | 19 | ```./3scale-mock [command] --help``` 20 | 21 | ### save 22 | 23 | Save the identity, modified by any 3scale-mock arguments that do so, to the optionally specified file, or the default ~/.3scale-mock 24 | 25 | The following command will load the default identity from ~/.3scale-mock, if it exists, or use the identity defined in the 3scale-mock script. Then it will set the account number to '00000013' and the username to 'shadowman'. Finally it will save the identity to ~/.3scale-mock. 26 | 27 | ```./3scale-mock -a 00000013 -u shadowman save``` 28 | 29 | ### print 30 | 31 | Print the base64 encoded identity after it's been modified by any 3scale-mock arguments that do so.
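For reference, a minimal sketch of how such a base64 identity value can be produced in Python (the identity fields below are illustrative, reusing the values from the save example above; the real 3scale-mock identity structure is richer):

```python
import base64
import json

# Illustrative minimal identity payload
identity = {"identity": {"account_number": "00000013", "user": {"username": "shadowman"}}}
value = base64.b64encode(json.dumps(identity).encode("utf-8")).decode("utf-8")
print(value)  # the value 3scale-mock sends in the x-rh-identity header (see curl below)
```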
32 | 33 | ### curl 34 | 35 | This executes a curl command, with the base64 encoded identity set in a header named 'x-rh-identity', followed by all arguments on the 3scale-mock command line that 3scale-mock does not itself recognize. 36 | 37 | Keep in mind, any args that 3scale-mock accepts cannot be passed to the curl command, but any other args not defined by 3scale-mock will get passed to the curl command. 38 | 39 | If you use the -d argument to make 3scale-mock print extra debug information, the debug output is mixed into the command's output, so piping it to `python -m json.tool` will fail. 40 | -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/scripts/__init__.py -------------------------------------------------------------------------------- /scripts/check_init_py.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Make sure there is __init__.py in each dir containing python scripts 4 | find . -name '*.py' | xargs dirname | sort | uniq | xargs -I {} bash -c "test -f {}/__init__.py || ( echo {} directory does not have __init__.py! && false )" 5 | -------------------------------------------------------------------------------- /scripts/check_vars.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rc=0 4 | RED='\033[0;31m' 5 | NC='\033[0m' # No Color 6 | 7 | # Are there duplicated configuration variables? 8 | duplicates=$(cat conf/*.env | grep -o "^.*=.*" | sort | uniq -d) 9 | duplicate_cnt=0 10 | for dup in $duplicates 11 | do 12 | echo "$dup" 13 | duplicate_cnt=$(($duplicate_cnt+1)) 14 | done 15 | if [ $duplicate_cnt -gt 0 ]; then 16 | echo -e "${RED} Error: Duplicated variables were found!${NC}" 17 | rc=$(($rc+1)) 18 | else 19 | echo "Variables are unique." 20 | fi 21 | 22 | # Simple test to compare variables used in code and specified in conf/*.env files, allows setting ignored variables 23 | not_configurable="CW_AWS_ACCESS_KEY_ID\|CW_AWS_SECRET_ACCESS_KEY\|SLACK_WEBHOOK\|VULNERABILITY_ENV"\ 24 | "\|MINIMAL_SCHEMA\|HOSTNAME\|DB_UPGRADE_SCRIPTS_DIR\|VE_DB_USER_ADVISOR_LISTENER_PASSWORD"\ 25 | "\|VE_DB_USER_EVALUATOR_PASSWORD\|VE_DB_USER_LISTENER_PASSWORD\|VE_DB_USER_MANAGER_PASSWORD"\ 26 | "\|VE_DB_USER_TASKOMATIC_PASSWORD\|VE_DB_USER_VMAAS_SYNC_PASSWORD\|VE_DB_USER_NOTIFICATOR_PASSWORD" 27 | not_in_code="API_URLS\|CYNDI_MOCK\|PGUSER" 28 | configurable_variables=$(cat conf/*.env | grep -o "^.*=" | sed 's/.$//g' | sort -u | grep -v "$not_in_code") 29 | code_variables=$(find . -name '*.py' -not -path './.venv/*' -not -path './scripts/*' -not -path './tests/*' -not -path './database/upgrade/*' -exec grep -oP "os\.getenv.*?\)|os\.environ\.get.*?\)" {} \; | awk -F"['\"]" '{print $2}' | sort -u | grep -v "$not_configurable") 30 | diff <(echo "$configurable_variables") <(echo "$code_variables") 31 | diff_rc=$?
32 | if [ $diff_rc -gt 0 ]; then 33 | echo -e "${RED} Error: Some variables in code or conf/*.env are missing!${NC}" 34 | else 35 | echo "Variables in code and conf/*.env are OK" 36 | fi 37 | rc=$(($rc+$diff_rc)) 38 | 39 | exit $rc 40 | -------------------------------------------------------------------------------- /scripts/db_upgrade_local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script runs the dbupgrade script in local development. 4 | # You can specify your OCI runtime as the first argument if it is not docker. 5 | # The second argument can be the name of the specific container, 6 | # which must contain the dbupgrade.sh script. 7 | # The script must be started from the root directory, where the docker-compose 8 | # yaml is located. 9 | 10 | if [[ "$1" == "" ]]; then 11 | OCI=docker 12 | else 13 | OCI=$1 14 | fi 15 | 16 | if [[ "$2" == "" ]]; then 17 | CONTAINER=vulnerability-engine-manager 18 | else 19 | CONTAINER=$2 20 | fi 21 | 22 | STARTED=0 23 | 24 | if [[ $($OCI ps -f "name=$CONTAINER" --format '{{.Names}}') != $CONTAINER ]]; then 25 | echo "Container is not running, starting compose" 26 | $OCI-compose up -d --build 27 | STARTED=1 28 | fi 29 | 30 | $OCI exec -e POSTGRESQL_USER=ve_db_admin \ 31 | -e POSTGRESQL_PASSWORD=ve_db_admin_pwd \ 32 | -it $CONTAINER ./dbupgrade.sh 33 | 34 | if [[ "$STARTED" == 1 ]]; then 35 | $OCI-compose down 36 | fi 37 | -------------------------------------------------------------------------------- /scripts/devel-compose: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | exec docker-compose -f docker-compose.yml -f docker-compose.devel.yml "$@" 4 | -------------------------------------------------------------------------------- /scripts/extract_dashboard_configmap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import json 3 | import sys 4 | 5 | import yaml 6 | 7 | with open(sys.argv[1]) as fp: 8 | y = yaml.safe_load(fp) 9 | d = y["data"] 10 | key = list(d.keys())[0] 11 | dashboard = json.loads(d[key]) 12 | print(json.dumps(dashboard, indent=4, sort_keys=True)) 13 | -------------------------------------------------------------------------------- /scripts/gabi/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/scripts/gabi/__init__.py -------------------------------------------------------------------------------- /scripts/gabi/risk_report.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Risk report of affected systems for chosen CVEs.""" 3 | import pprint 4 | import re 5 | import sys 6 | 7 | from utils import GABI_CFG 8 | from utils import query 9 | from utils import set_gabi_token 10 | from utils import set_gabi_url 11 | from utils import validate_date 12 | 13 | PARTITIONS = 256 14 | CVE_REGEX = re.compile(r"^CVE-\d{4}-\d+$") 15 | 16 | 17 | def main(): 18 | example = f"Example run: {sys.argv[0]} 2024-01-01 2024-12-31 CVE-2024-1086,CVE-2024-36971" 19 | if len(sys.argv) != 4: 20 | print(f"Invalid arguments! {example}", file=sys.stderr) 21 | sys.exit(1) 22 | 23 | if not validate_date(sys.argv[1]) or not validate_date(sys.argv[2]): 24 | print(f"Invalid date format!
{example}", file=sys.stderr) 25 | sys.exit(1) 26 | 27 | cves = tuple(sys.argv[3].split(",")) 28 | if any(not CVE_REGEX.match(cve) for cve in cves): 29 | print(f"Invalid CVEs: {cves} {example}", file=sys.stderr) 30 | sys.exit(1) 31 | 32 | start_date = sys.argv[1] 33 | end_date = sys.argv[2] 34 | 35 | set_gabi_url() 36 | set_gabi_token() 37 | 38 | print(f"Gabi URL: {GABI_CFG['url']}") 39 | print(f"Start date: {start_date}") 40 | print(f"End date: {end_date}") 41 | print("") 42 | 43 | cve_sys = {} 44 | for idx in range(PARTITIONS): 45 | cnts = query( 46 | f""" 47 | select cve.cve, count(distinct system_id) 48 | from system_vulnerabilities_active_{idx} sv join 49 | cve_metadata cve on sv.cve_id = cve.id 50 | where 51 | cve.cve in {cves} and 52 | sv.first_reported >= '{start_date}' and 53 | sv.first_reported <= '{end_date}' 54 | group by cve.cve; 55 | """ 56 | )[1:] 57 | 58 | for cve, cnt_sys in cnts: 59 | if cve not in cve_sys: 60 | cve_sys[cve] = 0 61 | cve_sys[cve] += int(cnt_sys) 62 | 63 | if ((idx + 1) % 10) == 0: 64 | print(f"{idx + 1}/{PARTITIONS} partitions done...") 65 | print("All partitions done.") 66 | print("") 67 | 68 | print("Red Hat Risk Report:") 69 | pprint.pp(cve_sys, sort_dicts=True) 70 | 71 | 72 | if __name__ == "__main__": 73 | main() 74 | -------------------------------------------------------------------------------- /scripts/gabi/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import subprocess 4 | import sys 5 | from datetime import date 6 | 7 | import requests 8 | import yaml 9 | 10 | KUBE_CONFIG_FILE = os.path.expanduser("~/.kube/config") 11 | 12 | GABI_CFG = {"url": "", "headers": {}} 13 | 14 | 15 | def run_oc(args): 16 | cmd = ["oc"] 17 | cmd.extend(args) 18 | cmd.extend(["-o", "yaml"]) 19 | result = subprocess.run(cmd, capture_output=True) 20 | return yaml.safe_load(result.stdout) 21 | 22 | 23 | def set_gabi_url(): 24 | routes = run_oc(["get", "route"]) 25 | for route in routes["items"]: 26 | if route["metadata"]["name"].startswith("gabi-"): 27 | GABI_CFG["url"] = f"https://{route['spec']['host']}/query" 28 | return 29 | print("Gabi URL not found! Are you logged in? (oc login)", file=sys.stderr) 30 | sys.exit(2) 31 | 32 | 33 | def set_gabi_token(): 34 | with open(KUBE_CONFIG_FILE, "r") as kube_config: 35 | kube_config = yaml.safe_load(kube_config.read()) 36 | context_users = {} 37 | for context in kube_config["contexts"]: 38 | context_users[context["name"]] = context["context"]["user"] 39 | current_context_name = kube_config["current-context"] 40 | current_context_user = context_users[current_context_name] 41 | for user in kube_config["users"]: 42 | if user["name"] == current_context_user: 43 | GABI_CFG["headers"]["Authorization"] = f"Bearer {user['user']['token']}" 44 | return 45 | print("Gabi token not found! Are you logged in? 
(oc login)", file=sys.stderr) 46 | sys.exit(2) 47 | 48 | 49 | def query(query): 50 | tries = 0 51 | data = {"query": query} 52 | while tries <= 5: 53 | r = requests.post(GABI_CFG["url"], headers=GABI_CFG["headers"], json=data) 54 | if r.status_code == 200: 55 | return r.json()["result"] 56 | else: 57 | print(f"Query failed: {query}, HTTP code: {r.status_code}", file=sys.stderr) 58 | tries += 1 59 | sys.exit(3) 60 | 61 | 62 | def validate_date(date_text): 63 | try: 64 | date.fromisoformat(date_text) 65 | except ValueError: 66 | return False 67 | return True 68 | -------------------------------------------------------------------------------- /scripts/openshift-common.sh: -------------------------------------------------------------------------------- 1 | current_user=$(oc whoami 2> /dev/null) 2 | 3 | if [ "$current_user" == "" ]; then 4 | echo "Please login to OpenShift cluster and select project." 5 | exit 1 6 | fi 7 | 8 | filter="$2" 9 | if [ "$filter" != "" ]; then 10 | dcs=$(oc get dc 2> /dev/null | tail -n +2 | awk '{print $1}' | grep "$filter") 11 | else 12 | dcs=$(oc get dc 2> /dev/null | tail -n +2 | awk '{print $1}') 13 | fi 14 | 15 | echo "$(oc project)" 16 | echo "Logged in as user \"$current_user\"." 17 | echo "" 18 | if [ "$dcs" == "" ]; then 19 | echo "No deployment configs found in project. Exiting." 20 | exit 2 21 | fi 22 | 23 | warning="$warning" 24 | echo "$warning" 25 | 26 | for dc in $dcs; do 27 | echo "$dc" 28 | done 29 | echo "" 30 | echo -n "Continue? [y/N] " 31 | 32 | read choice 33 | choice=$(echo $choice | awk '{print toupper($0)}') 34 | 35 | if [ "$choice" != "Y" ]; then 36 | echo "Exiting." 37 | exit 3 38 | fi 39 | 40 | action="$3" 41 | 42 | for dc in $dcs; do 43 | containers=$(oc get dc/$dc -o json | python -c "import sys,json;obj=json.load(sys.stdin);containers=obj['spec']['template']['spec']['containers'];print(' '.join([c['name'] for c in containers]))" | wc -w) 44 | containers=$((containers-1)) 45 | for i in $(seq 0 $containers); do 46 | if [ "$action" == "remove-resources" ]; then 47 | oc patch dc/$dc --type json -p "[{\"op\": \"remove\", \"path\": \"/spec/template/spec/containers/$i/resources\"}]" 48 | elif [ "$action" == "devel-container" ]; then 49 | oc patch dc/$dc --type json -p "[{\"op\": \"add\", \"path\": \"/spec/template/spec/containers/$i/command\", \"value\": [\"sleep\", \"infinity\"]}]" 50 | oc patch dc/$dc --type json -p "[{\"op\": \"add\", \"path\": \"/spec/template/spec/containers/$i/securityContext\", \"value\": {\"runAsUser\": '0'}}]" 51 | fi 52 | done 53 | done 54 | 55 | echo "" 56 | echo "Done." 57 | -------------------------------------------------------------------------------- /scripts/openshift-devel-container.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | warning="WARNING: This script will OVERRIDE container entrypoint of following deployment configs:" 4 | filter="$1" 5 | 6 | . $(dirname $0)/openshift-common.sh "$warning" "$filter" "devel-container" 7 | -------------------------------------------------------------------------------- /scripts/openshift-remove-required-resources.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | warning="WARNING: This script will REMOVE resource requests/limits from following deployment configs:" 4 | filter="$1" 5 | 6 | . 
$(dirname $0)/openshift-common.sh "$warning" "$filter" "remove-resources" 7 | -------------------------------------------------------------------------------- /scripts/openshift-rsync.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | case $1 in 4 | evaluator|listener|manager|vmaas_sync) 5 | dir=$1 6 | shift 7 | pod=$(oc get pods | grep $dir | grep Running | awk '{print $1}') 8 | oc rsync $@ common/ $pod:$dir/common/ 9 | oc rsync $@ $dir/ $pod:$dir/$dir/ 10 | ;; 11 | *) 12 | echo "Usage: $0 evaluator|listener|manager|vmaas_sync [-h|-w|-q|...]" 13 | ;; 14 | esac 15 | -------------------------------------------------------------------------------- /scripts/poetry-lock.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Script to lock Poetry dependencies in a container and copy the generated lock file back 4 | 5 | workdir="/tmp/poetry-locker/" 6 | dockerfile="Dockerfile-poetry" 7 | 8 | for runtime in podman docker; do 9 | cmd=$(command -v $runtime) 10 | if [[ "$cmd" != "" ]] && $cmd ps &> /dev/null; then 11 | break 12 | else 13 | echo "Unable to use $runtime" 14 | cmd="" 15 | fi 16 | done 17 | 18 | if [[ "$cmd" != "" ]]; then 19 | echo "Using: $cmd" 20 | else 21 | echo "No container runtime found!" 22 | exit 1 23 | fi 24 | 25 | mkdir -p $workdir 26 | cat <<EOF > $workdir$dockerfile 27 | FROM registry.access.redhat.com/ubi8/ubi-minimal 28 | RUN microdnf install --setopt=install_weak_deps=0 --setopt=tsflags=nodocs \ 29 | python312 python3.12-pip python3.12-devel libpq-devel gcc git && \ 30 | microdnf clean all 31 | RUN pip3 install --upgrade pip && pip3 install --upgrade poetry~=2.0 32 | EOF 33 | 34 | current_dir=$(pwd) 35 | cd $workdir 36 | $cmd build -t poetry-locker -f $dockerfile . 37 | cd $current_dir 38 | $cmd run --rm -d --name poetry-locker-container poetry-locker sleep infinity 39 | 40 | $cmd exec poetry-locker-container bash -c "mkdir -p /tmp" 41 | $cmd cp "pyproject.toml" poetry-locker-container:"/tmp/" 42 | $cmd exec poetry-locker-container bash -c "cd /tmp && poetry lock" 43 | $cmd cp poetry-locker-container:"/tmp/poetry.lock" "." 44 | 45 | $cmd kill poetry-locker-container 46 | -------------------------------------------------------------------------------- /scripts/run_integration_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | USAGE="Usage: 4 | $0 VMAAS_PATH [SETTINGS_YAML] 5 | 6 | Required parameters: 7 | VMAAS_PATH - path to vmaas directory 8 | 9 | Optional parameters: 10 | SETTINGS_YAML - path to directory with test settings (default: conf/settings.local.yaml) 11 | " 12 | 13 | [[ "$#" -gt 2 || -z $1 ]] && { echo "$USAGE" >&2; exit 1; } 14 | 15 | wait_for_build() { 16 | count=60 17 | while [ "$count" -gt 0 ]; do 18 | docker-compose ps | grep "$1 " | grep Up 19 | [ "$?" -eq 0 ] && break 20 | echo "$1 is not Up, waiting 5s" 21 | ((count--)) 22 | sleep 5 23 | done 24 | [ "$count" -gt 0 ] || exit 1 25 | } 26 | 27 | sync_vmaas() { 28 | count=5 29 | while [ "$count" -gt 0 ]; do 30 | curl -X PUT http://localhost:8081/api/v1/sync/cvemap -H "Authorization: token token" 31 | [ "$?"
-eq 0 ] && break 32 | ((count--)) 33 | sleep 5 34 | done 35 | [ "$count" -gt 0 ] || exit 1 36 | } 37 | 38 | cd $1 39 | docker-compose up -d 40 | wait_for_build "vmaas-webapp" 41 | sync_vmaas 42 | cd - 43 | 44 | [ "$(docker-compose ps | wc -l)" -gt 2 ] && docker-compose down 45 | cp conf/common.env conf/common.env.bak 46 | sed -i 's|VMAAS_HOST=http://vmaas_webapp:8000|VMAAS_HOST=https://webapp-vmaas-stable.1b13.insights.openshiftapps.com|' conf/common.env 47 | docker-compose up -d --build 48 | wait_for_build "vulnerability-engine-manager" 49 | 50 | docker pull quay.io/cloudservices/iqe-tests:latest 51 | 52 | if [ -n "$2" ]; then 53 | settings="$2" 54 | else 55 | settings="conf/settings.local.yaml" 56 | fi 57 | 58 | docker run -it --name iqe --rm --network host \ 59 | -v $(readlink -f $settings):/iqe_settings/settings.local.yaml:z \ 60 | -e IQE_TESTS_LOCAL_CONF_PATH=/iqe_settings \ 61 | -e ENV_FOR_DYNACONF=dev \ 62 | quay.io/cloudservices/iqe-tests:latest \ 63 | iqe tests plugin vulnerability -k 'not basic_auth and not csaw' -m api -v --local 64 | 65 | mv conf/common.env.bak conf/common.env 66 | -------------------------------------------------------------------------------- /scripts/schemaspy.properties: -------------------------------------------------------------------------------- 1 | # type of database. Run with -dbhelp for details 2 | schemaspy.t=pgsql11 3 | # database properties: host, port number, name, user, password 4 | schemaspy.host=vulnerability_database 5 | schemaspy.port=5432 6 | schemaspy.db=vulnerability 7 | schemaspy.u=ve_db_admin 8 | schemaspy.p=ve_db_admin_pwd 9 | -------------------------------------------------------------------------------- /scripts/validate_dashboards.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import json 3 | import os 4 | import sys 5 | 6 | import yaml 7 | 8 | 9 | class Error: 10 | def __init__(self, msg, f): 11 | self.msg = msg 12 | self.f = f 13 | 14 | def __repr__(self): 15 | return "ERROR %s: %s" % (self.f, self.msg) 16 | 17 | 18 | def validate(f): 19 | print("Validating %s" % f) 20 | if not f.endswith(".yaml") and not f.endswith(".yml"): 21 | yield Error("Bad file name", f) 22 | return 23 | 24 | try: 25 | with open(f) as fp: 26 | y = yaml.safe_load(fp) 27 | 28 | if not y["metadata"]["name"]: 29 | yield Error("Resource name not found", f) 30 | 31 | d = y["data"] 32 | 33 | if len(d) != 1: 34 | yield Error("Invalid number of keys in ConfigMap", f) 35 | 36 | key = list(d.keys())[0] 37 | 38 | if not key.endswith(".json"): 39 | yield Error("Key does not end with .json: %s" % key, f) 40 | 41 | json.loads(d[key]) 42 | except Exception as e: 43 | yield Error(str(e), f)  # not all exceptions expose a .msg attribute 44 | 45 | 46 | seen_error = False 47 | 48 | for f in os.listdir(sys.argv[1]): 49 | for err in validate(sys.argv[1] + f): 50 | seen_error = True 51 | print(err) 52 | 53 | if seen_error: 54 | sys.exit(1) 55 | -------------------------------------------------------------------------------- /security-scan.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ########################### 4 | # This script sources the security-scan.sh script from 5 | # https://github.com/RedHatInsights/platform-security-gh-workflow 6 | # In combination with Jenkins, it scans a repo's Dockerfile 7 | # to provide a Software Bill of Materials (SBOM) and scan for security vulnerabilities.
8 | ########################### 9 | 10 | set -exv 11 | 12 | IMAGE_NAME="vulnerability-engine" 13 | DOCKERFILE_LOCATION="." 14 | 15 | # (Severity Options: negligible, low, medium, high, critical) 16 | FAIL_ON_SEVERITY="high" 17 | 18 | # Build on "podman" or "docker" 19 | PODMAN_OR_DOCKER="podman" 20 | 21 | curl -sSL https://raw.githubusercontent.com/RedHatInsights/platform-security-gh-workflow/master/jenkins/security-scan.sh | \ 22 | sh -s "${IMAGE_NAME}" "${DOCKERFILE_LOCATION}" "${FAIL_ON_SEVERITY}" "${PODMAN_OR_DOCKER}" 23 | -------------------------------------------------------------------------------- /taskomatic/README.md: -------------------------------------------------------------------------------- 1 | # Vulnerability Engine Taskomatic 2 | 3 | ## Overview 4 | Vulnerability Engine Taskomatic is a service for running periodic tasks at given intervals. It uses the Python apscheduler library as the scheduling backend. All actions executed by the scheduler are blocking: if a job is still running when its next run should fire, that run is skipped until the job completes, so no more than one instance of a job may run at a given time. 5 | 6 | ### Adding new job 7 | To add a new job periodically executed by Taskomatic, add a Python file into the `jobs` directory and ensure it contains a `run()` function, which is the job's entrypoint and the function Taskomatic executes (a minimal job sketch is shown at the end of this README). Configuration of the job is done through environment variables, as no parameters are passed into the `run` function. 8 | 9 | ### Testing or manually running the job 10 | Jobs can be executed manually using Python, e.g. 11 | `python3 -m jobs.stale_systems` 12 | If you are using devel compose, which maps your git directory into the running container, you need to prepend `taskomatic` to the import path, e.g. 13 | `python3 -m taskomatic.jobs.stale_systems` 14 | 15 | If you are using logging, you need to call `init_logging()` first, otherwise logs are not printed. 16 | Example of manually running the job in a devel container with logging enabled: 17 | `python3 -c "from common.logging import init_logging; init_logging(); import taskomatic.jobs.stale_systems as ss; ss.run()"` 18 | 19 | ### Configuration 20 | As stated above, jobs are configured through environment variables. 21 | To enable the newly added job, edit the `JOBS` variable by adding the job's configuration. 22 | Example configuration for a dummy job located in `jobs/foo.py`, run every 3 minutes, would be: 23 | `JOBS=foo:3` 24 | If multiple jobs are to be run, simply separate them with commas, e.g. 25 | `JOBS=foo:3,bar:7,baz:15` 26 | It's also possible in the OpenShift environment to have multiple Taskomatic pods running, each with different jobs enabled. However, if two pods are given the same job to execute, there is no synchronization mechanism between the pods to prevent them from executing the job simultaneously.
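For illustration, here is a minimal sketch of such a job file — a hypothetical `jobs/dummy.py` modeled on the real jobs in this directory; the `DUMMY_JOB_LIMIT` environment variable is invented for the example:

```python
"""
Hypothetical minimal Taskomatic job (jobs/dummy.py) — a sketch, not a real job.
"""
import os

from common.logging import get_logger
from common.logging import init_logging
from taskomatic.jobs.common import get_conn

LOGGER = get_logger(__name__)

# run() takes no parameters, so all configuration comes from environment variables
LIMIT = int(os.getenv("DUMMY_JOB_LIMIT", "100"))  # invented for this example


def run():
    """Job entrypoint executed by Taskomatic."""
    LOGGER.info("Started dummy job (limit %s).", LIMIT)
    conn = get_conn()
    cur = conn.cursor()
    # any periodic work goes here; this just counts systems
    cur.execute("SELECT count(*) FROM system_platform")
    LOGGER.info("Seen %s systems.", cur.fetchone()[0])
    cur.close()
    conn.close()
    LOGGER.info("Finished dummy job.")


if __name__ == "__main__":
    init_logging()
    run()
```

Enabling it to run every 5 minutes would then just be `JOBS=dummy:5` (or appending `dummy:5` to an existing `JOBS` list).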
27 | -------------------------------------------------------------------------------- /taskomatic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/taskomatic/__init__.py -------------------------------------------------------------------------------- /taskomatic/jobs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/taskomatic/jobs/__init__.py -------------------------------------------------------------------------------- /taskomatic/jobs/common.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common code shared between taskomatic modules 3 | """ 4 | 5 | import psycopg2 6 | 7 | from common.config import Config 8 | 9 | CFG = Config() 10 | 11 | 12 | def get_conn(): 13 | """Returns DB connection""" 14 | return psycopg2.connect( 15 | dbname=CFG.db_name, 16 | user=CFG.db_user, 17 | password=CFG.db_pass, 18 | host=CFG.db_host, 19 | port=CFG.db_port, 20 | sslmode=CFG.db_ssl_mode, 21 | sslrootcert=CFG.db_ssl_root_cert_path, 22 | ) 23 | -------------------------------------------------------------------------------- /taskomatic/jobs/delete_notifications.py: -------------------------------------------------------------------------------- 1 | """ 2 | Periodic cleanup of sent notifications 3 | """ 4 | 5 | from datetime import datetime 6 | from datetime import timedelta 7 | from datetime import timezone 8 | 9 | from common.config import Config 10 | from common.logging import get_logger 11 | from common.logging import init_logging 12 | from taskomatic.jobs.common import get_conn 13 | 14 | LOGGER = get_logger(__name__) 15 | 16 | CFG = Config() 17 | 18 | 19 | def run(): 20 | """Job entrypoint""" 21 | LOGGER.info("Started delete_notifications job") 22 | current_date = datetime.now(timezone.utc).date() 23 | cve_deadline = current_date - timedelta(days=CFG.cve_freshness_threshold) 24 | 25 | conn = get_conn() 26 | cur = conn.cursor() 27 | 28 | cur.execute( 29 | """DELETE FROM notified_accounts AS na 30 | WHERE na.cve_id IN (SELECT id 31 | FROM cve_metadata 32 | WHERE public_date < %s) 33 | AND na.notif_type != 'any-cve-known-exploit' 34 | RETURNING na.cve_id 35 | """, 36 | (cve_deadline,), 37 | ) 38 | 39 | del_notifs = cur.rowcount 40 | cve_ids = [del_row[0] for del_row in cur] 41 | cves = set() 42 | if cve_ids: 43 | cur.execute("""SELECT cve FROM cve_metadata WHERE id IN %s""", (tuple(cve_ids),)) 44 | cves = {cve[0] for cve in cur} 45 | conn.commit() 46 | cur.close() 47 | conn.close() 48 | 49 | LOGGER.info("Deleted %d sent notifications for CVEs: %s", del_notifs, cves) 50 | 51 | 52 | if __name__ == "__main__": 53 | init_logging() 54 | run() 55 | -------------------------------------------------------------------------------- /taskomatic/jobs/delete_systems.py: -------------------------------------------------------------------------------- 1 | """ 2 | Periodic cleanup of deleted systems 3 | """ 4 | 5 | from datetime import datetime 6 | from datetime import timedelta 7 | 8 | import pytz 9 | 10 | from common.config import Config 11 | from common.logging import get_logger 12 | from common.logging import init_logging 13 | from taskomatic.jobs.common import get_conn 14 | 15 | LOGGER = get_logger(__name__) 16 | 17 | CFG = Config() 18 | 19 | 20 | def run(): 21 | 
"""Application entrypoint""" 22 | LOGGER.info("Started delete_systems job.") 23 | 24 | conn = get_conn() 25 | cur = conn.cursor() 26 | deleted = 0 27 | 28 | while True: 29 | curr_time = datetime.now(tz=pytz.utc) 30 | cur.execute( 31 | """SELECT inventory_id from system_platform sp 32 | WHERE when_deleted IS NOT NULL 33 | AND when_deleted < %s 34 | LIMIT 1 FOR UPDATE OF sp""", 35 | (curr_time - timedelta(hours=CFG.system_deletion_threshold),), 36 | ) 37 | inventory_id = cur.fetchone() 38 | if not inventory_id: 39 | break 40 | cur.execute("""SELECT deleted_inventory_id FROM delete_system(%s)""", (inventory_id[0],)) 41 | success = cur.fetchone() 42 | if success: 43 | deleted += 1 44 | else: 45 | LOGGER.error("Unable to delete inventory_id: %s", inventory_id) 46 | conn.commit() 47 | cur.close() 48 | conn.close() 49 | 50 | LOGGER.info("Cleared %s deleted systems.", deleted) 51 | 52 | LOGGER.info("Finished delete_systems job.") 53 | 54 | 55 | if __name__ == "__main__": 56 | init_logging() 57 | run() 58 | -------------------------------------------------------------------------------- /taskomatic/jobs/stale_systems.py: -------------------------------------------------------------------------------- 1 | """ 2 | Periodic discovery of stale systems 3 | """ 4 | 5 | from common.logging import get_logger 6 | from common.logging import init_logging 7 | from taskomatic.jobs.common import get_conn 8 | 9 | LOGGER = get_logger(__name__) 10 | 11 | 12 | def run(): 13 | """Application entrypoint""" 14 | LOGGER.info("Started stale_systems job.") 15 | 16 | conn = get_conn() 17 | cur = conn.cursor() 18 | 19 | query = """ 20 | UPDATE system_platform 21 | SET stale = TRUE 22 | FROM ( 23 | SELECT id 24 | FROM system_platform 25 | WHERE when_deleted IS NULL 26 | AND culled_timestamp IS NOT NULL 27 | AND stale = FALSE 28 | AND now() > culled_timestamp 29 | ORDER BY id 30 | ) AS subquery 31 | WHERE system_platform.id = subquery.id 32 | """ 33 | cur.execute(query) 34 | LOGGER.info("Marked %s systems as stale", cur.rowcount) 35 | conn.commit() 36 | cur.close() 37 | 38 | cur = conn.cursor() 39 | query = """ 40 | UPDATE system_platform 41 | SET when_deleted = now() 42 | FROM ( 43 | SELECT id 44 | FROM system_platform 45 | WHERE when_deleted IS NULL 46 | AND culled_timestamp IS NOT NULL 47 | AND stale = TRUE 48 | AND now() > culled_timestamp + INTERVAL '1 day' 49 | ORDER BY id 50 | ) AS subquery 51 | WHERE system_platform.id = subquery.id 52 | """ 53 | cur.execute(query) 54 | LOGGER.info("Marked %s systems which should be culled for deletion.", cur.rowcount) 55 | conn.commit() 56 | cur.close() 57 | 58 | conn.close() 59 | LOGGER.info("Finished stale_systems job.") 60 | 61 | 62 | if __name__ == "__main__": 63 | init_logging() 64 | run() 65 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/__init__.py -------------------------------------------------------------------------------- /tests/common_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/common_tests/__init__.py -------------------------------------------------------------------------------- /tests/common_tests/conftest.py: 
-------------------------------------------------------------------------------- 1 | # pylint:disable=missing-docstring 2 | import psycopg2 3 | import pytest 4 | 5 | 6 | @pytest.fixture(scope="module") 7 | def pg_db_conn(pg_db_mod): 8 | """Returns connection to PostgreSQL database.""" 9 | conn = psycopg2.connect(**pg_db_mod.dsn()) 10 | yield conn 11 | conn.close() 12 | -------------------------------------------------------------------------------- /tests/common_tests/test_bounded_executor.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=no-self-use 2 | """ 3 | Test suite for bounded executor. 4 | """ 5 | import logging as log 6 | 7 | from common.bounded_executor import BoundedExecutor 8 | from common.logging import get_logger 9 | 10 | 11 | class TestBoundedExecutor: 12 | """Test suite""" 13 | 14 | @staticmethod 15 | def _executor_func_mock(number): 16 | logger = get_logger(__name__) 17 | logger.info(number) 18 | return number 19 | 20 | def test_executor_single(self): 21 | """Test executor as single task""" 22 | executor = BoundedExecutor(1) 23 | number = 1 24 | 25 | future = executor.submit(TestBoundedExecutor._executor_func_mock, number) 26 | res = future.result() 27 | 28 | assert res == number 29 | executor.shutdown() 30 | 31 | def test_executor_blocking(self, caplog): 32 | """Test executor if is blocking""" 33 | executor = BoundedExecutor(1) 34 | number = 1 35 | with caplog.at_level(log.INFO): 36 | future1 = executor.submit(TestBoundedExecutor._executor_func_mock, number) 37 | number = 2 38 | future2 = executor.submit(TestBoundedExecutor._executor_func_mock, number) 39 | res1 = future1.result() 40 | res2 = future2.result() 41 | 42 | assert res1 == 1 43 | assert res2 == 2 44 | 45 | assert "1" in caplog.messages[0] 46 | assert "2" in caplog.messages[1] 47 | -------------------------------------------------------------------------------- /tests/common_tests/test_logging.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # pylint: disable=no-self-use 3 | """ 4 | Test logging. 
5 | """ 6 | import logging 7 | 8 | import pytest 9 | 10 | from common.config import Config 11 | from common.logging import setup_cw_logging 12 | 13 | 14 | class TestLogging: 15 | """TestLogging""" 16 | 17 | @pytest.fixture 18 | def cfg(self): 19 | """Return Config object.""" 20 | return Config() 21 | 22 | def test_cw_disabled(self, caplog): 23 | """tests disabled cloudwatch logging""" 24 | with caplog.at_level(logging.INFO): 25 | logger = logging.getLogger("test_cw") 26 | setup_cw_logging(logger) 27 | assert caplog.records[0].msg == "CloudWatch logging disabled" 28 | caplog.clear() 29 | 30 | def test_no_cw(self, caplog, cfg, monkeypatch): 31 | """test_no_cw""" 32 | monkeypatch.setattr(cfg, "cw_aws_access_key_id", None) 33 | monkeypatch.setattr(cfg, "cw_aws_secret_access_key", None) 34 | monkeypatch.setattr(cfg, "cw_enabled", "TRUE") 35 | with caplog.at_level(logging.INFO): 36 | logger = logging.getLogger("test_cw") 37 | setup_cw_logging(logger) 38 | assert caplog.records[0].msg == "CloudWatch logging disabled due to missing access key" 39 | caplog.clear() 40 | 41 | def test_cw_err(self, caplog, cfg, monkeypatch): 42 | """test_cw_err""" 43 | monkeypatch.setattr(cfg, "cw_aws_access_key_id", "cw-aws-access-key-id") 44 | monkeypatch.setattr(cfg, "cw_aws_secret_access_key", "cw-aws-secret-access-key") 45 | monkeypatch.setattr(cfg, "cw_enabled", "TRUE") 46 | with caplog.at_level(logging.INFO): 47 | logger = logging.getLogger("test_cw") 48 | setup_cw_logging(logger) 49 | assert caplog.records[0].msg.startswith("Unable to enable CloudWatch logging:") 50 | caplog.clear() 51 | -------------------------------------------------------------------------------- /tests/common_tests/test_status_app.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=no-self-use 2 | """ 3 | Test suite for status app. 
4 | """ 5 | import asyncio 6 | 7 | from common import logging 8 | from common import status_app 9 | 10 | 11 | class TestStatusApp: 12 | """Test suite""" 13 | 14 | def test_status_app_runner(self): 15 | """Test status app runner""" 16 | logger = logging.get_logger(__name__) 17 | app = status_app.create_status_app(logger) 18 | loop = asyncio.new_event_loop() 19 | runner, site = status_app.create_status_runner(app, "20000", logger, loop) 20 | assert runner is not None 21 | assert site is not None 22 | -------------------------------------------------------------------------------- /tests/data/truncate_dev_data.sql: -------------------------------------------------------------------------------- 1 | TRUNCATE TABLE repo, system_repo, timestamp_kv, cve_account_data, cve_rule_mapping, playbook, inventory.hosts_v1_1, notified_accounts, package_name, cpe, vulnerable_package, vulnerable_package_cve, system_vulnerable_package, system_cve_data; 2 | DELETE FROM system_vulnerabilities WHERE rh_account_id in (0, 1); -- truncating can't be restricted to selected partitions and truncating all takes too long 3 | DELETE FROM insights_rule; 4 | DELETE FROM system_platform; 5 | DELETE FROM cve_metadata; 6 | DELETE FROM rh_account; 7 | -------------------------------------------------------------------------------- /tests/listener_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/listener_tests/__init__.py -------------------------------------------------------------------------------- /tests/listener_tests/conftest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # pylint:disable=missing-docstring,redefined-outer-name 3 | import asyncio 4 | 5 | import psycopg2 6 | import pytest 7 | 8 | 9 | @pytest.fixture(scope="class") 10 | def event_loop(): 11 | return asyncio.new_event_loop() 12 | 13 | 14 | @pytest.fixture(scope="module") 15 | def pg_db_conn(pg_db_mod): 16 | """Returns connection to PostgreSQL database.""" 17 | conn = psycopg2.connect(**pg_db_mod.dsn()) 18 | yield conn 19 | conn.close() 20 | -------------------------------------------------------------------------------- /tests/listener_tests/test_common.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=no-self-use 2 | """ 3 | Test suite for common. 
4 | """ 5 | 6 | from listener import common 7 | 8 | 9 | class TestCommon: 10 | """Test suite""" 11 | 12 | def test_reporter_not_allowed(self): 13 | """Test validation of kafka message with not allowed reporter""" 14 | msg_dict = {"host": {"reporter": "invalid"}} 15 | res = common.reporter_allowed(msg_dict) 16 | assert res is False 17 | 18 | msg_dict = {"input": {"host": {"reporter": "invalid"}}} 19 | res = common.reporter_allowed(msg_dict) 20 | assert res is False 21 | -------------------------------------------------------------------------------- /tests/manager_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/manager_tests/__init__.py -------------------------------------------------------------------------------- /tests/manager_tests/test_apistatus_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for api status handler 3 | """ 4 | 5 | # pylint: disable=missing-docstring,too-many-public-methods,invalid-name 6 | from .vuln_testcase import FlaskTestCase 7 | 8 | 9 | class TestApiStatusHandler(FlaskTestCase): 10 | def test_api_status(self): 11 | """Check if status is ok""" 12 | self.vfetch("/apistatus").check_response() 13 | -------------------------------------------------------------------------------- /tests/manager_tests/test_dashbar_handler.py: -------------------------------------------------------------------------------- 1 | """ 2 | Dashbar tests 3 | """ 4 | 5 | import pytest 6 | 7 | from .vuln_testcase import FlaskTestCase 8 | 9 | 10 | @pytest.mark.feature_flag("vulnerability.cves_without_errata") 11 | class TestDashbarHandler(FlaskTestCase): 12 | """Dashbar tests suite""" 13 | 14 | def test_dashbar(self): 15 | """Test dashbar""" 16 | res = self.vfetch("dashbar").check_response() 17 | assert res.body.exploitable_cves == 2 18 | assert res.body.critical_cves == 5 19 | assert res.body.cves_with_rule == 8 20 | assert res.body.important_cves == 6 21 | 22 | def test_dashbar_tags(self): 23 | """Test dashbar with tags filter""" 24 | res = self.vfetch("dashbar?tags=vulnerability/usage=NAS").check_response() 25 | assert res.body.exploitable_cves == 2 26 | assert res.body.critical_cves == 1 27 | assert res.body.cves_with_rule == 7 28 | assert res.body.important_cves == 6 29 | 30 | def test_dashbar_sap_system(self): 31 | """Test dashbar with sap systems""" 32 | res = self.vfetch("dashbar?sap_system=true").check_response() 33 | assert res.body.exploitable_cves == 2 34 | assert res.body.critical_cves == 5 35 | assert res.body.cves_with_rule == 8 36 | assert res.body.important_cves == 6 37 | 38 | def test_dashbar_sap_ids(self): 39 | """Test dashbar with sap ids""" 40 | res = self.vfetch("dashbar?sap_sids=xxee").check_response() 41 | assert res.body.exploitable_cves == 0 42 | assert res.body.critical_cves == 0 43 | assert res.body.cves_with_rule == 0 44 | assert res.body.important_cves == 0 45 | -------------------------------------------------------------------------------- /tests/manager_tests/test_models.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # pylint: disable=missing-docstring,invalid-name,protected-access,no-member 3 | """ 4 | Tests for database models. 
5 | """ 6 | from common.peewee_model import BaseModel 7 | 8 | 9 | def test_used_db(peewee_db): 10 | assert peewee_db is BaseModel._meta.database 11 | -------------------------------------------------------------------------------- /tests/manager_tests/test_readonly.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # pylint: disable=missing-docstring,too-many-public-methods,invalid-name 3 | """ 4 | Manager read only mode tests. 5 | """ 6 | from common.config import Config 7 | 8 | from .vuln_testcase import FlaskTestCase 9 | 10 | CSV_EXPORT_ENDPOINTS = ( 11 | "cves/CVE-2016-1/affected_systems", 12 | "systems", 13 | "systems/00000000-0000-0000-0000-000000000004/cves", 14 | "vulnerabilities/cves", 15 | ) 16 | CFG = Config() 17 | 18 | 19 | class TestReadOnly(FlaskTestCase): 20 | def test_status(self, monkeypatch): 21 | monkeypatch.setattr(CFG, "read_only_mode", 1) 22 | self.vfetch( 23 | "status", data='{"inventory_id": "00000000-0000-0000-0000-000000000005", "cve":"CVE-2014-1", "status_id":2}', method="PATCH" 24 | ).check_response(status_code=503) 25 | 26 | def test_opt_out(self, monkeypatch): 27 | monkeypatch.setattr(CFG, "read_only_mode", 1) 28 | response = self.vfetch( 29 | "systems/opt_out", json={"inventory_id": "00000000-0000-0000-0000-000000000004", "opt_out": True}, method="PATCH" 30 | ) 31 | assert response.status_code == 503 32 | -------------------------------------------------------------------------------- /tests/notificator_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/notificator_tests/__init__.py -------------------------------------------------------------------------------- /tests/notificator_tests/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Fixtures for notificator tests 3 | """ 4 | 5 | import asyncpg 6 | import pytest 7 | import pytest_asyncio 8 | from aiohttp import test_utils 9 | from aiohttp import web 10 | 11 | from notificator.app import CacheHandler 12 | from notificator.app import create_notificator_app 13 | 14 | 15 | @pytest_asyncio.fixture(loop_scope="function", scope="function") 16 | async def asyncpg_pool(event_loop, pg_db_func): 17 | """Creates asyncpg pool fixture""" 18 | dsn = pg_db_func.dsn() 19 | dsn_str = f"postgres://{dsn['user']}@{dsn['host']}:{dsn['port']}/{dsn['database']}" 20 | 21 | pool = await asyncpg.create_pool( 22 | dsn=dsn_str, 23 | loop=event_loop, 24 | ) 25 | yield pool 26 | await pool.close() 27 | 28 | 29 | @pytest.fixture 30 | async def notificator_server(): 31 | """Creates notificator app server""" 32 | app = create_notificator_app(None, [(r"/api/v1/cache", CacheHandler)]) 33 | runner = web.AppRunner(app) 34 | await runner.setup() 35 | port = test_utils.unused_port() 36 | site = web.TCPSite(runner, "0.0.0.0", port) 37 | await site.start() 38 | 39 | yield app 40 | 41 | await runner.cleanup() 42 | 43 | 44 | @pytest.fixture() 45 | async def http_client(notificator_server, aiohttp_client): 46 | """Http client fixture""" 47 | client = await aiohttp_client(notificator_server) 48 | yield client 49 | client.close() 50 | -------------------------------------------------------------------------------- /tests/notificator_tests/test_app.py: -------------------------------------------------------------------------------- 1 | """ 2 | Notificator handlers unit 
tests 3 | """ 4 | 5 | import asyncio 6 | 7 | from notificator.notificator import NOTIFICATOR_PERIOD 8 | from notificator.notificator import NotificationType 9 | from notificator.notificator import NotificatorConditions 10 | from notificator.notificator_queue import NotificatorQueue 11 | 12 | 13 | class TestNotificatorHandlers: 14 | """ 15 | Notificator handlers tests suite 16 | """ 17 | 18 | async def _init_notificator(self, pool): 19 | loop = asyncio.new_event_loop() 20 | conditions = NotificatorConditions(pool) 21 | await conditions.init() 22 | queue = NotificatorQueue(NOTIFICATOR_PERIOD, pool, conditions, loop) 23 | await queue.init() 24 | return conditions, queue 25 | 26 | async def test_cache_handler(self, asyncpg_pool, http_client): 27 | """Test cache refresh handler""" 28 | _, queue = await self._init_notificator(asyncpg_pool) 29 | 30 | # these notified accounts are not in db 31 | queue.notified_accounts[(1337, 1)] = (NotificationType.CVSS_NOTIFICATION, NotificationType.EXPLOITS_NOTIFICATION) 32 | queue.notified_accounts[(333, 1)] = NotificationType.CVSS_NOTIFICATION 33 | 34 | resp = await http_client.put("/api/v1/cache", data="") 35 | assert resp.status == 200 36 | 37 | assert (1337, 1) not in queue.notified_accounts 38 | assert (333, 1) not in queue.notified_accounts 39 | NotificatorQueue.delete() 40 | -------------------------------------------------------------------------------- /tests/notificator_tests/test_notificator_conditions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Notificator conditions unit tests 3 | """ 4 | 5 | import pytest 6 | 7 | from common.peewee_model import NotificationType 8 | from notificator.notificator import NotificatorConditions 9 | 10 | 11 | class TestNotificatorConditions: 12 | """Notificator conditions test suite""" 13 | 14 | @staticmethod 15 | async def _build_conditions(pool): 16 | """Initializes notificator conditions""" 17 | conditions = NotificatorConditions(pool) 18 | await conditions.init() 19 | return conditions 20 | 21 | @staticmethod 22 | @pytest.mark.asyncio 23 | async def test_conditions_valid_cve(asyncpg_pool): 24 | """Tests map initialization""" 25 | conditions = await TestNotificatorConditions._build_conditions(asyncpg_pool) 26 | # test that cves are fetched in map correctly 27 | assert await conditions.cve_exists(1) is True 28 | assert await conditions.cve_exists(0) is True 29 | 30 | @staticmethod 31 | @pytest.mark.asyncio 32 | async def test_conditions_invalid_cve(asyncpg_pool): 33 | """Tests map initialization""" 34 | conditions = await TestNotificatorConditions._build_conditions(asyncpg_pool) 35 | 36 | # test that cves do not exist 37 | assert await conditions.cve_exists(-1) is False 38 | assert await conditions.cve_exists(1337) is False 39 | 40 | @staticmethod 41 | @pytest.mark.asyncio 42 | async def test_conditions_notif_events(asyncpg_pool): 43 | """Test generated notification events on specified cves""" 44 | # make CVE-2017-1 fresh 45 | async with asyncpg_pool.acquire() as conn: 46 | await conn.execute("""UPDATE cve_metadata SET public_date = CURRENT_TIMESTAMP WHERE id = 3""") 47 | conditions = await TestNotificatorConditions._build_conditions(asyncpg_pool) 48 | 49 | # build notification events for CVE-2016-1 50 | # Not fresh CVE, but has exploits 51 | assert await conditions.cve_exists(2) is True 52 | events = conditions.make_events_for_cve(2) 53 | assert len(events) == 1 54 | assert NotificationType.EXPLOITS_NOTIFICATION in events 55 | 56 | # CVE-2017-1 has critical
impact, high cvss_score and has active security rule 57 | assert await conditions.cve_exists(3) is True 58 | events = conditions.make_events_for_cve(3) 59 | assert len(events) == 3 60 | assert NotificationType.CVSS_NOTIFICATION in events 61 | assert NotificationType.SEVERITY_NOTIFICATION in events 62 | assert NotificationType.RULE_NOTIFICATION in events 63 | -------------------------------------------------------------------------------- /tests/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/scripts/__init__.py -------------------------------------------------------------------------------- /tests/taskomatic_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/taskomatic_tests/__init__.py -------------------------------------------------------------------------------- /tests/taskomatic_tests/conftest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # pylint:disable=missing-docstring,redefined-outer-name 3 | import psycopg2 4 | import pytest 5 | from aiohttp import test_utils 6 | from aiohttp import web 7 | 8 | from common import database_handler 9 | from taskomatic import taskomatic 10 | 11 | from ..utils import restore_db 12 | 13 | 14 | @pytest.fixture(scope="module") 15 | def pg_db_conn(pg_db_mod): 16 | with database_handler.DatabasePool(1): 17 | conn = psycopg2.connect(**pg_db_mod.dsn()) 18 | yield conn 19 | conn.close() 20 | 21 | 22 | @pytest.fixture 23 | def cleanup(request): 24 | def db_cleanup(): 25 | restore_db(database_handler.pg_testing) 26 | 27 | request.addfinalizer(db_cleanup) 28 | 29 | 30 | @pytest.fixture 31 | async def taskomatic_server(): 32 | app = taskomatic.TaskomaticApp() 33 | runner = web.AppRunner(app.app) 34 | await runner.setup() 35 | port = test_utils.unused_port() 36 | site = web.TCPSite(runner, "0.0.0.0", port) 37 | await site.start() 38 | 39 | yield app.app 40 | 41 | await runner.cleanup() 42 | 43 | 44 | @pytest.fixture() 45 | async def http_client(taskomatic_server, aiohttp_client): 46 | client = await aiohttp_client(taskomatic_server) 47 | yield client 48 | client.close() 49 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/CVE_123_456/CVE_123_456_DISABLED/metadata.yaml: -------------------------------------------------------------------------------- 1 | description: 'Privilege Escalation' 2 | severity: Privilege Escalation 3 | publish_date: '2021-01-01 00:00:00' 4 | resolution_risk: Update Package 5 | status: inactive 6 | cves: 7 | - CVE-123-456 8 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/CVE_123_456/CVE_123_456_DISABLED/summary.md: -------------------------------------------------------------------------------- 1 | Vulnerable package versions are installed. 
2 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/CVE_123_456/CVE_123_456_ENABLED/metadata.yaml: -------------------------------------------------------------------------------- 1 | description: 'Privilege Escalation' 2 | severity: Privilege Escalation 3 | publish_date: '2021-01-01 00:00:00' 4 | resolution_risk: Update Package 5 | status: active 6 | cves: 7 | - CVE-123-456 8 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/CVE_123_456/CVE_123_456_ENABLED/summary.md: -------------------------------------------------------------------------------- 1 | Vulnerable package versions are installed. 2 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/CVE_123_456/more_info.md: -------------------------------------------------------------------------------- 1 | * The Security Team also maintains a frequently updated blog at [securityblog.redhat.com](https://securityblog.redhat.com). 2 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/CVE_123_456/plugin.yaml: -------------------------------------------------------------------------------- 1 | name: 'test1' 2 | node_id: '123456' 3 | product_code: rhel 4 | python_module: some.python.module 5 | reboot_required: false 6 | role: host 7 | tags: 8 | - cve 9 | - security 10 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/CVE_123_456/reason.md: -------------------------------------------------------------------------------- 1 | This system is vulnerable because a vulnerable package {{=pydata.vulnerable_packages[0]}} is installed. 
2 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/CVE_123_456/resolution.md: -------------------------------------------------------------------------------- 1 | Red Hat recommends that you update the `tzdata` package: 2 | 3 | ~~~ 4 | # yum update tzdata 5 | ~~~ 6 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-content-vulnerability/content/config.yaml: -------------------------------------------------------------------------------- 1 | severity: 2 | Packet Loss: 2 3 | Privilege Escalation: 3 4 | VM Performance Loss: 2 5 | VM Start Failure: 3 6 | Kdump Failure: 1 7 | Application Hang: 2 8 | Service Inoperative: 2 9 | null: 1 # Default for when no impact is set 10 | 11 | resolution_risk: 12 | Activate SELinux: 4 13 | Update Package: 1 14 | null: 1 # Default for when no risk is set 15 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-playbooks/playbooks/security/CVE_123_456/CVE_123_456_DISABLED/disable_fixit.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/taskomatic_tests/data/insights-playbooks/playbooks/security/CVE_123_456/CVE_123_456_DISABLED/disable_fixit.yml -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-playbooks/playbooks/security/CVE_123_456/CVE_123_456_DISABLED/upgrade_fixit.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/taskomatic_tests/data/insights-playbooks/playbooks/security/CVE_123_456/CVE_123_456_DISABLED/upgrade_fixit.yml -------------------------------------------------------------------------------- /tests/taskomatic_tests/data/insights-playbooks/playbooks/security/CVE_123_456/CVE_123_456_ENABLED/upgrade_fixit.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/taskomatic_tests/data/insights-playbooks/playbooks/security/CVE_123_456/CVE_123_456_ENABLED/upgrade_fixit.yml -------------------------------------------------------------------------------- /tests/taskomatic_tests/test_db_metrics.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for db_metrics taskomatic job 3 | """ 4 | 5 | import taskomatic.jobs.db_metrics as dm 6 | from common.database_handler import DatabasePoolConnection 7 | 8 | 9 | class TestDbMetrics: 10 | """ 11 | Class holding all tests 12 | """ 13 | 14 | @staticmethod 15 | def test_db_metrics(pg_db_conn, monkeypatch): # pylint: disable=unused-argument 16 | """Test gathering of DB metrics""" 17 | 18 | with DatabasePoolConnection() as conn: 19 | monkeypatch.setattr(dm, "get_conn", lambda: conn) 20 | dm.run() 21 | 22 | assert dm.METRIC_SYSTEMS.collect()[0].samples[0].value == 33 # there are 33 systems in DB 23 | assert dm.METRIC_CYNDI_SYSTEMS.collect()[0].samples[0].value == 33 # there are also 33 systems syndicated 24 | assert len(dm.METRIC_TABLE_SIZE.collect()[0].samples) == 548 25 |
-------------------------------------------------------------------------------- /tests/taskomatic_tests/test_delete_notifications.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for delete_notifications taskomatic job 3 | """ 4 | 5 | import taskomatic.jobs.delete_notifications as dn 6 | from common.database_handler import DatabasePoolConnection 7 | 8 | 9 | class TestDeleteNotifications: 10 | """Class holding tests""" 11 | 12 | @staticmethod 13 | def test_delete(pg_db_conn, monkeypatch): 14 | """Test deleting old notifications, keeping the exploits""" 15 | cur = pg_db_conn.cursor() 16 | cur.execute( 17 | """INSERT INTO notified_accounts VALUES (0, 0, 'any-cve-known-exploit'), 18 | (0, 0, 'new-cve-cvss'), 19 | (0, 0, 'new-cve-severity'), 20 | (0, 0, 'new-cve-security-rule')""" 21 | ) 22 | cur.execute( 23 | """INSERT INTO notified_accounts VALUES (0, 1, 'any-cve-known-exploit'), 24 | (0, 1, 'new-cve-cvss'), 25 | (0, 1, 'new-cve-severity')""" 26 | ) 27 | pg_db_conn.commit() 28 | 29 | with DatabasePoolConnection() as conn: 30 | monkeypatch.setattr(dn, "get_conn", lambda: conn) 31 | dn.run() 32 | 33 | cur.execute("""SELECT COUNT(*) FROM notified_accounts""") 34 | res = cur.fetchone() 35 | assert res[0] == 2 36 | cur.execute("""DELETE FROM notified_accounts""") 37 | 38 | @staticmethod 39 | def test_delete_empty(pg_db_conn, monkeypatch): 40 | """Test that no notifications are deleted""" 41 | cur = pg_db_conn.cursor() 42 | cur.execute( 43 | """INSERT INTO notified_accounts VALUES (0, 0, 'any-cve-known-exploit'), 44 | (0, 1, 'any-cve-known-exploit')""" 45 | ) 46 | pg_db_conn.commit() 47 | cur.execute("""SELECT COUNT(*) FROM notified_accounts""") 48 | og_len = cur.fetchone()[0] 49 | 50 | with DatabasePoolConnection() as conn: 51 | monkeypatch.setattr(dn, "get_conn", lambda: conn) 52 | dn.run() 53 | 54 | cur.execute("""SELECT COUNT(*) FROM notified_accounts""") 55 | new_len = cur.fetchone()[0] 56 | assert new_len == og_len 57 | cur.execute("""DELETE FROM notified_accounts""") 58 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/test_delete_systems.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for delete_systems taskomatic job 3 | """ 4 | 5 | import taskomatic.jobs.delete_systems as ds 6 | from common.database_handler import DatabasePoolConnection 7 | 8 | 9 | class TestDeleteSystems: 10 | """ 11 | Class holding all tests 12 | """ 13 | 14 | @staticmethod 15 | def test_delete_systems(pg_db_conn, monkeypatch): 16 | """Test the deletion of obsolete systems""" 17 | cur = pg_db_conn.cursor() 18 | cur.execute("""SELECT count(id) from system_platform sp""") 19 | all_systems = cur.fetchone()[0] 20 | assert all_systems == 33 21 | cur.execute( 22 | """SELECT count(id) from system_platform sp 23 | WHERE when_deleted IS NOT NULL""" 24 | ) 25 | to_delete = cur.fetchone()[0] 26 | assert to_delete == 3 27 | 28 | with DatabasePoolConnection() as conn: 29 | monkeypatch.setattr(ds, "get_conn", lambda: conn) 30 | ds.run() 31 | 32 | cur.execute("""SELECT count(id) from system_platform sp""") 33 | after_deletion = cur.fetchone()[0] 34 | 35 | assert all_systems - to_delete == after_deletion 36 | -------------------------------------------------------------------------------- /tests/taskomatic_tests/test_rules_git_sync.py: -------------------------------------------------------------------------------- 1 | """ 2 | Unit tests for rules_git_sync taskomatic
"""
Unit tests for the rules_git_sync taskomatic job
"""

from os import path
from pathlib import Path

import taskomatic.jobs.rules_git_sync as rgs
from common.database_handler import DatabasePoolConnection


class TestRulesGitSync:
    """
    Class holding all tests
    """

    @staticmethod
    def test_sync(pg_db_conn, monkeypatch):  # pylint: disable=unused-argument
        """Test rules sync"""
        tmpdir = path.join(Path(__file__).resolve().parent, "data")
        monkeypatch.setattr(rgs, "CONTENT_GIT_NAME", "insights-content-vulnerability")
        monkeypatch.setattr(rgs, "PLAYBOOKS_GIT_NAME", "insights-playbooks")

        with DatabasePoolConnection() as conn:
            monkeypatch.setattr(rgs, "get_conn", lambda: conn)
            rgs.sync(tmpdir, "xxee", "eexx")

        cur = pg_db_conn.cursor()

        cur.execute(
            """SELECT id, name, active, rule_only FROM insights_rule
               WHERE name LIKE 'CVE_123_456%' ORDER BY name"""
        )
        rows = cur.fetchall()

        assert len(rows) == 3

        assert rows[0][1] == "CVE_123_456"
        assert rows[0][2] is True
        assert rows[0][3] is True

        assert rows[1][1] == "CVE_123_456|CVE_123_456_DISABLED"
        assert rows[1][2] is False
        assert rows[1][3] is False

        assert rows[2][1] == "CVE_123_456|CVE_123_456_ENABLED"
        assert rows[2][2] is True
        assert rows[2][3] is False

        cur.execute("""SELECT cve, celebrity_name FROM cve_metadata WHERE cve = 'CVE-123-456'""")
        row = cur.fetchone()

        assert row is not None
        assert row[1] == "test1"

        cur.execute("""SELECT cve, celebrity_name FROM cve_metadata WHERE cve = 'CVE-2018-6'""")
        row = cur.fetchone()

        assert row is not None
        assert row[1] is None

        cur.close()

--------------------------------------------------------------------------------
/tests/taskomatic_tests/test_stale_systems.py:
--------------------------------------------------------------------------------
"""
Unit tests for the stale_systems taskomatic job
"""

import taskomatic.jobs.stale_systems as ss
from common.database_handler import DatabasePoolConnection


class TestStaleSystems:
    """
    Class holding all tests
    """

    @staticmethod
    def test_stale_systems(pg_db_conn, monkeypatch):
        """Test the marking of stale systems"""
        cur = pg_db_conn.cursor()
        cur.execute(
            """SELECT count(id) FROM system_platform sp
               WHERE when_deleted IS NULL
               AND culled_timestamp IS NOT NULL
               AND stale = 'F'
               AND now() > culled_timestamp"""
        )
        to_mark = cur.fetchone()[0]
        assert to_mark == 2
        cur.execute(
            """SELECT count(id) FROM system_platform sp
               WHERE when_deleted IS NULL
               AND culled_timestamp IS NOT NULL
               AND stale = 'T'"""
        )
        already_stale = cur.fetchone()[0]
        assert already_stale == 1

        with DatabasePoolConnection() as conn:
            monkeypatch.setattr(ss, "get_conn", lambda: conn)
            ss.run()

        cur.execute(
            """SELECT count(id) FROM system_platform sp
               WHERE when_deleted IS NULL
               AND culled_timestamp IS NOT NULL
               AND stale = 'F'
               AND now() > culled_timestamp"""
        )
        staling = cur.fetchone()[0]
        assert staling == 0
        cur.execute(
            """SELECT count(id) FROM system_platform sp
               WHERE when_deleted IS NOT NULL
               AND culled_timestamp IS NOT NULL
               AND stale = 'T'"""
        )
        # note: unlike the queries above, this count is over systems with
        # when_deleted set
        stale = cur.fetchone()[0]
        assert stale == 3

        assert to_mark + already_stale == stale

--------------------------------------------------------------------------------
/tests/vmaas_sync_tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/vmaas_sync_tests/__init__.py

--------------------------------------------------------------------------------
/tests/vmaas_sync_tests/conftest.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# pylint:disable=missing-docstring
import psycopg2
import pytest

from common import database_handler

from ..utils import restore_db


@pytest.fixture(scope="module")
def pg_db_conn(pg_db_mod):
    """Returns a connection to the PostgreSQL database."""
    conn = psycopg2.connect(**pg_db_mod.dsn())
    yield conn
    conn.close()


@pytest.fixture
def cleanup(request):
    """Restores the database once the test using this fixture finishes."""
    def db_cleanup():
        restore_db(database_handler.pg_testing)

    request.addfinalizer(db_cleanup)

--------------------------------------------------------------------------------
/tests/zz_database_tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/tests/zz_database_tests/__init__.py

--------------------------------------------------------------------------------
/vmaas_sync/README.md:
--------------------------------------------------------------------------------
# Vulnerability Engine Vmaas-Sync Service

## Overview
Vmaas-sync periodically queries the VMaaS service for CVE metadata.

## Design
The sync runs every 4 hours and asks the [VMaaS](https://github.com/RedHatInsights/vmaas) service for every known CVE. For each returned CVE, the sync inserts or updates its metadata (CVSS score, etc.). If a CVE is present in Vulnerability but not in VMaaS, it is deleted only when no system is vulnerable to it. When new CVE data appears in VMaaS, systems with an enabled repository that was updated since the last sync are re-evaluated: they are sent to the `evaluator-recalc` component on the `vulnerability.evaluator.recalc` topic with message type `re-evaluate_system` (a sketch of such a message appears at the end of this document).

--------------------------------------------------------------------------------
/vmaas_sync/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RedHatInsights/vulnerability-engine/3c7452a2b5a1e9f60fc73f9b396afd005b4b6dc6/vmaas_sync/__init__.py
--------------------------------------------------------------------------------
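To make the hand-off described in `vmaas_sync/README.md` concrete, here is a minimal sketch of producing one re-evaluation message. It is not the repository's own code: the payload shape (`host`/`id`), the broker address, and the use of a plain `kafka-python` producer are all assumptions made for illustration — only the topic name `vulnerability.evaluator.recalc` and the message type `re-evaluate_system` come from the README, and the repository's real messaging helpers (`common/mqueue.py`) are not shown in this dump.

```python
# Hypothetical sketch, not the repository's producer. Payload shape and broker
# address are invented; topic and message type come from vmaas_sync/README.md.
import json

from kafka import KafkaProducer  # kafka-python


def request_reevaluation(producer: KafkaProducer, inventory_id: str) -> None:
    """Ask evaluator-recalc to re-evaluate a single system."""
    message = {
        "type": "re-evaluate_system",  # message type from the README
        "host": {"id": inventory_id},  # hypothetical payload shape
    }
    producer.send("vulnerability.evaluator.recalc", value=message)


if __name__ == "__main__":
    producer = KafkaProducer(
        bootstrap_servers="localhost:9092",  # assumed local broker
        value_serializer=lambda v: json.dumps(v).encode("utf-8"),
    )
    request_reevaluation(producer, "00000000-0000-0000-0000-000000000001")
    producer.flush()  # block until the message is handed to the broker
```

Publishing one message per affected system, rather than re-evaluating inline, keeps the 4-hour metadata refresh decoupled from evaluation work: the recalc evaluator can drain the topic at its own pace.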