├── .arcignore ├── .dockerignore ├── .github └── workflows │ ├── issue_notifications.yaml │ ├── main_test.yaml │ ├── pr_lint.yaml │ ├── pr_notifications.yaml │ ├── pr_test_compat.yaml │ ├── pr_test_e2e_basic.yaml │ ├── pr_test_e2e_extra_one.yaml │ ├── pr_test_e2e_extra_two.yaml │ ├── pr_test_unit.yaml │ ├── release.yaml │ ├── release_nightly.yaml │ ├── subflow_release.yaml │ ├── subflow_run_compat_tests.yaml │ ├── subflow_run_e2e_tests.yaml │ └── subflow_run_unit_tests.yaml ├── .gitignore ├── .golangci.yaml ├── .mapping.json ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── ROADMAP.md ├── api └── v1 │ ├── chyt_types.go │ ├── chyt_webhook.go │ ├── groupversion_info.go │ ├── helpers.go │ ├── remotedatanodes_types.go │ ├── remoteexecnodes_types.go │ ├── remotetabletnodes_types.go │ ├── remoteytsaurus_types.go │ ├── spyt_types.go │ ├── spyt_webhook.go │ ├── ytsaurus_types.go │ ├── ytsaurus_types_test.go │ ├── ytsaurus_webhook.go │ └── zz_generated.deepcopy.go ├── compat_test.sh ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd-ref-docs │ └── config.yaml ├── crd │ ├── bases │ │ ├── cluster.ytsaurus.tech_chyts.yaml │ │ ├── cluster.ytsaurus.tech_remotedatanodes.yaml │ │ ├── cluster.ytsaurus.tech_remoteexecnodes.yaml │ │ ├── cluster.ytsaurus.tech_remotetabletnodes.yaml │ │ ├── cluster.ytsaurus.tech_remoteytsaurus.yaml │ │ ├── cluster.ytsaurus.tech_spyts.yaml │ │ └── cluster.ytsaurus.tech_ytsaurus.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_chyts.yaml │ │ ├── cainjection_in_remotedatanodes.yaml │ │ ├── cainjection_in_remoteexecnodes.yaml │ │ ├── cainjection_in_remotetabletnodes.yaml │ │ ├── cainjection_in_remoteytsaurus.yaml │ │ ├── cainjection_in_spyts.yaml │ │ ├── cainjection_in_ytsaurus.yaml │ │ ├── webhook_in_chyts.yaml │ │ ├── webhook_in_remotedatanodes.yaml │ │ ├── webhook_in_remoteexecnodes.yaml │ │ ├── 
webhook_in_remotetabletnodes.yaml │ │ ├── webhook_in_remoteytsaurus.yaml │ │ ├── webhook_in_spyts.yaml │ │ └── webhook_in_ytsaurus.yaml ├── default │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── manager_config_patch.yaml │ ├── manager_webhook_patch.yaml │ └── webhookcainjection_patch.yaml ├── helm │ ├── Chart.yaml │ ├── kustomization.yaml │ └── patches │ │ ├── cainjection.yaml │ │ └── webhook.yaml ├── kind │ ├── audit-policy.yaml │ ├── kind-with-audit.yaml │ └── kind-with-registry.yaml ├── manager │ ├── controller_manager_config.yaml │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── chyt_editor_role.yaml │ ├── chyt_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── remotedatanodes_editor_role.yaml │ ├── remotedatanodes_viewer_role.yaml │ ├── remoteexecnodes_editor_role.yaml │ ├── remoteexecnodes_viewer_role.yaml │ ├── remoteytsaurus_editor_role.yaml │ ├── remoteytsaurus_viewer_role.yaml │ ├── role.yaml │ ├── role_binding.yaml │ ├── service_account.yaml │ ├── spyt_editor_role.yaml │ ├── spyt_viewer_role.yaml │ ├── ytsaurus_editor_role.yaml │ └── ytsaurus_viewer_role.yaml ├── registry │ └── .gitignore ├── samples │ ├── 0.3.1 │ │ ├── cluster_v1_demo.yaml │ │ └── cluster_v1_minikube.yaml │ ├── 0.4.0 │ │ ├── cluster_v1_demo.yaml │ │ └── cluster_v1_minikube.yaml │ ├── 0.4.1 │ │ ├── cluster_v1_demo.yaml │ │ └── cluster_v1_minikube.yaml │ ├── 0.9.1 │ │ ├── cluster_v1_demo.yaml │ │ └── cluster_v1_local.yaml │ ├── cluster_v1_chyt.yaml │ ├── cluster_v1_cri.yaml │ ├── cluster_v1_demo.yaml │ ├── cluster_v1_local.yaml │ ├── cluster_v1_local_with_kafka.yaml │ ├── cluster_v1_remoteexecnodes.yaml │ ├── cluster_v1_remoteytsaurus.yaml │ ├── cluster_v1_spyt.yaml │ ├── 
cluster_v1_tls.yaml │ ├── jupyter │ │ └── jupyter-demo.yaml │ └── prometheus │ │ ├── prometheus.yaml │ │ ├── prometheus_role_binding.yaml │ │ ├── prometheus_service_account.yaml │ │ └── prometheus_service_monitor.yaml └── webhook │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ ├── manifests.yaml │ └── service.yaml ├── controllers ├── chyt_controller.go ├── chyt_sync.go ├── component_manager.go ├── helpers.go ├── remote_controllers_common_test.go ├── remotedatanodes_controller.go ├── remotedatanodes_sync.go ├── remotedatanodes_test.go ├── remoteexecnodes_controller.go ├── remoteexecnodes_sync.go ├── remoteexecnodes_test.go ├── remotetabletnodes_controller.go ├── remotetabletnodes_sync.go ├── remotetabletnodes_test.go ├── spyt_controller.go ├── spyt_sync.go ├── sync.go ├── update_flow_steps.go ├── update_flow_steps_test.go ├── ytsaurus_controller.go └── ytsaurus_local_test.go ├── docs └── api.md ├── go.mod ├── go.sum ├── hack ├── .dockerignore ├── .gitignore └── boilerplate.go.txt ├── main.go ├── pkg ├── apiproxy │ ├── chyt.go │ ├── proxy.go │ ├── spyt.go │ └── ytsaurus.go ├── canonize │ ├── canonize.go │ └── colordiff.go ├── components │ ├── canondata │ │ └── TestConfigMerge │ │ │ ├── http_proxy_config_override.yson │ │ │ ├── http_proxy_config_wo_override.yson │ │ │ └── test.canondata │ ├── chyt.go │ ├── component.go │ ├── config_helper.go │ ├── config_helper_test.go │ ├── controller_agent.go │ ├── data_node.go │ ├── data_node_remote.go │ ├── discovery.go │ ├── exec_node.go │ ├── exec_node_base.go │ ├── exec_node_remote.go │ ├── helpers.go │ ├── httpproxy.go │ ├── init_job.go │ ├── init_job_test.go │ ├── kafka_proxy.go │ ├── master.go │ ├── master_caches.go │ ├── microservice.go │ ├── pods_manager.go │ ├── query_tracker.go │ ├── queue_agent.go │ ├── rpcproxy.go │ ├── scheduler.go │ ├── server.go │ ├── serveroptions.go │ ├── spyt.go │ ├── strawberry_controller.go │ ├── suite_test.go │ ├── tablet_node.go │ ├── tablet_node_remote.go │ ├── tablet_node_test.go │ 
├── tcpproxy.go │ ├── ui.go │ ├── user.go │ ├── volume.go │ ├── yql_agent.go │ └── ytsaurus_client.go ├── consts │ ├── address.go │ ├── cmd.go │ ├── conditions.go │ ├── defaults.go │ ├── labels.go │ ├── metrika.go │ ├── names.go │ └── types.go ├── labeller │ └── labeller.go ├── mock │ ├── declaration.go │ └── mock_ytsaurus_client.go ├── resources │ ├── ca_bundle.go │ ├── configmap.go │ ├── deployment.go │ ├── headless_service.go │ ├── http_service.go │ ├── job.go │ ├── monitoring_service.go │ ├── resource.go │ ├── rpc_service.go │ ├── statefulset.go │ ├── string_secret.go │ ├── tcp_service.go │ └── tls_secret.go ├── testutil │ ├── builders.go │ ├── combined_watcher.go │ ├── spec_builders.go │ └── testhelper.go └── ytconfig │ ├── canondata │ ├── TestGetContainerdConfig │ │ ├── exec-node.yaml │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetControllerAgentsConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetDataNodeConfig │ │ ├── with-trash-ttl │ │ │ ├── data-node-with-trash-ttl.yaml │ │ │ ├── test.canondata │ │ │ └── ytsaurus.yaml │ │ ├── with-watermark │ │ │ ├── data-node-with-watermark.yaml │ │ │ ├── test.canondata │ │ │ └── ytsaurus.yaml │ │ └── without-trash-ttl │ │ │ ├── data-node-without-trash-ttl.yaml │ │ │ ├── test.canondata │ │ │ └── ytsaurus.yaml │ ├── TestGetDataNodeWithoutYtsaurusConfig │ │ ├── data-node.yaml │ │ └── test.canondata │ ├── TestGetDiscoveryConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetExecNodeConfig │ │ ├── with-job-resources │ │ │ ├── exec-node-with-job-resources.yaml │ │ │ ├── test.canondata │ │ │ └── ytsaurus.yaml │ │ └── without-job-resources │ │ │ ├── exec-node-without-job-resources.yaml │ │ │ ├── test.canondata │ │ │ └── ytsaurus.yaml │ ├── TestGetExecNodeConfigWithCri │ │ ├── isolated-containers-with-job-resources │ │ │ ├── exec-node-isolated-containers-with-job-resources.yaml │ │ │ └── test.canondata │ │ ├── isolated-containers-without-job-resources │ │ │ ├── 
exec-node-isolated-containers-without-job-resources.yaml │ │ │ └── test.canondata │ │ ├── single-container-with-job-resources │ │ │ ├── exec-node-single-container-with-job-resources.yaml │ │ │ └── test.canondata │ │ ├── single-container-without-job-resources │ │ │ ├── exec-node-single-container-without-job-resources.yaml │ │ │ └── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetExecNodeWithoutYtsaurusConfig │ │ ├── common.yaml │ │ ├── exec-node.yaml │ │ ├── remote-ytsaurus.yaml │ │ └── test.canondata │ ├── TestGetHTTPProxyConfig │ │ ├── http-proxy.yaml │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetHTTPProxyConfigDisableCreateOauthUser │ │ ├── http-proxy.yaml │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetHTTPProxyConfigEnableCreateOauthUser │ │ ├── http-proxy.yaml │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetMasterCachesConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetMasterCachesWithFixedHostsConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetMasterConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetMasterWithFixedHostsConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetMasterWithMonitoringPortConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetNativeClientConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetQueryTrackerConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetQueueAgentConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetRPCProxyConfig │ │ ├── rpc-proxy.yaml │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetRPCProxyWithoutOauthConfig │ │ ├── rpc-proxy.yaml │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetSchedulerConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetSchedulerWithFixedMasterHostsConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetStrawberryControllerConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── 
TestGetStrawberryControllerConfigWithCustomFamilies │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetStrawberryControllerConfigWithExtendedHTTPMapping │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetStrawberryInitClusterConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetTCPProxyConfig │ │ ├── tcp-proxy.yaml │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetTabletNodeConfig │ │ ├── tablet-node.yaml │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetTabletNodeWithoutYtsaurusConfig │ │ ├── common.yaml │ │ ├── remote-ytsaurus.yaml │ │ ├── tablet-node.yaml │ │ └── test.canondata │ ├── TestGetUIClustersConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetUIClustersConfigWithSettings │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetUICustomConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetUICustomConfigWithSettings │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetYQLAgentConfig │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── TestGetYtsaurusWithTlsInterconnect │ │ ├── discovery │ │ │ └── test.canondata │ │ ├── exec-node │ │ │ └── test.canondata │ │ ├── master-cache │ │ │ └── test.canondata │ │ ├── master │ │ │ └── test.canondata │ │ ├── queue-agent │ │ │ └── test.canondata │ │ └── ytsaurus.yaml │ └── TestResolverOptionsKeepSocketAndForceTCP │ │ ├── test.canondata │ │ └── ytsaurus.yaml │ ├── cell_id.go │ ├── common.go │ ├── cri.go │ ├── discovery.go │ ├── generator.go │ ├── generator_test.go │ ├── logging.go │ ├── master.go │ ├── master_caches.go │ ├── node.go │ ├── proxy.go │ ├── query_tracker.go │ ├── queue_agent.go │ ├── scheduler.go │ ├── strawberry.go │ ├── ui.go │ └── yql_agent.go ├── test ├── e2e │ ├── README.md │ ├── checks_test.go │ ├── helpers_test.go │ ├── suite_test.go │ ├── watcher_test.go │ └── ytsaurus_controller_test.go └── webhooks │ ├── webhook_suite_test.go │ └── ytsaurus_webhooks_test.go └── ytop-chart ├── .helmignore ├── Chart.yaml ├── templates ├── 
_helpers.tpl ├── crds │ ├── chyts.cluster.ytsaurus.tech.yaml │ ├── remotedatanodes.cluster.ytsaurus.tech.yaml │ ├── remoteexecnodes.cluster.ytsaurus.tech.yaml │ ├── remotetabletnodes.cluster.ytsaurus.tech.yaml │ ├── remoteytsaurus.cluster.ytsaurus.tech.yaml │ ├── spyts.cluster.ytsaurus.tech.yaml │ └── ytsaurus.cluster.ytsaurus.tech.yaml ├── deployment.yaml ├── leader-election-rbac.yaml ├── manager-config.yaml ├── manager-rbac.yaml ├── metrics-cert.yaml ├── metrics-reader-rbac.yaml ├── metrics-service.yaml ├── proxy-rbac.yaml ├── selfsigned-issuer.yaml ├── serviceaccount.yaml ├── validating-webhook-configuration.yaml ├── webhook-cert.yaml └── webhook-service.yaml └── values.yaml /.arcignore: -------------------------------------------------------------------------------- 1 | bin 2 | cover.out 3 | ytop-chart/Chart.lock 4 | ytop-chart/charts 5 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 
3 | 4 | bin/ 5 | testbin/ 6 | a.yaml 7 | report.xml 8 | -------------------------------------------------------------------------------- /.github/workflows/issue_notifications.yaml: -------------------------------------------------------------------------------- 1 | name: Issue notifications 2 | 3 | on: 4 | issues: 5 | types: [opened] 6 | 7 | jobs: 8 | telegram: 9 | name: Telegram 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Send to notification chat 13 | uses: appleboy/telegram-action@master 14 | with: 15 | to: ${{ secrets.TELEGRAM_NOTIFICATIONS_CHAT_ID }} 16 | token: ${{ secrets.TELEGRAM_BOT_TOKEN }} 17 | disable_web_page_preview: true 18 | format: markdown 19 | message: | 20 | New issue in *${{github.repository}}*: `${{github.event.issue.title}}` 21 | ${{github.server_url}}/${{github.repository}}/issues/${{github.event.issue.number}} 22 | 23 | - name: Send to k8s operator chat 24 | uses: appleboy/telegram-action@master 25 | with: 26 | to: ${{ secrets.TELEGRAM_K8S_OPERATOR_CHAT_ID }} 27 | token: ${{ secrets.TELEGRAM_BOT_TOKEN }} 28 | disable_web_page_preview: true 29 | format: markdown 30 | message: | 31 | New issue in *${{github.repository}}*: `${{github.event.issue.title}}` 32 | ${{github.server_url}}/${{github.repository}}/issues/${{github.event.issue.number}} 33 | 34 | 35 | -------------------------------------------------------------------------------- /.github/workflows/main_test.yaml: -------------------------------------------------------------------------------- 1 | name: Test main branch 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | check-unit: 10 | name: Run unit tests 11 | uses: ./.github/workflows/subflow_run_unit_tests.yaml 12 | 13 | check-e2e: 14 | name: Run e2e tests 15 | needs: 16 | - check-unit 17 | uses: ./.github/workflows/subflow_run_e2e_tests.yaml 18 | 19 | check-compat: 20 | name: Run compat tests 21 | needs: 22 | - check-e2e 23 | uses: ./.github/workflows/subflow_run_compat_tests.yaml 24 | 
-------------------------------------------------------------------------------- /.github/workflows/pr_lint.yaml: -------------------------------------------------------------------------------- 1 | name: Run linters 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | check: 8 | name: Run checks 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - name: checkout sources 13 | uses: actions/checkout@v3 14 | with: 15 | fetch-depth: 0 16 | 17 | - name: Setup Go 18 | uses: actions/setup-go@v5 19 | with: 20 | go-version-file: 'go.mod' 21 | 22 | - name: Run linters 23 | shell: bash 24 | run: | 25 | make lint 26 | make lint-generated 27 | -------------------------------------------------------------------------------- /.github/workflows/pr_notifications.yaml: -------------------------------------------------------------------------------- 1 | name: PR notifications 2 | 3 | on: 4 | pull_request_target: 5 | branches: 6 | - main 7 | types: 8 | - opened 9 | 10 | jobs: 11 | pr-notifications: 12 | name: Send PR notifications 13 | if: github.event.pull_request.draft == false 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Send to notification chat 17 | uses: appleboy/telegram-action@master 18 | with: 19 | to: ${{ secrets.TELEGRAM_NOTIFICATIONS_CHAT_ID }} 20 | token: ${{ secrets.TELEGRAM_BOT_TOKEN }} 21 | disable_web_page_preview: true 22 | format: markdown 23 | message: | 24 | New PR in *${{ github.repository }}*: `${{ github.event.pull_request.title }}` 25 | ${{ github.server_url }}/${{ github.repository }}/pull/${{ github.event.number }} 26 | 27 | - name: Send to k8s operator chat 28 | uses: appleboy/telegram-action@master 29 | with: 30 | to: ${{ secrets.TELEGRAM_K8S_OPERATOR_CHAT_ID }} 31 | token: ${{ secrets.TELEGRAM_BOT_TOKEN }} 32 | disable_web_page_preview: true 33 | format: markdown 34 | message: | 35 | New PR in *${{ github.repository }}*: `${{ github.event.pull_request.title }}` 36 | ${{ github.server_url }}/${{ github.repository }}/pull/${{ github.event.number }} 37 | 
38 | -------------------------------------------------------------------------------- /.github/workflows/pr_test_compat.yaml: -------------------------------------------------------------------------------- 1 | name: Run compat tests 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | check: 8 | name: Run compat tests 9 | uses: ./.github/workflows/subflow_run_compat_tests.yaml 10 | -------------------------------------------------------------------------------- /.github/workflows/pr_test_e2e_basic.yaml: -------------------------------------------------------------------------------- 1 | name: Run basic e2e tests 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | check: 8 | name: Run e2e tests for basic scenarios 9 | uses: ./.github/workflows/subflow_run_e2e_tests.yaml 10 | with: 11 | e2e_filter: "basic" 12 | -------------------------------------------------------------------------------- /.github/workflows/pr_test_e2e_extra_one.yaml: -------------------------------------------------------------------------------- 1 | name: Run extra e2e tests (part 1) 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | check: 8 | name: Run extra e2e tests (part 1) 9 | uses: ./.github/workflows/subflow_run_e2e_tests.yaml 10 | with: 11 | e2e_filter: "selector || remote" 12 | -------------------------------------------------------------------------------- /.github/workflows/pr_test_e2e_extra_two.yaml: -------------------------------------------------------------------------------- 1 | name: Run extra e2e tests (part 2) 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | check: 8 | name: Run extra e2e tests (part 2) 9 | uses: ./.github/workflows/subflow_run_e2e_tests.yaml 10 | with: 11 | e2e_filter: "!basic && !selector && !remote" 12 | -------------------------------------------------------------------------------- /.github/workflows/pr_test_unit.yaml: -------------------------------------------------------------------------------- 1 | name: Run unit tests 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 
7 | check: 8 | name: Run unit tests 9 | uses: ./.github/workflows/subflow_run_unit_tests.yaml 10 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | tags: 7 | - 'release/0\.[0-9]+\.[0-9]+' 8 | 9 | jobs: 10 | release: 11 | name: Run release 12 | uses: ./.github/workflows/subflow_release.yaml 13 | secrets: inherit 14 | with: 15 | release_version: ${GITHUB_REF#refs/*/release/} 16 | -------------------------------------------------------------------------------- /.github/workflows/release_nightly.yaml: -------------------------------------------------------------------------------- 1 | name: Build nightly release 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | release: 11 | name: Run release 12 | uses: ./.github/workflows/subflow_release.yaml 13 | secrets: inherit 14 | with: 15 | release_version: 0.0.$(git rev-list --count HEAD)-dev-${GITHUB_SHA} 16 | release_suffix: "-nightly" 17 | -------------------------------------------------------------------------------- /.github/workflows/subflow_release.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_call: 3 | inputs: 4 | release_version: 5 | required: true 6 | type: string 7 | release_suffix: 8 | required: false 9 | type: string 10 | 11 | jobs: 12 | release: 13 | name: Run release 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | - name: checkout sources 18 | uses: actions/checkout@v3 19 | with: 20 | fetch-depth: 0 21 | 22 | - name: Setup Go 23 | uses: actions/setup-go@v5 24 | with: 25 | go-version-file: 'go.mod' 26 | 27 | - name: Set up Helm 28 | uses: azure/setup-helm@v1 29 | with: 30 | version: v3.11.2 31 | 32 | - name: Build 33 | shell: bash 34 | run: | 35 | make build 36 | 37 | - name: Docker Hub login 38 | 
shell: bash 39 | run: | 40 | echo '${{ secrets.DOCKER_HUB_PASSWORD }}' | docker login --username ${{ secrets.DOCKER_HUB_LOGIN}} --password-stdin 41 | 42 | # Uses the `docker/login-action` action to log in to the Container registry 43 | # using the account and password that will publish the packages. 44 | # Once published, the packages are scoped to the account defined here. 45 | - name: GitHub Packages Login 46 | uses: docker/login-action@v3.1.0 47 | with: 48 | registry: ghcr.io 49 | username: ${{ github.actor }} 50 | password: ${{ secrets.GITHUB_TOKEN }} 51 | 52 | - name: Release 53 | shell: bash 54 | env: 55 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 56 | run: | 57 | make release RELEASE_VERSION=${{ inputs.release_version }} RELEASE_SUFFIX=${{ inputs.release_suffix }} 58 | 59 | send-notifications: 60 | name: Send notifications 61 | runs-on: ubuntu-latest 62 | if: ${{ failure() }} 63 | steps: 64 | - name: Send telegram message 65 | uses: appleboy/telegram-action@master 66 | with: 67 | to: ${{ secrets.TELEGRAM_CHAT_ID }} 68 | token: ${{ secrets.TELEGRAM_TOKEN }} 69 | disable_web_page_preview: true 70 | format: markdown 71 | message: | 72 | Workflow *${{github.workflow}}* failed: ${{github.server_url}}/${{github.repository}}/actions/runs/${{github.run_id}}. 
73 | Commit: ${{github.event.head_commit.sha}} 74 | Message: 75 | ``` 76 | ${{github.event.head_commit.message}} 77 | ``` 78 | -------------------------------------------------------------------------------- /.github/workflows/subflow_run_compat_tests.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_call: 3 | 4 | jobs: 5 | check: 6 | name: Run checks 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - name: Free Disk Space (Ubuntu) 11 | uses: jlumbroso/free-disk-space@main 12 | with: 13 | tool-cache: false 14 | 15 | - name: checkout sources 16 | uses: actions/checkout@v3 17 | with: 18 | fetch-depth: 0 19 | 20 | - name: Setup Go 21 | uses: actions/setup-go@v5 22 | with: 23 | go-version-file: 'go.mod' 24 | 25 | - name: Set up Helm 26 | uses: azure/setup-helm@v1 27 | with: 28 | version: v3.11.2 29 | 30 | - name: Build 31 | shell: bash 32 | run: | 33 | make build 34 | 35 | - name: Run compat tests 36 | shell: bash 37 | run: | 38 | make kind-create-cluster 39 | make kind-load-sample-images 40 | ./compat_test.sh --from-version 0.4.1 --to-version trunk 41 | -------------------------------------------------------------------------------- /.github/workflows/subflow_run_e2e_tests.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_call: 3 | inputs: 4 | e2e_filter: 5 | required: false 6 | type: string 7 | 8 | jobs: 9 | check: 10 | name: Run checks 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Free Disk Space (Ubuntu) 15 | uses: jlumbroso/free-disk-space@main 16 | with: 17 | tool-cache: false 18 | 19 | - name: checkout sources 20 | uses: actions/checkout@v3 21 | with: 22 | fetch-depth: 0 23 | 24 | - name: Setup Go 25 | uses: actions/setup-go@v5 26 | with: 27 | go-version-file: 'go.mod' 28 | 29 | - name: Set up Helm 30 | uses: azure/setup-helm@v1 31 | with: 32 | version: v3.11.2 33 | 34 | - name: Build 35 | shell: bash 36 | run: | 37 | make build 38 | 39 | 
- name: Run e2e tests 40 | shell: bash 41 | env: 42 | GINKGO_LABEL_FILTER: ${{ inputs.e2e_filter }} 43 | run: | 44 | make kind-create-cluster 45 | make helm-kind-install 46 | kubectl get pod 47 | make kind-load-test-images 48 | make test-e2e 49 | make helm-uninstall 50 | 51 | - name: Report results 52 | if: always() 53 | uses: dorny/test-reporter@v2 54 | with: 55 | name: "E2E tests" 56 | path: report.xml 57 | reporter: java-junit 58 | -------------------------------------------------------------------------------- /.github/workflows/subflow_run_unit_tests.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | workflow_call: 3 | 4 | jobs: 5 | check: 6 | name: Run checks 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - name: checkout sources 11 | uses: actions/checkout@v3 12 | with: 13 | fetch-depth: 0 14 | 15 | - name: Setup Go 16 | uses: actions/setup-go@v5 17 | with: 18 | go-version-file: 'go.mod' 19 | 20 | - name: Run unit tests 21 | shell: bash 22 | run: | 23 | make test 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | testbin/* 10 | cover.out 11 | report.xml 12 | 13 | # Test binary, build with `go test -c` 14 | *.test 15 | 16 | # Output of the go coverage tool, specifically when used with LiteIDE 17 | *.out 18 | 19 | # Kubernetes Generated files - skip generated files, except for vendored files 20 | 21 | !vendor/**/zz_generated.* 22 | 23 | # editor and IDE paraphernalia 24 | .idea 25 | *.swp 26 | *.swo 27 | *~ 28 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Notice to external contributors 2 | 3 | ## Common 4 | 5 | YTsaurus is a free and open 
project and we appreciate receiving contributions from our community. 6 | 7 | ## Contributing code changes 8 | 9 | If you would like to contribute a new feature or a bug fix, please discuss your idea first in a GitHub issue. 10 | If there is no issue for your idea, please open one. It may be that somebody is already working on it, 11 | or that there are some complex obstacles that you should know about before starting the implementation. 12 | Usually there are several ways to fix a problem and it is important to find the right approach before spending time on a PR 13 | that cannot be merged. We add the `help wanted` label to existing GitHub issues for which community 14 | contributions are particularly welcome, and we use the `good first issue` label to mark issues that we think will be suitable for new contributors. 15 | 16 | ## Provide a contribution 17 | 18 | To make a contribution you should submit a pull request. There will probably be discussion about the pull request and, 19 | if any changes are needed, we would love to work with you to get your pull request merged. 20 | 21 | ## Other questions 22 | 23 | If you have any questions, please mail us at info@ytsaurus.tech. 
24 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.22 as builder 3 | 4 | WORKDIR /workspace 5 | 6 | # Copy the Go Modules manifests 7 | COPY go.mod go.mod 8 | COPY go.sum go.sum 9 | 10 | # cache deps before building and copying source so that we don't need to re-download as much 11 | # and so that source changes don't invalidate our downloaded layer 12 | 13 | RUN go mod download 14 | 15 | # Copy the go source 16 | COPY main.go main.go 17 | COPY api/ api/ 18 | COPY pkg/ pkg/ 19 | COPY controllers/ controllers/ 20 | 21 | # Build 22 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go 23 | 24 | # Use distroless as minimal base image to package the manager binary 25 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 26 | FROM gcr.io/distroless/static:nonroot 27 | WORKDIR / 28 | COPY --from=builder /workspace/manager . 
29 | USER 65532:65532 30 | 31 | ARG VERSION=UNSET 32 | ARG REVISION=UNSET 33 | ARG BUILD_DATE=UNSET 34 | 35 | LABEL org.opencontainers.image.title="YTsaurus Operator for Kubernetes" 36 | LABEL org.opencontainers.image.url="https://ytsaurus.tech" 37 | LABEL org.opencontainers.image.source="https://github.com/ytsaurus/ytsaurus-k8s-operator/" 38 | LABEL org.opencontainers.image.licenses="Apache-2.0" 39 | LABEL org.opencontainers.image.version="${VERSION}" 40 | LABEL org.opencontainers.image.revision="${REVISION}" 41 | LABEL org.opencontainers.image.created="${BUILD_DATE}" 42 | 43 | ENTRYPOINT ["/manager"] 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023 YANDEX LLC 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | # Code generated by tool. DO NOT EDIT. 2 | # This file is used to track the info used to scaffold your project 3 | # and allow the plugins properly work. 
4 | # More info: https://book.kubebuilder.io/reference/project-config.html 5 | domain: ytsaurus.tech 6 | layout: 7 | - go.kubebuilder.io/v3 8 | projectName: ytsaurus-k8s-operator 9 | repo: github.com/ytsaurus/ytsaurus-k8s-operator 10 | resources: 11 | - api: 12 | crdVersion: v1 13 | namespaced: true 14 | controller: true 15 | domain: ytsaurus.tech 16 | group: cluster 17 | kind: Ytsaurus 18 | path: github.com/ytsaurus/ytsaurus-k8s-operator/api/v1 19 | version: v1 20 | webhooks: 21 | defaulting: true 22 | validation: true 23 | webhookVersion: v1 24 | - api: 25 | crdVersion: v1 26 | namespaced: true 27 | controller: true 28 | domain: ytsaurus.tech 29 | group: cluster 30 | kind: Spyt 31 | path: github.com/ytsaurus/ytsaurus-k8s-operator/api/v1 32 | version: v1 33 | webhooks: 34 | defaulting: true 35 | validation: true 36 | webhookVersion: v1 37 | - api: 38 | crdVersion: v1 39 | namespaced: true 40 | controller: true 41 | domain: ytsaurus.tech 42 | group: cluster 43 | kind: Chyt 44 | path: github.com/ytsaurus/ytsaurus-k8s-operator/api/v1 45 | version: v1 46 | webhooks: 47 | defaulting: true 48 | validation: true 49 | webhookVersion: v1 50 | - api: 51 | crdVersion: v1 52 | namespaced: true 53 | domain: ytsaurus.tech 54 | group: cluster 55 | kind: RemoteYtsaurus 56 | path: github.com/ytsaurus/ytsaurus-k8s-operator/api/v1 57 | version: v1 58 | - api: 59 | crdVersion: v1 60 | namespaced: true 61 | controller: true 62 | domain: ytsaurus.tech 63 | group: cluster 64 | kind: RemoteExecNodes 65 | path: github.com/ytsaurus/ytsaurus-k8s-operator/api/v1 66 | version: v1 67 | version: "3" 68 | -------------------------------------------------------------------------------- /ROADMAP.md: -------------------------------------------------------------------------------- 1 | ## Current ytsaurus-k8s-operator development roadmap 2 | 3 | _As new versions are released, we will update tasks in this roadmap with corresponding versions and add new tasks._ 4 | *** 5 | 6 | - [x] Ability to 
configure logs in YTsaurus spec ([0.3.1](https://github.com/ytsaurus/ytsaurus-k8s-operator/releases/tag/release%2F0.3.1)) 7 | - [x] Logs rotation ([0.3.1](https://github.com/ytsaurus/ytsaurus-k8s-operator/releases/tag/release%2F0.3.1)) 8 | - [x] Release SPYT as a separate CRD ([0.3.1](https://github.com/ytsaurus/ytsaurus-k8s-operator/releases/tag/release%2F0.3.1)) 9 | - [x] Ability to create instance groups of proxies with different specification ([0.4.0](https://github.com/ytsaurus/ytsaurus-k8s-operator/releases/tag/release%2F0.4.0)) 10 | - [x] Ability to create instance groups of nodes with different specification ([0.4.0](https://github.com/ytsaurus/ytsaurus-k8s-operator/releases/tag/release%2F0.4.0)) 11 | - [x] Support of update scenario through snapshots and deleting tablet cells ([0.3.1](https://github.com/ytsaurus/ytsaurus-k8s-operator/releases/tag/release%2F0.3.1)) 12 | - [x] Namespace-scoped deploy mode ([0.18.0](https://github.com/ytsaurus/ytsaurus-k8s-operator/releases/tag/release%2F0.18.0)) 13 | - [ ] Support of some cluster reconfiguration scenarios (change volume mounts, instances count, etc) 14 | - [ ] Support of CRI-O as CRI service sidecar 15 | - [ ] Timbertruck Support for YTsaurus Log Delivery: Implement support for timbertruck to facilitate the delivery of YTsaurus logs into YTsaurus queues. 16 | - [ ] Support for partial cluster update with granular component update selectors 17 | - [ ] Support for rolling updates of cluster components 18 | -------------------------------------------------------------------------------- /api/v1/chyt_webhook.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | "k8s.io/apimachinery/pkg/runtime" 21 | ctrl "sigs.k8s.io/controller-runtime" 22 | logf "sigs.k8s.io/controller-runtime/pkg/log" 23 | "sigs.k8s.io/controller-runtime/pkg/webhook" 24 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 25 | ) 26 | 27 | // log is for logging in this package. 28 | var chytlog = logf.Log.WithName("chyt-resource") 29 | 30 | func (r *Chyt) SetupWebhookWithManager(mgr ctrl.Manager) error { 31 | return ctrl.NewWebhookManagedBy(mgr). 32 | For(r). 33 | Complete() 34 | } 35 | 36 | // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 37 | //+kubebuilder:webhook:path=/validate-cluster-ytsaurus-tech-v1-chyt,mutating=false,failurePolicy=fail,sideEffects=None,groups=cluster.ytsaurus.tech,resources=chyts,verbs=create;update,versions=v1,name=vchyt.kb.io,admissionReviewVersions=v1 38 | 39 | var _ webhook.Validator = &Chyt{} 40 | 41 | // ValidateCreate implements webhook.Validator so a webhook will be registered for the type 42 | func (r *Chyt) ValidateCreate() (admission.Warnings, error) { 43 | chytlog.Info("validate create", "name", r.Name) 44 | 45 | // TODO(user): fill in your validation logic upon object creation. 
46 | return nil, nil 47 | } 48 | 49 | // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type 50 | func (r *Chyt) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { 51 | chytlog.Info("validate update", "name", r.Name) 52 | 53 | // TODO(user): fill in your validation logic upon object update. 54 | return nil, nil 55 | } 56 | 57 | // ValidateDelete implements webhook.Validator so a webhook will be registered for the type 58 | func (r *Chyt) ValidateDelete() (admission.Warnings, error) { 59 | chytlog.Info("validate delete", "name", r.Name) 60 | 61 | // TODO(user): fill in your validation logic upon object deletion. 62 | return nil, nil 63 | } 64 | -------------------------------------------------------------------------------- /api/v1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Package v1 contains API Schema definitions for the cluster v1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=cluster.ytsaurus.tech 20 | package v1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "cluster.ytsaurus.tech", Version: "v1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | 37 | YtsaurusGVK = GroupVersion.WithKind("Ytsaurus") 38 | ) 39 | -------------------------------------------------------------------------------- /api/v1/helpers.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | func FindFirstLocation(locations []LocationSpec, locationType LocationType) *LocationSpec { 4 | for _, location := range locations { 5 | if location.LocationType == locationType { 6 | return &location 7 | } 8 | } 9 | return nil 10 | } 11 | 12 | func FindAllLocations(locations []LocationSpec, locationType LocationType) []LocationSpec { 13 | result := make([]LocationSpec, 0) 14 | for _, location := range locations { 15 | if location.LocationType == locationType { 16 | result = append(result, location) 17 | } 18 | } 19 | return result 20 | } 21 | -------------------------------------------------------------------------------- /api/v1/remotedatanodes_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // RemoteDataNodesSpec defines the desired state of RemoteDataNodes 25 | type RemoteDataNodesSpec struct { 26 | RemoteClusterSpec *corev1.LocalObjectReference `json:"remoteClusterSpec"` 27 | CommonSpec `json:",inline"` 28 | DataNodesSpec `json:",inline"` 29 | } 30 | 31 | // RemoteDataNodesStatus defines the observed state of RemoteDataNodes 32 | type RemoteDataNodesStatus struct { 33 | CommonRemoteNodeStatus `json:",inline"` 34 | } 35 | 36 | //+kubebuilder:object:root=true 37 | //+kubebuilder:printcolumn:name="ReleaseStatus",type="string",JSONPath=".status.releaseStatus",description="Release status" 38 | //+kubebuilder:resource:categories=ytsaurus-all;yt-all 39 | //+kubebuilder:subresource:status 40 | 41 | // RemoteDataNodes is the Schema for the remotedatanodes API 42 | type RemoteDataNodes struct { 43 | metav1.TypeMeta `json:",inline"` 44 | metav1.ObjectMeta `json:"metadata,omitempty"` 45 | 46 | Spec RemoteDataNodesSpec `json:"spec,omitempty"` 47 | Status RemoteDataNodesStatus `json:"status,omitempty"` 48 | } 49 | 50 | //+kubebuilder:object:root=true 51 | 52 | // RemoteDataNodesList contains a list of RemoteDataNodes 53 | type RemoteDataNodesList struct { 54 | metav1.TypeMeta `json:",inline"` 55 | metav1.ListMeta `json:"metadata,omitempty"` 56 | Items []RemoteDataNodes `json:"items"` 57 | } 58 | 59 | func init() { 60 | SchemeBuilder.Register(&RemoteDataNodes{}, &RemoteDataNodesList{}) 61 | } 62 
| -------------------------------------------------------------------------------- /api/v1/remoteexecnodes_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // RemoteExecNodesSpec defines the desired state of RemoteExecNodes 25 | type RemoteExecNodesSpec struct { 26 | RemoteClusterSpec *corev1.LocalObjectReference `json:"remoteClusterSpec"` 27 | CommonSpec `json:",inline"` 28 | ExecNodesSpec `json:",inline"` 29 | } 30 | 31 | // RemoteExecNodesStatus defines the observed state of RemoteExecNodes 32 | type RemoteExecNodesStatus struct { 33 | CommonRemoteNodeStatus `json:",inline"` 34 | } 35 | 36 | //+kubebuilder:object:root=true 37 | //+kubebuilder:printcolumn:name="ReleaseStatus",type="string",JSONPath=".status.releaseStatus",description="Release status" 38 | //+kubebuilder:resource:categories=ytsaurus-all;yt-all 39 | //+kubebuilder:subresource:status 40 | 41 | // RemoteExecNodes is the Schema for the remoteexecnodes API 42 | type RemoteExecNodes struct { 43 | metav1.TypeMeta `json:",inline"` 44 | metav1.ObjectMeta `json:"metadata,omitempty"` 45 | 46 | Spec RemoteExecNodesSpec `json:"spec,omitempty"` 47 | Status RemoteExecNodesStatus `json:"status,omitempty"` 48 | } 49 | 50 | 
//+kubebuilder:object:root=true 51 | 52 | // RemoteExecNodesList contains a list of RemoteExecNodes 53 | type RemoteExecNodesList struct { 54 | metav1.TypeMeta `json:",inline"` 55 | metav1.ListMeta `json:"metadata,omitempty"` 56 | Items []RemoteExecNodes `json:"items"` 57 | } 58 | 59 | func init() { 60 | SchemeBuilder.Register(&RemoteExecNodes{}, &RemoteExecNodesList{}) 61 | } 62 | -------------------------------------------------------------------------------- /api/v1/remotetabletnodes_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // RemoteTabletNodesSpec defines the desired state of RemoteTabletNodes 25 | type RemoteTabletNodesSpec struct { 26 | RemoteClusterSpec *corev1.LocalObjectReference `json:"remoteClusterSpec"` 27 | CommonSpec `json:",inline"` 28 | TabletNodesSpec `json:",inline"` 29 | } 30 | 31 | // RemoteTabletNodesStatus defines the observed state of RemoteTabletNodes 32 | type RemoteTabletNodesStatus struct { 33 | CommonRemoteNodeStatus `json:",inline"` 34 | } 35 | 36 | //+kubebuilder:object:root=true 37 | //+kubebuilder:printcolumn:name="ReleaseStatus",type="string",JSONPath=".status.releaseStatus",description="Release status" 38 | //+kubebuilder:resource:categories=ytsaurus-all;yt-all 39 | //+kubebuilder:subresource:status 40 | 41 | // RemoteTabletNodes is the Schema for the remotetabletnodes API 42 | type RemoteTabletNodes struct { 43 | metav1.TypeMeta `json:",inline"` 44 | metav1.ObjectMeta `json:"metadata,omitempty"` 45 | 46 | Spec RemoteTabletNodesSpec `json:"spec,omitempty"` 47 | Status RemoteTabletNodesStatus `json:"status,omitempty"` 48 | } 49 | 50 | //+kubebuilder:object:root=true 51 | 52 | // RemoteTabletNodesList contains a list of RemoteTabletNodes 53 | type RemoteTabletNodesList struct { 54 | metav1.TypeMeta `json:",inline"` 55 | metav1.ListMeta `json:"metadata,omitempty"` 56 | Items []RemoteTabletNodes `json:"items"` 57 | } 58 | 59 | func init() { 60 | SchemeBuilder.Register(&RemoteTabletNodes{}, &RemoteTabletNodesList{}) 61 | } 62 | -------------------------------------------------------------------------------- /api/v1/remoteytsaurus_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 21 | ) 22 | 23 | // RemoteYtsaurusSpec defines the desired state of RemoteYtsaurus 24 | type RemoteYtsaurusSpec struct { 25 | MasterConnectionSpec `json:",inline"` 26 | MasterCachesSpec `json:",inline"` 27 | } 28 | 29 | // RemoteYtsaurusStatus defines the observed state of RemoteYtsaurus 30 | type RemoteYtsaurusStatus struct { 31 | } 32 | 33 | //+kubebuilder:object:root=true 34 | //+kubebuilder:resource:path=remoteytsaurus,categories=ytsaurus-all;yt-all 35 | //+kubebuilder:subresource:status 36 | 37 | // RemoteYtsaurus is the Schema for the remoteytsauruses API 38 | type RemoteYtsaurus struct { 39 | metav1.TypeMeta `json:",inline"` 40 | metav1.ObjectMeta `json:"metadata,omitempty"` 41 | 42 | Spec RemoteYtsaurusSpec `json:"spec,omitempty"` 43 | Status RemoteYtsaurusStatus `json:"status,omitempty"` 44 | } 45 | 46 | //+kubebuilder:object:root=true 47 | 48 | // RemoteYtsaurusList contains a list of RemoteYtsaurus 49 | type RemoteYtsaurusList struct { 50 | metav1.TypeMeta `json:",inline"` 51 | metav1.ListMeta `json:"metadata,omitempty"` 52 | Items []RemoteYtsaurus `json:"items"` 53 | } 54 | 55 | func init() { 56 | SchemeBuilder.Register(&RemoteYtsaurus{}, &RemoteYtsaurusList{}) 57 | } 58 | 59 | // RemoteYtsaurus doesn't have a reconciler, so we put rbac markers here.
60 | //+kubebuilder:rbac:groups=cluster.ytsaurus.tech,resources=remoteytsaurus,verbs=get;list;watch;create;update;patch;delete 61 | //+kubebuilder:rbac:groups=cluster.ytsaurus.tech,resources=remoteytsaurus/status,verbs=get;update;patch 62 | //+kubebuilder:rbac:groups=cluster.ytsaurus.tech,resources=remoteytsaurus/finalizers,verbs=update 63 | -------------------------------------------------------------------------------- /api/v1/spyt_webhook.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | "k8s.io/apimachinery/pkg/runtime" 21 | ctrl "sigs.k8s.io/controller-runtime" 22 | logf "sigs.k8s.io/controller-runtime/pkg/log" 23 | "sigs.k8s.io/controller-runtime/pkg/webhook" 24 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 25 | ) 26 | 27 | // log is for logging in this package. 28 | var spytlog = logf.Log.WithName("spyt-resource") 29 | 30 | func (r *Spyt) SetupWebhookWithManager(mgr ctrl.Manager) error { 31 | return ctrl.NewWebhookManagedBy(mgr). 32 | For(r). 33 | Complete() 34 | } 35 | 36 | // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
37 | //+kubebuilder:webhook:path=/validate-cluster-ytsaurus-tech-v1-spyt,mutating=false,failurePolicy=fail,sideEffects=None,groups=cluster.ytsaurus.tech,resources=spyts,verbs=create;update,versions=v1,name=vspyt.kb.io,admissionReviewVersions=v1 38 | 39 | var _ webhook.Validator = &Spyt{} 40 | 41 | // ValidateCreate implements webhook.Validator so a webhook will be registered for the type 42 | func (r *Spyt) ValidateCreate() (admission.Warnings, error) { 43 | spytlog.Info("validate create", "name", r.Name) 44 | 45 | // TODO(user): fill in your validation logic upon object creation. 46 | return nil, nil 47 | } 48 | 49 | // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type 50 | func (r *Spyt) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { 51 | spytlog.Info("validate update", "name", r.Name) 52 | 53 | // TODO(user): fill in your validation logic upon object update. 54 | return nil, nil 55 | } 56 | 57 | // ValidateDelete implements webhook.Validator so a webhook will be registered for the type 58 | func (r *Spyt) ValidateDelete() (admission.Warnings, error) { 59 | spytlog.Info("validate delete", "name", r.Name) 60 | 61 | // TODO(user): fill in your validation logic upon object deletion. 
62 | return nil, nil 63 | } 64 | -------------------------------------------------------------------------------- /api/v1/ytsaurus_types_test.go: -------------------------------------------------------------------------------- 1 | package v1_test 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | 7 | "github.com/google/go-cmp/cmp" 8 | "github.com/stretchr/testify/require" 9 | corev1 "k8s.io/api/core/v1" 10 | "k8s.io/apimachinery/pkg/api/resource" 11 | "k8s.io/utils/ptr" 12 | k8syaml "sigs.k8s.io/yaml" 13 | 14 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 15 | ) 16 | 17 | var testSpec = ytv1.YtsaurusSpec{ 18 | CommonSpec: ytv1.CommonSpec{ 19 | CoreImage: "img", 20 | UseIPv6: true, 21 | UseIPv4: false, 22 | }, 23 | PrimaryMasters: ytv1.MastersSpec{ 24 | InstanceSpec: ytv1.InstanceSpec{ 25 | Tolerations: []corev1.Toleration{ 26 | { 27 | Key: "base-toleration", 28 | }, 29 | }, 30 | VolumeClaimTemplates: []ytv1.EmbeddedPersistentVolumeClaim{ 31 | { 32 | EmbeddedObjectMetadata: ytv1.EmbeddedObjectMetadata{ 33 | Name: "master-data", 34 | }, 35 | Spec: corev1.PersistentVolumeClaimSpec{ 36 | StorageClassName: ptr.To[string]("className"), 37 | Resources: corev1.VolumeResourceRequirements{ 38 | Requests: corev1.ResourceList{ 39 | corev1.ResourceStorage: resource.MustParse("1Gi"), 40 | }, 41 | }, 42 | }, 43 | }, 44 | }, 45 | }, 46 | }, 47 | HTTPProxies: []ytv1.HTTPProxiesSpec{ 48 | { 49 | InstanceSpec: ytv1.InstanceSpec{ 50 | InstanceCount: 1, 51 | }, 52 | ServiceType: corev1.ServiceTypeClusterIP, 53 | }, 54 | }, 55 | } 56 | 57 | type marshalFunc func(v any) ([]byte, error) 58 | type unmarshalFunc func(data []byte, v any) error 59 | 60 | func TestMarshallUnmarshall(t *testing.T) { 61 | t.Run("JSON", func(t *testing.T) { 62 | testMarshallUnmarshall(t, json.Marshal, json.Unmarshal) 63 | }) 64 | t.Run("YAML-JSON", func(t *testing.T) { 65 | testMarshallUnmarshall( 66 | t, 67 | k8syaml.Marshal, 68 | func(data []byte, v any) error { 69 | return 
k8syaml.Unmarshal(data, v) 70 | }, 71 | ) 72 | }) 73 | } 74 | 75 | func testMarshallUnmarshall(t *testing.T, marshall marshalFunc, unmarshall unmarshalFunc) { 76 | serialized, err := marshall(testSpec) 77 | require.NoError(t, err) 78 | 79 | deserializedSpec := &ytv1.YtsaurusSpec{} 80 | err = unmarshall(serialized, deserializedSpec) 81 | require.NoError(t, err) 82 | 83 | require.Empty(t, cmp.Diff(testSpec, *deserializedSpec)) 84 | 85 | reSerialized, err := marshall(deserializedSpec) 86 | require.NoError(t, err) 87 | require.Empty(t, cmp.Diff(serialized, reSerialized)) 88 | } 89 | -------------------------------------------------------------------------------- /compat_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | script_name=$0 4 | 5 | from_version="0.4.1" 6 | to_version="trunk" 7 | 8 | print_usage() { 9 | cat << EOF 10 | Usage: $script_name [-h|--help] 11 | [--from-version (default: $from_version)] 12 | [--to-version (default: $to_version)] 13 | EOF 14 | exit 1 15 | } 16 | 17 | # Parse options 18 | while [[ $# -gt 0 ]]; do 19 | key="$1" 20 | case $key in 21 | --from-version) 22 | from_version="$2" 23 | shift 2 24 | ;; 25 | --to-version) 26 | to_version="$2" 27 | shift 2 28 | ;; 29 | *) # unknown option 30 | echo "Unknown argument $1" 31 | print_usage 32 | ;; 33 | esac 34 | done 35 | 36 | step=0 37 | 38 | helm install ytsaurus oci://registry-1.docker.io/ytsaurus/ytop-chart --version ${from_version} 39 | 40 | until [ $step -eq 30 ] || kubectl get pod | grep "ytsaurus-ytop-chart-controller-manager" | grep "2/2" | grep "Running"; do 41 | echo "Waiting for controller pods" 42 | sleep 10 43 | let step=step+1 44 | done 45 | 46 | kubectl apply -f config/samples/${from_version}/cluster_v1_minikube.yaml 47 | 48 | let step=0 49 | until [ $step -eq 60 ] || kubectl get ytsaurus | grep "Running"; do 50 | echo "Waiting for ytsaurus is Running" 51 | sleep 10 52 | let step=step+1 53 | done 54 | 55 | if [[ 
"$to_version" == "trunk" ]]; then 56 | docker build -t ytsaurus/k8s-operator:0.0.0-alpha . 57 | kind load docker-image ytsaurus/k8s-operator:0.0.0-alpha 58 | helm upgrade ytsaurus ytop-chart 59 | else 60 | helm upgrade ytsaurus --install oci://docker.io/ytsaurus/ytop-chart --version ${to_version} 61 | fi 62 | 63 | sleep 10 64 | let step=0 65 | until [ $step -eq 30 ] || kubectl get pod | grep "ytsaurus-ytop-chart-controller-manager" | grep "2/2" | grep "Running"; do 66 | echo "Waiting for controller pods" 67 | sleep 10 68 | let step=step+1 69 | done 70 | 71 | sleep 20 72 | 73 | let step=0 74 | until [ $step -eq 60 ] || kubectl get ytsaurus | grep "Running"; do 75 | echo "Waiting for ytsaurus is Running again" 76 | sleep 10 77 | let step=step+1 78 | done 79 | 80 | helm uninstall ytsaurus 81 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/crd-ref-docs/config.yaml: -------------------------------------------------------------------------------- 1 | processor: 2 | # RE2 regular expressions describing types that
should be excluded from the generated documentation. 3 | ignoreTypes: 4 | - "(Chyt|RemoteExecNodes|RemoteYtsaurus|Spyt|Ytsaurus)List$" 5 | # - "CommonSpec$" 6 | # RE2 regular expressions describing type fields that should be excluded from the generated documentation. 7 | ignoreFields: 8 | - "TypeMeta$" 9 | 10 | render: 11 | # Version of Kubernetes to use when generating links to Kubernetes API documentation. 12 | kubernetesVersion: 1.28 13 | # Generate better link for known types 14 | knownTypes: 15 | - name: SecretObjectReference 16 | package: sigs.k8s.io/gateway-api/apis/v1beta1 17 | link: https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.SecretObjectReference 18 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/cluster.ytsaurus.tech_ytsaurus.yaml 6 | - bases/cluster.ytsaurus.tech_spyts.yaml 7 | - bases/cluster.ytsaurus.tech_chyts.yaml 8 | - bases/cluster.ytsaurus.tech_remoteytsaurus.yaml 9 | - bases/cluster.ytsaurus.tech_remoteexecnodes.yaml 10 | - bases/cluster.ytsaurus.tech_remotedatanodes.yaml 11 | - bases/cluster.ytsaurus.tech_remotetabletnodes.yaml 12 | #+kubebuilder:scaffold:crdkustomizeresource 13 | 14 | patches: 15 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
16 | # patches here are for enabling the conversion webhook for each CRD 17 | - path: patches/webhook_in_ytsaurus.yaml 18 | - path: patches/webhook_in_spyts.yaml 19 | - path: patches/webhook_in_chyts.yaml 20 | - path: patches/webhook_in_remoteytsaurus.yaml 21 | - path: patches/webhook_in_remoteexecnodes.yaml 22 | - path: patches/webhook_in_remotedatanodes.yaml 23 | - path: patches/webhook_in_remotetabletnodes.yaml 24 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 25 | 26 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 27 | # patches here are for enabling the CA injection for each CRD 28 | - path: patches/cainjection_in_ytsaurus.yaml 29 | - path: patches/cainjection_in_spyts.yaml 30 | - path: patches/cainjection_in_chyts.yaml 31 | - path: patches/cainjection_in_remoteytsaurus.yaml 32 | - path: patches/cainjection_in_remoteexecnodes.yaml 33 | - path: patches/cainjection_in_remotedatanodes.yaml 34 | - path: patches/cainjection_in_remotetabletnodes.yaml 35 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 36 | 37 | # the following config is for teaching kustomize how to do kustomization for CRDs. 
38 | configurations: 39 | - kustomizeconfig.yaml 40 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_chyts.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(WEBHOOK_CERTIFICATE_NAMESPACE)/$(WEBHOOK_CERTIFICATE_NAME) 7 | name: chyts.cluster.ytsaurus.tech 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_remotedatanodes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(WEBHOOK_CERTIFICATE_NAMESPACE)/$(WEBHOOK_CERTIFICATE_NAME) 7 | name: remotedatanodes.cluster.ytsaurus.tech 8 | -------------------------------------------------------------------------------- 
/config/crd/patches/cainjection_in_remoteexecnodes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(WEBHOOK_CERTIFICATE_NAMESPACE)/$(WEBHOOK_CERTIFICATE_NAME) 7 | name: remoteexecnodes.cluster.ytsaurus.tech 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_remotetabletnodes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(WEBHOOK_CERTIFICATE_NAMESPACE)/$(WEBHOOK_CERTIFICATE_NAME) 7 | name: remotetabletnodes.cluster.ytsaurus.tech 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_remoteytsaurus.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(WEBHOOK_CERTIFICATE_NAMESPACE)/$(WEBHOOK_CERTIFICATE_NAME) 7 | name: remoteytsaurus.cluster.ytsaurus.tech 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_spyts.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 
| cert-manager.io/inject-ca-from: $(WEBHOOK_CERTIFICATE_NAMESPACE)/$(WEBHOOK_CERTIFICATE_NAME) 7 | name: spyts.cluster.ytsaurus.tech 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(WEBHOOK_CERTIFICATE_NAMESPACE)/$(WEBHOOK_CERTIFICATE_NAME) 7 | name: ytsaurus.cluster.ytsaurus.tech 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_chyts.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: chyts.cluster.ytsaurus.tech 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_remotedatanodes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: remotedatanodes.cluster.ytsaurus.tech 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | 
-------------------------------------------------------------------------------- /config/crd/patches/webhook_in_remoteexecnodes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: remoteexecnodes.cluster.ytsaurus.tech 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_remotetabletnodes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: remotetabletnodes.cluster.ytsaurus.tech 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_remoteytsaurus.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: remoteytsaurus.cluster.ytsaurus.tech 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- 
/config/crd/patches/webhook_in_spyts.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: spyts.cluster.ytsaurus.tech 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: ytsaurus.cluster.ytsaurus.tech 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/default/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | varReference: 2 | - kind: Deployment 3 | path: spec/template/spec/volumes/secret 4 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | securityContext: 14 | allowPrivilegeEscalation: false 15 | capabilities: 16 | drop: 17 | - "ALL" 18 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 19 | args: 20 | - "--secure-listen-address=:8443" 21 | - "--upstream=http://127.0.0.1:8080/" 22 | - "--logtostderr=true" 23 | - "--tls-cert-file=/etc/certs/tls/tls.crt" 24 | - "--tls-private-key-file=/etc/certs/tls/tls.key" 25 | - "--v=0" 26 | ports: 27 | - containerPort: 8443 28 | protocol: TCP 29 | name: https 30 | resources: 31 | limits: 32 | cpu: 500m 33 | memory: 128Mi 34 | requests: 35 | cpu: 5m 36 | memory: 64Mi 37 | volumeMounts: 38 | - mountPath: /etc/certs/tls 39 | name: metrics-cert 40 | readOnly: true 41 | - name: manager 42 | args: 43 | - "--health-probe-bind-address=:8081" 44 | - "--metrics-bind-address=127.0.0.1:8080" 45 | - "--leader-elect" 46 | volumes: 47 | - name: metrics-cert 48 | secret: 49 | defaultMode: 420 50 | secretName: $(METRICS_SECRET_NAME) 51 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | args: 12 | - "--config=controller_manager_config.yaml" 13 | volumeMounts: 14 | - name: manager-config 15 | mountPath: /controller_manager_config.yaml 16 | subPath: controller_manager_config.yaml 17 | volumes: 18 | - name: manager-config 19 | configMap: 20 | name: manager-config 21 | -------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | ports: 12 | - containerPort: 9443 13 | name: webhook-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp/k8s-webhook-server/serving-certs 17 | name: webhook-cert 18 | readOnly: true 19 | volumes: 20 | - name: webhook-cert 21 | secret: 22 | defaultMode: 420 23 | secretName: $(WEBHOOK_SECRET_NAME) 24 | -------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch adds an annotation to the admission webhook config and 2 | # the variables $(WEBHOOK_CERTIFICATE_NAMESPACE) and $(WEBHOOK_CERTIFICATE_NAME) will be substituted by kustomize. 3 | apiVersion: admissionregistration.k8s.io/v1 4 | kind: ValidatingWebhookConfiguration 5 | metadata: 6 | labels: 7 | app.kubernetes.io/name: validatingwebhookconfiguration 8 | app.kubernetes.io/instance: validating-webhook-configuration 9 | app.kubernetes.io/component: webhook 10 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 11 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 12 | app.kubernetes.io/managed-by: kustomize 13 | name: validating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: $(WEBHOOK_CERTIFICATE_NAMESPACE)/$(WEBHOOK_CERTIFICATE_NAME) 16 | -------------------------------------------------------------------------------- /config/helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: ${name} 3 | description: A Helm chart for Kubernetes 4 | type: application 5 | version: "${version:-0.0.0-alpha}" 6 | appVersion: "${version:-0.0.0-alpha}" 7 | sources: 8 | - 
https://github.com/ytsaurus/ytsaurus-k8s-operator 9 | -------------------------------------------------------------------------------- /config/helm/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../crd 3 | 4 | patches: 5 | - target: 6 | kind: CustomResourceDefinition 7 | path: patches/cainjection.yaml 8 | - target: 9 | kind: CustomResourceDefinition 10 | path: patches/webhook.yaml 11 | -------------------------------------------------------------------------------- /config/helm/patches/cainjection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: any 5 | annotations: 6 | cert-manager.io/inject-ca-from: '{{ .Release.Namespace }}/{{ include "${name}.fullname" . }}-webhook-cert' 7 | -------------------------------------------------------------------------------- /config/helm/patches/webhook.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: any 5 | spec: 6 | conversion: 7 | strategy: Webhook 8 | webhook: 9 | clientConfig: 10 | service: 11 | name: '{{ include "${name}.fullname" . 
}}-webhook-service' 12 | namespace: '{{ .Release.Namespace }}' 13 | path: /convert 14 | conversionReviewVersions: 15 | - v1 16 | -------------------------------------------------------------------------------- /config/kind/audit-policy.yaml: -------------------------------------------------------------------------------- 1 | # https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/ 2 | 3 | apiVersion: audit.k8s.io/v1 4 | kind: Policy 5 | rules: 6 | # - level: Metadata 7 | - level: RequestResponse 8 | -------------------------------------------------------------------------------- /config/kind/kind-with-registry.yaml: -------------------------------------------------------------------------------- 1 | # https://kind.sigs.k8s.io/docs/user/configuration/ 2 | 3 | kind: Cluster 4 | apiVersion: kind.x-k8s.io/v1alpha4 5 | 6 | # overridden by environment KIND_CLUSTER_NAME 7 | name: kind 8 | 9 | # https://kind.sigs.k8s.io/docs/user/configuration/#networking 10 | networking: 11 | # ipFamily: dual 12 | # ipFamily: ipv6 13 | # podSubnet: "10.244.0.0/16" 14 | # serviceSubnet: "10.96.0.0/12" 15 | # disableDefaultCNI: true 16 | # kubeProxyMode: "ipvs" 17 | # kubeProxyMode: "none" 18 | 19 | runtimeConfig: 20 | # "api/alpha": "true" 21 | 22 | # https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/ 23 | featureGates: 24 | # "KubeletCgroupDriverFromCRI": true 25 | # "NewVolumeManagerReconstruction": true 26 | # "InPlacePodVerticalScaling": true 27 | # "MemoryQoS": true 28 | # "CustomCPUCFSQuotaPeriod": true 29 | # "StatefulSetStartOrdinal": true 30 | # "StatefulSetAutoDeletePVC": true 31 | 32 | # https://kind.sigs.k8s.io/docs/user/local-registry/ 33 | # https://github.com/containerd/containerd/blob/main/docs/hosts.md 34 | containerdConfigPatches: 35 | - |- 36 | [plugins."io.containerd.grpc.v1.cri".registry] 37 | config_path = "/etc/containerd/certs.d" 38 | 39 | # https://kind.sigs.k8s.io/docs/user/configuration/#nodes 40 | nodes: 41 | - role: 
control-plane 42 | # https://kind.sigs.k8s.io/docs/user/configuration#kubeadm-config-patches 43 | # https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/control-plane-flags/#etcd-flags 44 | # https://etcd.io/docs/v3.5/op-guide/configuration/#command-line-flags 45 | kubeadmConfigPatches: 46 | - | 47 | kind: ClusterConfiguration 48 | etcd: 49 | local: 50 | extraArgs: 51 | unsafe-no-fsync: "true" 52 | extraMounts: 53 | - &registry-config 54 | hostPath: "config/registry" 55 | containerPath: "/etc/containerd/certs.d" 56 | readOnly: true 57 | - role: worker 58 | extraMounts: 59 | - *registry-config 60 | - role: worker 61 | extraMounts: 62 | - *registry-config 63 | - role: worker 64 | extraMounts: 65 | - *registry-config 66 | -------------------------------------------------------------------------------- /config/manager/controller_manager_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 2 | kind: ControllerManagerConfig 3 | health: 4 | healthProbeBindAddress: :8081 5 | metrics: 6 | bindAddress: 127.0.0.1:8080 7 | webhook: 8 | port: 9443 9 | leaderElection: 10 | leaderElect: true 11 | resourceName: 6ab077f0.ytsaurus.tech 12 | # leaderElectionReleaseOnCancel defines if the leader should step down voluntarily 13 | # when the Manager ends. This requires the binary to immediately end when the 14 | # Manager is stopped, otherwise, this setting is unsafe. Setting this significantly 15 | # speeds up voluntary leader transitions as the new leader doesn't have to wait 16 | # LeaseDuration time first. 17 | # In the default scaffold provided, the program ends immediately after 18 | # the manager stops, so it would be fine to enable this option. However, 19 | # if you are doing or intend to do any operation such as performing cleanups 20 | # after the manager stops then its usage might be unsafe. 
21 | # leaderElectionReleaseOnCancel: true 22 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | generatorOptions: 5 | disableNameSuffixHash: true 6 | 7 | configMapGenerator: 8 | - files: 9 | - controller_manager_config.yaml 10 | name: manager-config 11 | apiVersion: kustomize.config.k8s.io/v1beta1 12 | kind: Kustomization 13 | images: 14 | - name: controller 15 | newName: ytsaurus/k8s-operator 16 | newTag: 0.0.0-alpha 17 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane: controller-manager 19 | replicas: 1 20 | revisionHistoryLimit: 10 21 | template: 22 | metadata: 23 | annotations: 24 | kubectl.kubernetes.io/default-container: manager 25 | labels: 26 | control-plane: controller-manager 27 | spec: 28 | securityContext: 29 | runAsNonRoot: true 30 | # TODO(user): For common cases that do not require escalating privileges 31 | # it is recommended to ensure that all your Pods/Containers are restrictive. 32 | # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted 33 | # Please uncomment the following code if your project does NOT have to work on old Kubernetes 34 | # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 
35 | # seccompProfile: 36 | # type: RuntimeDefault 37 | containers: 38 | - command: 39 | - /manager 40 | args: 41 | - --leader-elect 42 | image: controller:latest 43 | name: manager 44 | env: 45 | - name: YT_LOG_LEVEL 46 | value: DEBUG 47 | - name: WATCH_NAMESPACE 48 | value: "" 49 | securityContext: 50 | allowPrivilegeEscalation: false 51 | capabilities: 52 | drop: 53 | - "ALL" 54 | livenessProbe: 55 | httpGet: 56 | path: /healthz 57 | port: 8081 58 | initialDelaySeconds: 15 59 | periodSeconds: 20 60 | readinessProbe: 61 | httpGet: 62 | path: /readyz 63 | port: 8081 64 | initialDelaySeconds: 5 65 | periodSeconds: 10 66 | # TODO(user): Configure the resources accordingly based on the project requirements. 67 | # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 68 | resources: 69 | limits: 70 | cpu: 500m 71 | memory: 128Mi 72 | requests: 73 | cpu: 10m 74 | memory: 64Mi 75 | serviceAccountName: controller-manager 76 | terminationGracePeriodSeconds: 10 77 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | scheme: https 15 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | selector: 19 | matchLabels: 20 | control-plane: controller-manager 21 | 
-------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: 7 | - "/metrics" 8 | verbs: 9 | - get 10 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | protocol: TCP 13 | targetPort: https 14 | selector: 15 | control-plane: controller-manager 16 | 
-------------------------------------------------------------------------------- /config/rbac/chyt_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit chyts. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: chyt-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 10 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: chyt-editor-role 13 | rules: 14 | - apiGroups: 15 | - cluster.ytsaurus.tech 16 | resources: 17 | - chyts 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - cluster.ytsaurus.tech 28 | resources: 29 | - chyts/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/chyt_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view chyts. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: chyt-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 10 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: chyt-viewer-role 13 | rules: 14 | - apiGroups: 15 | - cluster.ytsaurus.tech 16 | resources: 17 | - chyts 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - cluster.ytsaurus.tech 24 | resources: 25 | - chyts/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/remotedatanodes_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit remotedatanodes. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: remotedatanodes-editor-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ytsaurus.tech 9 | resources: 10 | - remotedatanodes 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - cluster.ytsaurus.tech 21 | resources: 22 | - remotedatanodes/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/remotedatanodes_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view remotedatanodes. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: remotedatanodes-viewer-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ytsaurus.tech 9 | resources: 10 | - remotedatanodes 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - cluster.ytsaurus.tech 17 | resources: 18 | - remotedatanodes/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/remoteexecnodes_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit remoteexecnodes. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: remoteexecnodes-editor-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ytsaurus.tech 9 | resources: 10 | - remoteexecnodes 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - cluster.ytsaurus.tech 21 | resources: 22 | - remoteexecnodes/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/remoteexecnodes_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view remoteexecnodes. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: remoteexecnodes-viewer-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ytsaurus.tech 9 | resources: 10 | - remoteexecnodes 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - cluster.ytsaurus.tech 17 | resources: 18 | - remoteexecnodes/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/remoteytsaurus_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit remoteytsauruses. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: remoteytsaurus-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 10 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: remoteytsaurus-editor-role 13 | rules: 14 | - apiGroups: 15 | - cluster.ytsaurus.tech 16 | resources: 17 | - remoteytsauruses 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - cluster.ytsaurus.tech 28 | resources: 29 | - remoteytsauruses/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/remoteytsaurus_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view remoteytsauruses. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: remoteytsaurus-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 10 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: remoteytsaurus-viewer-role 13 | rules: 14 | - apiGroups: 15 | - cluster.ytsaurus.tech 16 | resources: 17 | - remoteytsauruses 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - cluster.ytsaurus.tech 24 | resources: 25 | - remoteytsauruses/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | - events 12 | - pod 13 | - pods 14 | - secrets 15 | - services 16 | verbs: 17 | - create 18 | - delete 19 | - get 20 | - list 21 | - patch 22 | - update 23 | - watch 24 | - apiGroups: 25 | - "" 26 | resources: 27 | - pod/status 28 | verbs: 29 | - get 30 | - apiGroups: 31 | - apps 32 | resources: 33 | - deployments 34 | - statefulset 35 | - statefulsets 36 | verbs: 37 | - create 38 | - delete 39 | - get 40 | - list 41 | - patch 42 | - update 43 | - watch 44 | - apiGroups: 45 | - apps 46 | resources: 47 | - statefulset/status 48 | verbs: 49 | - get 50 | - apiGroups: 51 | - batch 52 | resources: 53 | - jobs 54 | verbs: 55 | - create 56 | - delete 57 | - get 58 | - list 59 | - patch 60 | - update 61 | - watch 62 | - apiGroups: 63 | - cluster.ytsaurus.tech 64 | resources: 65 | - chyts 66 | - remotedatanodes 67 | - remoteexecnodes 68 | - remotetabletnodes 69 | - remoteytsaurus 70 | - spyts 71 | - ytsaurus 72 | verbs: 73 | - 
create 74 | - delete 75 | - get 76 | - list 77 | - patch 78 | - update 79 | - watch 80 | - apiGroups: 81 | - cluster.ytsaurus.tech 82 | resources: 83 | - chyts/finalizers 84 | - remotedatanodes/finalizers 85 | - remoteexecnodes/finalizers 86 | - remotetabletnodes/finalizers 87 | - remoteytsaurus/finalizers 88 | - spyts/finalizers 89 | - ytsaurus/finalizers 90 | verbs: 91 | - update 92 | - apiGroups: 93 | - cluster.ytsaurus.tech 94 | resources: 95 | - chyts/status 96 | - remotedatanodes/status 97 | - remoteexecnodes/status 98 | - remotetabletnodes/status 99 | - remoteytsaurus/status 100 | - spyts/status 101 | - ytsaurus/status 102 | verbs: 103 | - get 104 | - patch 105 | - update 106 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | -------------------------------------------------------------------------------- /config/rbac/spyt_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit spyts. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: spyt-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 10 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: spyt-editor-role 13 | rules: 14 | - apiGroups: 15 | - cluster.ytsaurus.tech 16 | resources: 17 | - spyts 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - cluster.ytsaurus.tech 28 | resources: 29 | - spyts/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/spyt_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view spyts. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: spyt-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 10 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: spyt-viewer-role 13 | rules: 14 | - apiGroups: 15 | - cluster.ytsaurus.tech 16 | resources: 17 | - spyts 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - cluster.ytsaurus.tech 24 | resources: 25 | - spyts/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/ytsaurus_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit ytsaurus. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: ytsaurus-editor-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ytsaurus.tech 9 | resources: 10 | - ytsaurus 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - cluster.ytsaurus.tech 21 | resources: 22 | - ytsaurus/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/ytsaurus_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view ytsaurus. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: ytsaurus-viewer-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ytsaurus.tech 9 | resources: 10 | - ytsaurus 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - cluster.ytsaurus.tech 17 | resources: 18 | - ytsaurus/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/registry/.gitignore: -------------------------------------------------------------------------------- 1 | */ 2 | -------------------------------------------------------------------------------- /config/samples/cluster_v1_chyt.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.ytsaurus.tech/v1 2 | kind: Chyt 3 | metadata: 4 | name: mychyt 5 | spec: 6 | ytsaurus: 7 | name: 8 | minisaurus 9 | image: ghcr.io/ytsaurus/chyt:2.14.0-relwithdebinfo 10 | makeDefault: true 11 | createPublicClique: true 12 | -------------------------------------------------------------------------------- /config/samples/cluster_v1_remoteytsaurus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.ytsaurus.tech/v1 2 | kind: RemoteYtsaurus 3 | metadata: 4 | name: remote-ytsaurus 5 | 
spec: 6 | cellTag: 1 7 | # FIXME: Must be optional. 8 | cellTagMasterCaches: 1 9 | # FIXME Lookup master endpoints via service. 10 | hostAddresses: 11 | - ms-0.masters.ytsaurus.svc 12 | -------------------------------------------------------------------------------- /config/samples/cluster_v1_spyt.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.ytsaurus.tech/v1 2 | kind: Spyt 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: spyt 6 | app.kubernetes.io/instance: spyt-sample 7 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 10 | name: myspyt 11 | spec: 12 | ytsaurus: 13 | name: 14 | minisaurus 15 | image: ghcr.io/ytsaurus/spyt:1.78.0 16 | -------------------------------------------------------------------------------- /config/samples/jupyter/jupyter-demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: jupyterlab 5 | labels: 6 | name: jupyterlab 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 80 11 | targetPort: 8888 12 | protocol: TCP 13 | name: http 14 | selector: 15 | name: jupyterlab 16 | 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: jupyterlab-headless 22 | labels: 23 | name: jupyterlab 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | selector: 28 | name: jupyterlab 29 | 30 | --- 31 | apiVersion: apps/v1 32 | kind: StatefulSet 33 | metadata: 34 | name: jupyterlab 35 | labels: 36 | name: jupyterlab 37 | spec: 38 | replicas: 1 39 | serviceName: jupyter-headless 40 | selector: 41 | matchLabels: 42 | name: jupyterlab 43 | template: 44 | metadata: 45 | labels: 46 | name: jupyterlab 47 | spec: 48 | containers: 49 | - name: jupyterlab 50 | image: ytsaurus/jupyter-tutorial:0.0.23 51 | env: 52 | - name: JUPYTER_TOKEN 53 | valueFrom: 54 | secretKeyRef: 55 | name: 
ytadminsec 56 | key: token 57 | - name: YT_TOKEN 58 | valueFrom: 59 | secretKeyRef: 60 | name: ytadminsec 61 | key: token 62 | - name: YT_PROXY 63 | value: http-proxies-lb.default.svc.cluster.local 64 | - name: CHYT_CTL_ADDRESS 65 | value: chyt.default.svc.cluster.local 66 | - name: YT_UI_URL 67 | value: https://my-ip:port/ytdemo 68 | command: 69 | - /bin/bash 70 | - -c 71 | - | 72 | start.sh jupyter lab --ip='0.0.0.0' --port 8888 --notebook-dir=/home/jovyan/tutorial 73 | resources: 74 | requests: 75 | cpu: 250m 76 | memory: 2G 77 | restartPolicy: Always 78 | setHostnameAsFQDN: true -------------------------------------------------------------------------------- /config/samples/prometheus/prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | name: prometheus 5 | spec: 6 | serviceAccountName: prometheus 7 | resources: 8 | requests: 9 | memory: 400Mi 10 | enableAdminAPI: true 11 | 12 | serviceMonitorSelector: 13 | matchLabels: 14 | yt_metrics: "true" 15 | 16 | -------------------------------------------------------------------------------- /config/samples/prometheus/prometheus_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: prometheus 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: prometheus 9 | subjects: 10 | - kind: ServiceAccount 11 | name: prometheus 12 | -------------------------------------------------------------------------------- /config/samples/prometheus/prometheus_service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: prometheus 5 | -------------------------------------------------------------------------------- 
/config/samples/prometheus/prometheus_service_monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: ytsaurus-metrics 5 | labels: 6 | yt_metrics: "true" 7 | spec: 8 | namespaceSelector: 9 | any: false 10 | selector: 11 | matchLabels: 12 | yt_metrics: "true" 13 | endpoints: 14 | - port: ytsaurus-metrics 15 | path: /solomon/all 16 | metricRelabelings: 17 | - targetLabel: service 18 | sourceLabels: 19 | - service 20 | regex: (.*)-monitoring 21 | replacement: ${1} -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manifests.yaml 3 | - service.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | -------------------------------------------------------------------------------- /config/webhook/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # the following config is for teaching kustomize where to look at when substituting vars. 2 | # It requires kustomize v2.1.0 or newer to work properly. 
3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: MutatingWebhookConfiguration 8 | group: admissionregistration.k8s.io 9 | path: webhooks/clientConfig/service/name 10 | - kind: ValidatingWebhookConfiguration 11 | group: admissionregistration.k8s.io 12 | path: webhooks/clientConfig/service/name 13 | 14 | namespace: 15 | - kind: MutatingWebhookConfiguration 16 | group: admissionregistration.k8s.io 17 | path: webhooks/clientConfig/service/namespace 18 | create: true 19 | - kind: ValidatingWebhookConfiguration 20 | group: admissionregistration.k8s.io 21 | path: webhooks/clientConfig/service/namespace 22 | create: true 23 | 24 | varReference: 25 | - path: metadata/annotations 26 | -------------------------------------------------------------------------------- /config/webhook/manifests.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: admissionregistration.k8s.io/v1 3 | kind: ValidatingWebhookConfiguration 4 | metadata: 5 | name: validating-webhook-configuration 6 | webhooks: 7 | - admissionReviewVersions: 8 | - v1 9 | clientConfig: 10 | service: 11 | name: webhook-service 12 | namespace: system 13 | path: /validate-cluster-ytsaurus-tech-v1-chyt 14 | failurePolicy: Fail 15 | name: vchyt.kb.io 16 | rules: 17 | - apiGroups: 18 | - cluster.ytsaurus.tech 19 | apiVersions: 20 | - v1 21 | operations: 22 | - CREATE 23 | - UPDATE 24 | resources: 25 | - chyts 26 | sideEffects: None 27 | - admissionReviewVersions: 28 | - v1 29 | clientConfig: 30 | service: 31 | name: webhook-service 32 | namespace: system 33 | path: /validate-cluster-ytsaurus-tech-v1-spyt 34 | failurePolicy: Fail 35 | name: vspyt.kb.io 36 | rules: 37 | - apiGroups: 38 | - cluster.ytsaurus.tech 39 | apiVersions: 40 | - v1 41 | operations: 42 | - CREATE 43 | - UPDATE 44 | resources: 45 | - spyts 46 | sideEffects: None 47 | - admissionReviewVersions: 48 | - v1 49 | clientConfig: 50 | service: 51 | name: webhook-service 52 | 
namespace: system 53 | path: /validate-cluster-ytsaurus-tech-v1-ytsaurus 54 | failurePolicy: Fail 55 | name: vytsaurus.kb.io 56 | rules: 57 | - apiGroups: 58 | - cluster.ytsaurus.tech 59 | apiVersions: 60 | - v1 61 | operations: 62 | - CREATE 63 | - UPDATE 64 | resources: 65 | - ytsaurus 66 | sideEffects: None 67 | -------------------------------------------------------------------------------- /config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: service 7 | app.kubernetes.io/instance: webhook-service 8 | app.kubernetes.io/component: webhook 9 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 10 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: webhook-service 13 | namespace: system 14 | spec: 15 | ports: 16 | - port: 443 17 | protocol: TCP 18 | targetPort: 9443 19 | selector: 20 | control-plane: controller-manager 21 | -------------------------------------------------------------------------------- /controllers/chyt_sync.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 8 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 9 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/components" 10 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/ytconfig" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | "sigs.k8s.io/controller-runtime/pkg/log" 13 | ) 14 | 15 | func (r *ChytReconciler) Sync(ctx context.Context, resource *ytv1.Chyt, ytsaurus *ytv1.Ytsaurus) (ctrl.Result, error) { 16 | logger := log.FromContext(ctx) 17 | 18 | chyt := apiproxy.NewChyt(resource, r.Client, r.Recorder, r.Scheme) 19 | 20 | cfgen := ytconfig.NewLocalNodeGenerator(ytsaurus, resource.Name, 
getClusterDomain(chyt.APIProxy().Client())) 21 | 22 | component := components.NewChyt(cfgen, chyt, ytsaurus) 23 | 24 | err := component.Fetch(ctx) 25 | if err != nil { 26 | logger.Error(err, "failed to fetch CHYT status for controller") 27 | return ctrl.Result{Requeue: true}, err 28 | } 29 | 30 | if chyt.GetResource().Status.ReleaseStatus == ytv1.ChytReleaseStatusFinished { 31 | return ctrl.Result{}, nil 32 | } 33 | 34 | status := component.Status(ctx) 35 | if status.SyncStatus == components.SyncStatusBlocked { 36 | return ctrl.Result{RequeueAfter: time.Second * 10}, nil 37 | } 38 | 39 | if status.SyncStatus == components.SyncStatusReady { 40 | logger.Info("CHYT initialization finished") 41 | 42 | err := chyt.SaveReleaseStatus(ctx, ytv1.ChytReleaseStatusFinished) 43 | return ctrl.Result{Requeue: true}, err 44 | } 45 | 46 | if err := component.Sync(ctx); err != nil { 47 | logger.Error(err, "component sync failed", "component", "chyt") 48 | return ctrl.Result{Requeue: true}, err 49 | } 50 | 51 | if err := chyt.APIProxy().UpdateStatus(ctx); err != nil { 52 | logger.Error(err, "update chyt status failed") 53 | return ctrl.Result{Requeue: true}, err 54 | } 55 | 56 | return ctrl.Result{Requeue: true}, nil 57 | } 58 | -------------------------------------------------------------------------------- /controllers/helpers.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "net" 5 | "os" 6 | "strings" 7 | 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | const ( 12 | defaultClusterDomain = "cluster.local" 13 | remoteClusterSpecField = "remoteClusterSpec" 14 | ) 15 | 16 | func getClusterDomain(client client.Client) string { 17 | domain, exists := os.LookupEnv("K8S_CLUSTER_DOMAIN") 18 | if exists { 19 | return domain 20 | } 21 | apiSvc := "kubernetes.default.svc" 22 | 23 | cname, err := net.LookupCNAME(apiSvc) 24 | if err != nil { 25 | return defaultClusterDomain 26 | } 27 | 28 | 
clusterDomain := strings.TrimPrefix(cname, apiSvc) 29 | clusterDomain = strings.TrimPrefix(clusterDomain, ".") 30 | clusterDomain = strings.TrimSuffix(clusterDomain, ".") 31 | 32 | return clusterDomain 33 | } 34 | -------------------------------------------------------------------------------- /controllers/remote_controllers_common_test.go: -------------------------------------------------------------------------------- 1 | package controllers_test 2 | 3 | import ( 4 | "path/filepath" 5 | "testing" 6 | 7 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 8 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/testutil" 9 | ctrl "sigs.k8s.io/controller-runtime" 10 | ) 11 | 12 | const ( 13 | remoteYtsaurusHostname = "test-hostname" 14 | remoteYtsaurusName = "test-remote-ytsaurus" 15 | ) 16 | 17 | func startHelperWithController(t *testing.T, namespace string, reconcilerSetupFunc func(mgr ctrl.Manager) error) *testutil.TestHelper { 18 | h := testutil.NewTestHelper(t, namespace, filepath.Join("..", "config", "crd", "bases")) 19 | h.Start(reconcilerSetupFunc) 20 | return h 21 | } 22 | 23 | func buildRemoteYtsaurus(h *testutil.TestHelper, remoteYtsaurusName, remoteYtsaurusHostname string) ytv1.RemoteYtsaurus { 24 | remoteYtsaurus := ytv1.RemoteYtsaurus{ 25 | ObjectMeta: h.GetObjectMeta(remoteYtsaurusName), 26 | Spec: ytv1.RemoteYtsaurusSpec{ 27 | MasterConnectionSpec: ytv1.MasterConnectionSpec{ 28 | CellTag: 100, 29 | HostAddresses: []string{ 30 | remoteYtsaurusHostname, 31 | }, 32 | }, 33 | }, 34 | } 35 | return remoteYtsaurus 36 | } 37 | 38 | func waitRemoteYtsaurusDeployed(h *testutil.TestHelper, remoteYtsaurusName string) { 39 | testutil.FetchEventually(h, remoteYtsaurusName, &ytv1.RemoteYtsaurus{}) 40 | } 41 | -------------------------------------------------------------------------------- /controllers/remotedatanodes_sync.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | 
6 | ctrl "sigs.k8s.io/controller-runtime" 7 | "sigs.k8s.io/controller-runtime/pkg/log" 8 | 9 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 10 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 11 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/components" 12 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/ytconfig" 13 | ) 14 | 15 | func (r *RemoteDataNodesReconciler) Sync( 16 | ctx context.Context, 17 | resource *ytv1.RemoteDataNodes, 18 | remoteYtsaurus *ytv1.RemoteYtsaurus, 19 | ) (ctrl.Result, error) { 20 | logger := log.FromContext(ctx).WithValues("component", "remoteDatanodes") 21 | apiProxy := apiproxy.NewAPIProxy(resource, r.Client, r.Recorder, r.Scheme) 22 | 23 | cfgen := ytconfig.NewRemoteNodeGenerator(remoteYtsaurus, resource.GetName(), getClusterDomain(r.Client), &resource.Spec.CommonSpec) 24 | 25 | component := components.NewRemoteDataNodes( 26 | cfgen, 27 | resource, 28 | apiProxy, 29 | resource.Spec.DataNodesSpec, 30 | resource.Spec.CommonSpec, 31 | ) 32 | err := component.Fetch(ctx) 33 | if err != nil { 34 | logger.Error(err, "failed to fetch remote nodes") 35 | return ctrl.Result{Requeue: true}, err 36 | } 37 | 38 | status, err := component.Sync(ctx) 39 | if err != nil { 40 | logger.Error(err, "failed to sync remote nodes") 41 | return ctrl.Result{Requeue: true}, err 42 | } 43 | 44 | var requeue bool 45 | if status.SyncStatus != components.SyncStatusReady { 46 | resource.Status.ReleaseStatus = ytv1.RemoteNodeReleaseStatusPending 47 | requeue = true 48 | } else { 49 | resource.Status.ReleaseStatus = ytv1.RemoteNodeReleaseStatusRunning 50 | requeue = false 51 | } 52 | resource.Status.ObservedGeneration = resource.Generation 53 | 54 | logger.Info("Setting status for remote data nodes", "status", resource.Status.ReleaseStatus) 55 | err = r.Client.Status().Update(ctx, resource) 56 | if err != nil { 57 | logger.Error(err, "failed to update status for remote data nodes") 58 | return ctrl.Result{Requeue: true}, err 59 | } 60 | 61 | 
return ctrl.Result{Requeue: requeue}, nil 62 | } 63 | -------------------------------------------------------------------------------- /controllers/remoteexecnodes_sync.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | 6 | ctrl "sigs.k8s.io/controller-runtime" 7 | "sigs.k8s.io/controller-runtime/pkg/log" 8 | 9 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 10 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 11 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/components" 12 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/ytconfig" 13 | ) 14 | 15 | func (r *RemoteExecNodesReconciler) Sync( 16 | ctx context.Context, 17 | resource *ytv1.RemoteExecNodes, 18 | remoteYtsaurus *ytv1.RemoteYtsaurus, 19 | ) (ctrl.Result, error) { 20 | logger := log.FromContext(ctx).WithValues("component", "remoteexecnodes") 21 | apiProxy := apiproxy.NewAPIProxy(resource, r.Client, r.Recorder, r.Scheme) 22 | 23 | cfgen := ytconfig.NewRemoteNodeGenerator(remoteYtsaurus, resource.GetName(), getClusterDomain(r.Client), &resource.Spec.CommonSpec) 24 | 25 | component := components.NewRemoteExecNodes( 26 | cfgen, 27 | resource, 28 | apiProxy, 29 | resource.Spec.ExecNodesSpec, 30 | resource.Spec.CommonSpec, 31 | ) 32 | err := component.Fetch(ctx) 33 | if err != nil { 34 | logger.Error(err, "failed to fetch remote nodes") 35 | return ctrl.Result{Requeue: true}, err 36 | } 37 | 38 | status, err := component.Sync(ctx) 39 | if err != nil { 40 | logger.Error(err, "failed to sync remote nodes") 41 | return ctrl.Result{Requeue: true}, err 42 | } 43 | 44 | var requeue bool 45 | if status.SyncStatus != components.SyncStatusReady { 46 | resource.Status.ReleaseStatus = ytv1.RemoteNodeReleaseStatusPending 47 | requeue = true 48 | } else { 49 | resource.Status.ReleaseStatus = ytv1.RemoteNodeReleaseStatusRunning 50 | requeue = false 51 | } 52 | resource.Status.ObservedGeneration = 
resource.Generation 53 | 54 | logger.Info("Setting status for remote exec nodes", "status", resource.Status.ReleaseStatus) 55 | err = r.Client.Status().Update(ctx, resource) 56 | if err != nil { 57 | logger.Error(err, "failed to update status for remote exec nodes") 58 | return ctrl.Result{Requeue: true}, err 59 | } 60 | 61 | return ctrl.Result{Requeue: requeue}, nil 62 | } 63 | -------------------------------------------------------------------------------- /controllers/remotetabletnodes_sync.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | 6 | ctrl "sigs.k8s.io/controller-runtime" 7 | "sigs.k8s.io/controller-runtime/pkg/log" 8 | 9 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 10 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 11 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/components" 12 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/ytconfig" 13 | ) 14 | 15 | func (r *RemoteTabletNodesReconciler) Sync( 16 | ctx context.Context, 17 | resource *ytv1.RemoteTabletNodes, 18 | remoteYtsaurus *ytv1.RemoteYtsaurus, 19 | ) (ctrl.Result, error) { 20 | logger := log.FromContext(ctx).WithValues("component", "remoteTabletNodes") 21 | apiProxy := apiproxy.NewAPIProxy(resource, r.Client, r.Recorder, r.Scheme) 22 | 23 | cfgen := ytconfig.NewRemoteNodeGenerator(remoteYtsaurus, resource.GetName(), getClusterDomain(r.Client), &resource.Spec.CommonSpec) 24 | 25 | component := components.NewRemoteTabletNodes( 26 | cfgen, 27 | resource, 28 | apiProxy, 29 | resource.Spec.TabletNodesSpec, 30 | resource.Spec.CommonSpec, 31 | ) 32 | err := component.Fetch(ctx) 33 | if err != nil { 34 | logger.Error(err, "failed to fetch remote nodes") 35 | return ctrl.Result{Requeue: true}, err 36 | } 37 | 38 | status, err := component.Sync(ctx) 39 | if err != nil { 40 | logger.Error(err, "failed to sync remote nodes") 41 | return ctrl.Result{Requeue: true}, err 42 | } 43 | 44 | var 
requeue bool 45 | if status.SyncStatus != components.SyncStatusReady { 46 | resource.Status.ReleaseStatus = ytv1.RemoteNodeReleaseStatusPending 47 | requeue = true 48 | } else { 49 | resource.Status.ReleaseStatus = ytv1.RemoteNodeReleaseStatusRunning 50 | requeue = false 51 | } 52 | resource.Status.ObservedGeneration = resource.Generation 53 | 54 | logger.Info("Setting status for remote tablet nodes", "status", resource.Status.ReleaseStatus) 55 | err = r.Client.Status().Update(ctx, resource) 56 | if err != nil { 57 | logger.Error(err, "failed to update status for remote tablet nodes") 58 | return ctrl.Result{Requeue: true}, err 59 | } 60 | 61 | return ctrl.Result{Requeue: requeue}, nil 62 | } 63 | -------------------------------------------------------------------------------- /controllers/spyt_sync.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 8 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 9 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/components" 10 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/ytconfig" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | "sigs.k8s.io/controller-runtime/pkg/log" 13 | ) 14 | 15 | func (r *SpytReconciler) Sync(ctx context.Context, resource *ytv1.Spyt, ytsaurus *ytv1.Ytsaurus) (ctrl.Result, error) { 16 | logger := log.FromContext(ctx) 17 | 18 | spyt := apiproxy.NewSpyt(resource, r.Client, r.Recorder, r.Scheme) 19 | 20 | cfgen := ytconfig.NewLocalNodeGenerator(ytsaurus, resource.Name, getClusterDomain(spyt.APIProxy().Client())) 21 | 22 | component := components.NewSpyt(cfgen, spyt, ytsaurus) 23 | 24 | err := component.Fetch(ctx) 25 | if err != nil { 26 | logger.Error(err, "failed to fetch SPYT status for controller") 27 | return ctrl.Result{Requeue: true}, err 28 | } 29 | 30 | if spyt.GetResource().Status.ReleaseStatus == 
ytv1.SpytReleaseStatusFinished { 31 | return ctrl.Result{}, nil 32 | } 33 | 34 | componentStatus := component.Status(ctx) 35 | 36 | if componentStatus.SyncStatus == components.SyncStatusBlocked { 37 | return ctrl.Result{RequeueAfter: time.Second * 10}, nil 38 | } 39 | 40 | if componentStatus.SyncStatus == components.SyncStatusReady { 41 | logger.Info("SPYT initialization finished") 42 | 43 | err := spyt.SaveReleaseStatus(ctx, ytv1.SpytReleaseStatusFinished) 44 | return ctrl.Result{Requeue: true}, err 45 | } 46 | 47 | if err := component.Sync(ctx); err != nil { 48 | logger.Error(err, "component sync failed", "component", "spyt") 49 | return ctrl.Result{Requeue: true}, err 50 | } 51 | 52 | if err := spyt.APIProxy().UpdateStatus(ctx); err != nil { 53 | logger.Error(err, "update spyt status failed") 54 | return ctrl.Result{Requeue: true}, err 55 | } 56 | 57 | return ctrl.Result{Requeue: true}, nil 58 | } 59 | -------------------------------------------------------------------------------- /hack/.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 
3 | 4 | bin/ 5 | testbin/ 6 | -------------------------------------------------------------------------------- /hack/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | testbin/* 10 | 11 | # Test binary, build with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Kubernetes Generated files - skip generated files, except for vendored files 18 | 19 | !vendor/**/zz_generated.* 20 | 21 | # editor and IDE paraphernalia 22 | .idea 23 | *.swp 24 | *.swo 25 | *~ 26 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ -------------------------------------------------------------------------------- /pkg/apiproxy/chyt.go: -------------------------------------------------------------------------------- 1 | package apiproxy 2 | 3 | import ( 4 | "context" 5 | 6 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 7 | "k8s.io/apimachinery/pkg/api/meta" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "k8s.io/client-go/tools/record" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/log" 13 | ) 14 | 15 | type Chyt struct { 16 | apiProxy APIProxy 17 | chyt *ytv1.Chyt 18 | } 19 | 20 | func NewChyt( 21 | chyt *ytv1.Chyt, 22 | client client.Client, 23 | recorder record.EventRecorder, 24 | scheme *runtime.Scheme) *Chyt { 25 | return &Chyt{ 26 | chyt: chyt, 27 | apiProxy: NewAPIProxy(chyt, client, recorder, scheme), 28 | } 29 | } 30 | 31 | func (c *Chyt) GetResource() *ytv1.Chyt { 32 | return c.chyt 33 | } 34 | 35 | func (c *Chyt) APIProxy() APIProxy { 36 | return c.apiProxy 37 | } 38 | 39 | func (c *Chyt) SetStatusCondition(condition metav1.Condition) { 40 | meta.SetStatusCondition(&c.chyt.Status.Conditions, condition) 41 | } 42 | 43 | func (c *Chyt) IsStatusConditionTrue(conditionType string) bool { 44 | return meta.IsStatusConditionTrue(c.chyt.Status.Conditions, conditionType) 45 | } 46 | 47 | func (c *Chyt) IsStatusConditionFalse(conditionType string) bool { 48 | return meta.IsStatusConditionFalse(c.chyt.Status.Conditions, conditionType) 49 | } 50 | 51 | func (c *Chyt) SaveReleaseStatus(ctx context.Context, releaseStatus ytv1.ChytReleaseStatus) error { 52 | logger := log.FromContext(ctx) 53 | c.GetResource().Status.ReleaseStatus = releaseStatus 54 | if err := c.apiProxy.UpdateStatus(ctx); err != nil { 55 | logger.Error(err, "unable to update Chyt release status") 56 | return err 57 | } 58 | 59 | return nil 60 | } 61 | 
-------------------------------------------------------------------------------- /pkg/apiproxy/spyt.go: -------------------------------------------------------------------------------- 1 | package apiproxy 2 | 3 | import ( 4 | "context" 5 | 6 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 7 | "k8s.io/apimachinery/pkg/api/meta" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "k8s.io/client-go/tools/record" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/log" 13 | ) 14 | 15 | type Spyt struct { 16 | apiProxy APIProxy 17 | spyt *ytv1.Spyt 18 | } 19 | 20 | func NewSpyt( 21 | spyt *ytv1.Spyt, 22 | client client.Client, 23 | recorder record.EventRecorder, 24 | scheme *runtime.Scheme) *Spyt { 25 | return &Spyt{ 26 | spyt: spyt, 27 | apiProxy: NewAPIProxy(spyt, client, recorder, scheme), 28 | } 29 | } 30 | 31 | func (c *Spyt) GetResource() *ytv1.Spyt { 32 | return c.spyt 33 | } 34 | 35 | func (c *Spyt) APIProxy() APIProxy { 36 | return c.apiProxy 37 | } 38 | 39 | func (c *Spyt) SetStatusCondition(condition metav1.Condition) { 40 | meta.SetStatusCondition(&c.spyt.Status.Conditions, condition) 41 | } 42 | 43 | func (c *Spyt) IsStatusConditionTrue(conditionType string) bool { 44 | return meta.IsStatusConditionTrue(c.spyt.Status.Conditions, conditionType) 45 | } 46 | 47 | func (c *Spyt) IsStatusConditionFalse(conditionType string) bool { 48 | return meta.IsStatusConditionFalse(c.spyt.Status.Conditions, conditionType) 49 | } 50 | 51 | func (c *Spyt) SaveReleaseStatus(ctx context.Context, releaseStatus ytv1.SpytReleaseStatus) error { 52 | logger := log.FromContext(ctx) 53 | c.GetResource().Status.ReleaseStatus = releaseStatus 54 | if err := c.apiProxy.UpdateStatus(ctx); err != nil { 55 | logger.Error(err, "unable to update Spyt release status") 56 | return err 57 | } 58 | 59 | return nil 60 | } 61 | -------------------------------------------------------------------------------- 
/pkg/canonize/colordiff.go: -------------------------------------------------------------------------------- 1 | package canonize 2 | 3 | import "strings" 4 | 5 | type Color string 6 | 7 | const ( 8 | colorRed Color = "\033[31m" 9 | colorGreen Color = "\033[32m" 10 | colorCyan Color = "\033[36m" 11 | ) 12 | 13 | const colorReset = "\033[0m" 14 | 15 | func addColorsToDiff(text string) string { 16 | coloredText := "" 17 | for _, l := range strings.Split(text, "\n") { 18 | if strings.HasPrefix(l, "+") { 19 | l = string(colorGreen) + l + colorReset 20 | } else if strings.HasPrefix(l, "-") { 21 | l = string(colorRed) + l + colorReset 22 | } else if strings.HasPrefix(l, "@@") { 23 | l = string(colorCyan) + l + colorReset 24 | } 25 | coloredText += l + "\n" 26 | } 27 | return coloredText 28 | } 29 | -------------------------------------------------------------------------------- /pkg/components/canondata/TestConfigMerge/http_proxy_config_override.yson: -------------------------------------------------------------------------------- 1 | { 2 | address_resolver = { 3 | force_tcp = %true; 4 | }; 5 | api = { 6 | concurrency_limit = 4000; 7 | }; 8 | cluster_connection = { 9 | thread_pool_size = 8; 10 | }; 11 | } 12 | -------------------------------------------------------------------------------- /pkg/components/config_helper_test.go: -------------------------------------------------------------------------------- 1 | package components 2 | 3 | import ( 4 | _ "embed" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | 9 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/canonize" 10 | ) 11 | 12 | //go:embed canondata/TestConfigMerge/http_proxy_config_wo_override.yson 13 | var hpConfigWithoutOverride string 14 | 15 | //go:embed canondata/TestConfigMerge/http_proxy_config_override.yson 16 | var hpConfigOverride string 17 | 18 | func TestConfigMerge(t *testing.T) { 19 | merged, err := overrideYsonConfigs( 20 | []byte(hpConfigWithoutOverride), 21 | 
// RemoteDataNode manages data nodes declared by a RemoteDataNodes resource,
// i.e. nodes that attach to a master cluster managed elsewhere.
type RemoteDataNode struct {
	server server
	cfgen  *ytconfig.NodeGenerator
	spec   *ytv1.DataNodesSpec
	baseComponent
}

// NewRemoteDataNodes builds a RemoteDataNode component for the given spec.
// Note: the nodes parameter is accepted but not referenced in this
// constructor — presumably kept for signature parity with sibling
// constructors; TODO confirm before removing.
func NewRemoteDataNodes(
	cfgen *ytconfig.NodeGenerator,
	nodes *ytv1.RemoteDataNodes,
	proxy apiproxy.APIProxy,
	spec ytv1.DataNodesSpec,
	commonSpec ytv1.CommonSpec,
) *RemoteDataNode {
	l := cfgen.GetComponentLabeller(consts.DataNodeType, spec.Name)

	// Server wrapper runs ytserver-node with a data-node config and exposes
	// the data-node RPC port on the container.
	srv := newServerConfigured(
		l,
		proxy,
		commonSpec,
		&spec.InstanceSpec,
		"/usr/bin/ytserver-node",
		"ytserver-data-node.yson",
		func() ([]byte, error) {
			return cfgen.GetDataNodeConfig(spec)
		},
		consts.DataNodeMonitoringPort,
		WithContainerPorts(corev1.ContainerPort{
			Name:          consts.YTRPCPortName,
			ContainerPort: consts.DataNodeRPCPort,
			Protocol:      corev1.ProtocolTCP,
		}),
	)
	return &RemoteDataNode{
		baseComponent: baseComponent{labeller: l},
		server:        srv,
		cfgen:         cfgen,
		spec:          &spec,
	}
}

// doSync reconciles the remote data nodes. Unlike local components there is
// no cluster-update state machine here: any pending create or spec change is
// synced immediately (unless dry), then readiness of pods gates Ready status.
func (n *RemoteDataNode) doSync(ctx context.Context, dry bool) (ComponentStatus, error) {
	var err error

	if n.server.needSync() || n.server.needUpdate() {
		if !dry {
			err = n.server.Sync(ctx)
		}
		return WaitingStatus(SyncStatusPending, "components"), err
	}

	if !n.server.arePodsReady(ctx) {
		return WaitingStatus(SyncStatusBlocked, "pods"), err
	}

	return SimpleStatus(SyncStatusReady), err
}

// GetType identifies this component as a data node.
func (n *RemoteDataNode) GetType() consts.ComponentType { return consts.DataNodeType }

// Sync performs one non-dry reconciliation step.
func (n *RemoteDataNode) Sync(ctx context.Context) (ComponentStatus, error) {
	return n.doSync(ctx, false)
}

// Fetch loads the server's k8s objects from the cluster.
func (n *RemoteDataNode) Fetch(ctx context.Context) error {
	return resources.Fetch(ctx, n.server)
}
// Fetch loads the server's k8s objects from the cluster.
func (d *Discovery) Fetch(ctx context.Context) error {
	return resources.Fetch(ctx, d.server)
}

// doSync computes the component status and, unless dry, performs the next
// reconciliation step for the discovery servers.
func (d *Discovery) doSync(ctx context.Context, dry bool) (ComponentStatus, error) {
	var err error

	// A spec change requires a full local update once the cluster state allows it.
	if ytv1.IsReadyToUpdateClusterState(d.ytsaurus.GetClusterState()) && d.server.needUpdate() {
		return SimpleStatus(SyncStatusNeedLocalUpdate), err
	}

	// While a cluster-wide update is in progress, defer to the shared update
	// handler; a non-nil status means it decided this component's outcome.
	if d.ytsaurus.GetClusterState() == ytv1.ClusterStateUpdating {
		if status, err := handleUpdatingClusterState(ctx, d.ytsaurus, d, &d.localComponent, d.server, dry); status != nil {
			return *status, err
		}
	}

	if d.NeedSync() {
		if !dry {
			err = d.server.Sync(ctx)
		}
		return WaitingStatus(SyncStatusPending, "components"), err
	}

	// Resources are in place; block until the pods report ready.
	if !d.server.arePodsReady(ctx) {
		return WaitingStatus(SyncStatusBlocked, "pods"), err
	}

	return SimpleStatus(SyncStatusReady), err
}

// Status reports component status without mutating anything (dry run).
func (d *Discovery) Status(ctx context.Context) (ComponentStatus, error) {
	return d.doSync(ctx, true)
}

// Sync performs one reconciliation step, discarding the computed status.
func (d *Discovery) Sync(ctx context.Context) error {
	_, err := d.doSync(ctx, false)
	return err
}
manager.removePods(ctx); err != nil { 20 | return err 21 | } 22 | 23 | setPodsRemovingStartedCondition(ctx, c) 24 | return nil 25 | } 26 | 27 | if !manager.arePodsRemoved(ctx) { 28 | return nil 29 | } 30 | 31 | setPodsRemovedCondition(ctx, c) 32 | return nil 33 | } 34 | 35 | func isPodsRemovingStarted(c *localComponent) bool { 36 | return c.ytsaurus.IsUpdateStatusConditionTrue(c.labeller.GetPodsRemovingStartedCondition()) 37 | } 38 | 39 | func setPodsRemovingStartedCondition(ctx context.Context, c *localComponent) { 40 | c.ytsaurus.SetUpdateStatusCondition(ctx, metav1.Condition{ 41 | Type: c.labeller.GetPodsRemovingStartedCondition(), 42 | Status: metav1.ConditionTrue, 43 | Reason: "Update", 44 | Message: "Pods removing was started", 45 | }) 46 | } 47 | 48 | func setPodsRemovedCondition(ctx context.Context, c *localComponent) { 49 | c.ytsaurus.SetUpdateStatusCondition(ctx, metav1.Condition{ 50 | Type: c.labeller.GetPodsRemovedCondition(), 51 | Status: metav1.ConditionTrue, 52 | Reason: "Update", 53 | Message: "Pods removed", 54 | }) 55 | } 56 | -------------------------------------------------------------------------------- /pkg/components/serveroptions.go: -------------------------------------------------------------------------------- 1 | package components 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | "k8s.io/apimachinery/pkg/util/intstr" 6 | ) 7 | 8 | type options struct { 9 | containerPorts []corev1.ContainerPort 10 | 11 | readinessProbeEndpointPort intstr.IntOrString 12 | readinessProbeEndpointPath string 13 | } 14 | 15 | type Option func(opts *options) 16 | 17 | func WithCustomReadinessProbeEndpointPort(port int32) Option { 18 | return func(opts *options) { 19 | opts.readinessProbeEndpointPort = intstr.FromInt32(port) 20 | } 21 | } 22 | 23 | func WithCustomReadinessProbeEndpointPath(path string) Option { 24 | return func(opts *options) { 25 | opts.readinessProbeEndpointPath = path 26 | } 27 | } 28 | 29 | func WithContainerPorts(ports 
// RemoteTabletNode manages tablet nodes declared by a RemoteTabletNodes
// resource, i.e. nodes that attach to a master cluster managed elsewhere.
type RemoteTabletNode struct {
	server server
	cfgen  *ytconfig.NodeGenerator
	spec   *ytv1.TabletNodesSpec
	baseComponent
}

// NewRemoteTabletNodes builds a RemoteTabletNode component for the given spec.
// Note: the nodes parameter is accepted but not referenced in this
// constructor — presumably kept for signature parity with sibling
// constructors; TODO confirm before removing.
func NewRemoteTabletNodes(
	cfgen *ytconfig.NodeGenerator,
	nodes *ytv1.RemoteTabletNodes,
	proxy apiproxy.APIProxy,
	spec ytv1.TabletNodesSpec,
	commonSpec ytv1.CommonSpec,
) *RemoteTabletNode {
	l := cfgen.GetComponentLabeller(consts.TabletNodeType, spec.Name)

	// Server wrapper runs ytserver-node with a tablet-node config and exposes
	// the tablet-node RPC port on the container.
	srv := newServerConfigured(
		l,
		proxy,
		commonSpec,
		&spec.InstanceSpec,
		"/usr/bin/ytserver-node",
		"ytserver-tablet-node.yson",
		func() ([]byte, error) {
			return cfgen.GetTabletNodeConfig(spec)
		},
		consts.TabletNodeMonitoringPort,
		WithContainerPorts(corev1.ContainerPort{
			Name:          consts.YTRPCPortName,
			ContainerPort: consts.TabletNodeRPCPort,
			Protocol:      corev1.ProtocolTCP,
		}),
	)
	return &RemoteTabletNode{
		baseComponent: baseComponent{labeller: l},
		server:        srv,
		cfgen:         cfgen,
		spec:          &spec,
	}
}
// sha256String returns the lowercase hex encoding of the SHA-256 digest of value.
func sha256String(value string) string {
	// sha256.Sum256 is infallible, which removes the error handling that the
	// previous hash.New()/Write/Sum sequence left as a TODO.
	sum := sha256.Sum256([]byte(value))
	return fmt.Sprintf("%x", sum)
}

// createUserCommand builds the shell command sequence that creates a YT user
// and optionally sets its password, registers an access token, and grants
// superuser membership. Commands are idempotent (--ignore-existing / || true)
// so the sequence can be re-run safely.
func createUserCommand(userName, password, token string, isSuperuser bool) []string {
	result := []string{
		fmt.Sprintf("/usr/bin/yt create user --attributes '{name=\"%s\"}' --ignore-existing", userName),
	}

	// Passwords are handed to YT as SHA-256 hashes, never in plain text.
	if password != "" {
		passwordHash := sha256String(password)
		result = append(result, fmt.Sprintf("/usr/bin/yt execute set_user_password '{user=%s;new_password_sha256=\"%s\"}'", userName, passwordHash))
	}

	// Tokens are registered by creating //sys/cypress_tokens/<hash> and
	// pointing its @user attribute at the owner.
	if token != "" {
		tokenHash := sha256String(token)
		result = append(result, fmt.Sprintf("/usr/bin/yt create map_node '//sys/cypress_tokens/%s' --ignore-existing", tokenHash))
		result = append(result, fmt.Sprintf("/usr/bin/yt set '//sys/cypress_tokens/%s/@user' '%s'", tokenHash, userName))
	}

	if isSuperuser {
		// "|| true" keeps reruns from failing when the user is already a member.
		result = append(result, fmt.Sprintf("/usr/bin/yt add-member %s superusers || true", userName))
	}

	return result
}
temporary until YT-20036. 35 | DataNodeSkynetPort = 11012 36 | TabletNodeSkynetPort = 11022 37 | ExecNodeSkynetPort = 11029 38 | 39 | RPCProxyRPCPort = 9013 40 | RPCProxyMonitoringPort = 10013 41 | 42 | HTTPProxyRPCPort = 9016 43 | HTTPProxyMonitoringPort = 10016 44 | HTTPProxyHTTPPort = 80 45 | HTTPProxyHTTPSPort = 443 46 | 47 | TCPProxyMonitoringPort = 10017 48 | 49 | QueryTrackerRPCPort = 9028 50 | QueryTrackerMonitoringPort = 10028 51 | 52 | YQLAgentRPCPort = 9019 53 | YQLAgentMonitoringPort = 10019 54 | 55 | QueueAgentRPCPort = 9030 56 | QueueAgentMonitoringPort = 10030 57 | 58 | UIHTTPPort = 80 59 | 60 | StrawberryHTTPAPIPort = 80 61 | 62 | MasterCachesRPCPort = 9018 63 | MasterCachesMonitoringPort = 10018 64 | 65 | KafkaProxyMonitoringPort = 10033 66 | KafkaProxyKafkaPort = 9034 67 | KafkaProxyRPCPort = 9033 68 | ) 69 | -------------------------------------------------------------------------------- /pkg/consts/cmd.go: -------------------------------------------------------------------------------- 1 | package consts 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | const ( 8 | ConfigMountPoint = "/config" 9 | ConfigTemplateMountPoint = "/config_template" 10 | HTTPSSecretMountPoint = "/tls/https_secret" 11 | RPCSecretMountPoint = "/tls/rpc_secret" 12 | BusSecretMountPoint = "/tls/bus_secret" 13 | CABundleMountPoint = "/tls/ca_bundle" 14 | UIClustersConfigMountPoint = "/opt/app" 15 | UICustomConfigMountPoint = "/opt/app/dist/server/configs/custom" 16 | UISecretsMountPoint = "/opt/app/secrets" 17 | UIVaultMountPoint = "/vault" 18 | ) 19 | 20 | const ( 21 | YTServerContainerName = "ytserver" 22 | PostprocessConfigContainerName = "postprocess-config" 23 | PrepareLocationsContainerName = "prepare-locations" 24 | PrepareSecretContainerName = "prepare-secret" 25 | UIContainerName = "yt-ui" 26 | StrawberryContainerName = "strawberry" 27 | ) 28 | 29 | const ( 30 | ClientConfigFileName = "client.yson" 31 | 32 | InitClusterScriptFileName = "init-cluster.sh" 33 | 
PostprocessConfigScriptFileName = "postprocess-config.sh" 34 | 35 | UIClusterConfigFileName = "clusters-config.json" 36 | UISecretFileName = "yt-interface-secret.json" 37 | CABundleFileName = "ca.crt" 38 | TokenSecretKey = "YT_TOKEN" 39 | ) 40 | 41 | const ( 42 | JobsContainerName = "jobs" 43 | 44 | ContainerdConfigVolumeName = "config-containerd" 45 | ContainerdConfigMountPoint = "/config/containerd" 46 | ContainerdSocketName = "containerd.sock" 47 | ContainerdConfigFileName = "containerd.toml" 48 | 49 | CRINamespace = "yt" 50 | CRIBaseCgroup = "/yt" 51 | ) 52 | 53 | const ( 54 | ConfigTemplateVolumeName = "config-template" 55 | ConfigVolumeName = "config" 56 | HTTPSSecretVolumeName = "https-secret" 57 | RPCSecretVolumeName = "rpc-secret" 58 | BusSecretVolumeName = "bus-secret" 59 | CABundleVolumeName = "ca-bundle" 60 | InitScriptVolumeName = "init-script" 61 | UIVaultVolumeName = "vault" 62 | UISecretsVolumeName = "secrets" 63 | ) 64 | 65 | const ( 66 | HTTPSSecretUpdatePeriod = time.Second * 60 67 | ) 68 | -------------------------------------------------------------------------------- /pkg/consts/conditions.go: -------------------------------------------------------------------------------- 1 | package consts 2 | 3 | const ConditionHasPossibility = "HasPossibility" 4 | const ConditionNoPossibility = "NoPossibility" 5 | const ConditionSafeModeEnabled = "SafeModeEnabled" 6 | const ConditionTabletCellsSaved = "TabletCellsSaved" 7 | const ConditionTabletCellsRemovingStarted = "TabletCellsRemovingStarted" 8 | const ConditionTabletCellsRemoved = "TabletCellsRemoved" 9 | const ConditionSnapshotsBuildingStarted = "SnapshotsBuildingStarted" 10 | const ConditionSnapshotsMonitoringInfoSaved = "SnapshotsMonitoringInfoSaved" 11 | const ConditionSnaphotsSaved = "SnaphotsSaved" 12 | const ConditionTabletCellsRecovered = "TabletCellsRecovered" 13 | const ConditionOpArchiveUpdated = "OpArchiveUpdated" 14 | const ConditionOpArchivePreparedForUpdating = 
"OpArchivePreparedForUpdating" 15 | const ConditionQTStateUpdated = "QTStateUpdated" 16 | const ConditionQTStatePreparedForUpdating = "QTStatePreparedForUpdating" 17 | const ConditionQAStateUpdated = "QAStateUpdated" 18 | const ConditionQAStatePreparedForUpdating = "QAStatePreparedForUpdating" 19 | const ConditionYqlaUpdated = "YqlaUpdated" 20 | const ConditionYqlaPreparedForUpdating = "YqlaPreparedForUpdating" 21 | const ConditionMasterExitReadOnlyPrepared = "MasterExitReadOnlyPrepared" 22 | const ConditionMasterExitedReadOnly = "MasterExitedReadOnly" 23 | const ConditionSafeModeDisabled = "SafeModeDisabled" 24 | 25 | // Conditions below are for migration from imaginary chunks to real chunks for 24.2 26 | // https://github.com/ytsaurus/ytsaurus-k8s-operator/issues/396 27 | const ( 28 | // ConditionRealChunksAttributeEnabled is set by client component when 29 | // it ensures that sys/@config/node_tracker/enable_real_chunk_locations == %true. 30 | ConditionRealChunksAttributeEnabled = "RealChunksAttributeEnabled" 31 | 32 | // ConditionDataNodesNeedPodsRemoval is set by client component when it detects that 33 | // some nodes have imaginary chunks and need to be restarted to remove them. 34 | ConditionDataNodesNeedPodsRemoval = "DataNodesNeedPodsRemoval" 35 | 36 | // ConditionDataNodesWithImaginaryChunksAbsent is set by client component when 37 | // it ensures that there are no active data nodes with imaginary chunks exists, so master 38 | // can be safely updated to 24.2. 
39 | ConditionDataNodesWithImaginaryChunksAbsent = "DataNodesWithImaginaryChunksAbsent" 40 | ) 41 | -------------------------------------------------------------------------------- /pkg/consts/defaults.go: -------------------------------------------------------------------------------- 1 | package consts 2 | 3 | const DefaultAdminLogin = "admin" 4 | const DefaultAdminPassword = "password" 5 | 6 | const AdminLoginSecret = "login" 7 | const AdminPasswordSecret = "password" 8 | const AdminTokenSecret = "token" 9 | 10 | const DefaultCABundlePath = "/etc/ssl/certs/ca-certificates.crt" 11 | 12 | const UIUserName = "robot-ui" 13 | const StrawberryControllerUserName = "robot-strawberry-controller" 14 | const YtsaurusOperatorUserName = "robot-ytsaurus-k8s-operator" 15 | 16 | const YqlUserName = "yql_agent" 17 | const DefaultYqlTokenPath = "/usr/yql_agent_token" 18 | 19 | const StartUID = 19500 20 | 21 | const DefaultHTTPProxyRole = "default" 22 | const DefaultName = "default" 23 | const DefaultMedium = "default" 24 | 25 | const MaxSlotLocationReserve = 10 << 30 // 10GiB 26 | 27 | const DefaultStrawberryControllerFamily = "chyt" 28 | 29 | func GetDefaultStrawberryControllerFamilies() []string { 30 | return []string{"chyt", "jupyt"} 31 | } 32 | -------------------------------------------------------------------------------- /pkg/consts/labels.go: -------------------------------------------------------------------------------- 1 | package consts 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | const YTClusterLabelName = "ytsaurus.tech/cluster-name" 8 | const YTComponentLabelName = "yt_component" 9 | const YTMetricsLabelName = "yt_metrics" 10 | 11 | func ComponentLabel(component ComponentType) string { 12 | // TODO(achulkov2): We should probably use `ytsaurus` instead of `yt` everywhere, but 13 | // it will be an inconvenient change that requires all statefulsets to be recreated. 
14 | switch component { 15 | case MasterType: 16 | return "yt-master" 17 | case MasterCacheType: 18 | return "yt-master-cache" 19 | case DiscoveryType: 20 | return "yt-discovery" 21 | case SchedulerType: 22 | return "yt-scheduler" 23 | case ControllerAgentType: 24 | return "yt-controller-agent" 25 | case DataNodeType: 26 | return "yt-data-node" 27 | case ExecNodeType: 28 | return "yt-exec-node" 29 | case TabletNodeType: 30 | return "yt-tablet-node" 31 | case HttpProxyType: 32 | return "yt-http-proxy" 33 | case RpcProxyType: 34 | return "yt-rpc-proxy" 35 | case TcpProxyType: 36 | return "yt-tcp-proxy" 37 | case KafkaProxyType: 38 | return "yt-kafka-proxy" 39 | case QueueAgentType: 40 | return "yt-queue-agent" 41 | case QueryTrackerType: 42 | return "yt-query-tracker" 43 | case YqlAgentType: 44 | return "yt-yql-agent" 45 | case StrawberryControllerType: 46 | return "yt-strawberry-controller" 47 | case ChytType: 48 | return "yt-chyt" 49 | case SpytType: 50 | return "yt-spyt" 51 | case YtsaurusClientType: 52 | return "yt-client" 53 | case UIType: 54 | return "yt-ui" 55 | } 56 | 57 | panic(fmt.Sprintf("Unknown component type: %s", component)) 58 | } 59 | -------------------------------------------------------------------------------- /pkg/consts/metrika.go: -------------------------------------------------------------------------------- 1 | package consts 2 | 3 | const MetrikaCounterFileName = "common.js" 4 | const MetrikaCounterScript = ` 5 | "use strict"; 6 | Object.defineProperty(exports, "__esModule", { value: true }); 7 | /** @type {Partial} */ 8 | const config = { 9 | metrikaCounter: [ 10 | { 11 | id: 92831672, 12 | defer: true, 13 | clickmap: true, 14 | trackLinks: true, 15 | accurateTrackBounce: true, 16 | webvisor: true, 17 | }, 18 | ], 19 | }; 20 | exports.default = config; 21 | ` 22 | -------------------------------------------------------------------------------- /pkg/consts/types.go: 
-------------------------------------------------------------------------------- 1 | package consts 2 | 3 | type ComponentType string 4 | 5 | const ( 6 | ControllerAgentType ComponentType = "ControllerAgent" 7 | DataNodeType ComponentType = "DataNode" 8 | DiscoveryType ComponentType = "Discovery" 9 | ExecNodeType ComponentType = "ExecNode" 10 | HttpProxyType ComponentType = "HttpProxy" 11 | MasterCacheType ComponentType = "MasterCache" 12 | MasterType ComponentType = "Master" 13 | QueryTrackerType ComponentType = "QueryTracker" 14 | QueueAgentType ComponentType = "QueueAgent" 15 | RpcProxyType ComponentType = "RpcProxy" 16 | SchedulerType ComponentType = "Scheduler" 17 | StrawberryControllerType ComponentType = "StrawberryController" 18 | TabletNodeType ComponentType = "TabletNode" 19 | TcpProxyType ComponentType = "TcpProxy" 20 | KafkaProxyType ComponentType = "KafkaProxy" 21 | UIType ComponentType = "UI" 22 | YqlAgentType ComponentType = "YqlAgent" 23 | YtsaurusClientType ComponentType = "YtsaurusClient" 24 | ChytType ComponentType = "CHYT" 25 | SpytType ComponentType = "SPYT" 26 | ) 27 | 28 | type ComponentClass string 29 | 30 | const ( 31 | // ComponentClassStateless group contains only stateless components (not master, data nodes, tablet nodes) 32 | ComponentClassUnspecified ComponentClass = "" 33 | ComponentClassStateless ComponentClass = "Stateless" 34 | ComponentClassEverything ComponentClass = "Everything" 35 | ComponentClassNothing ComponentClass = "Nothing" 36 | ) 37 | -------------------------------------------------------------------------------- /pkg/mock/declaration.go: -------------------------------------------------------------------------------- 1 | package mock_yt 2 | 3 | //go:generate go run go.uber.org/mock/mockgen@v0.5.0 -destination=mock_ytsaurus_client.go -package=mock_yt go.ytsaurus.tech/yt/go/yt Client 4 | -------------------------------------------------------------------------------- /pkg/resources/ca_bundle.go: 
-------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | ) 6 | 7 | // CABundle represents mounted configmap with trusted certificates 8 | type CABundle struct { 9 | ConfigMapName string 10 | VolumeName string 11 | MountPath string 12 | } 13 | 14 | func NewCABundle(configMapName string, volumeName string, mountPath string) *CABundle { 15 | return &CABundle{ 16 | ConfigMapName: configMapName, 17 | VolumeName: volumeName, 18 | MountPath: mountPath, 19 | } 20 | } 21 | 22 | func (t *CABundle) AddVolume(podSpec *corev1.PodSpec) { 23 | podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ 24 | Name: t.VolumeName, 25 | VolumeSource: corev1.VolumeSource{ 26 | ConfigMap: &corev1.ConfigMapVolumeSource{ 27 | LocalObjectReference: corev1.LocalObjectReference{ 28 | Name: t.ConfigMapName, 29 | }, 30 | }, 31 | }, 32 | }) 33 | } 34 | 35 | func (t *CABundle) AddVolumeMount(container *corev1.Container) { 36 | container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ 37 | Name: t.VolumeName, 38 | MountPath: t.MountPath, 39 | ReadOnly: true, 40 | }) 41 | } 42 | -------------------------------------------------------------------------------- /pkg/resources/configmap.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 5 | labeller2 "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/labeller" 6 | corev1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | type ConfigMap struct { 10 | BaseManagedResource[*corev1.ConfigMap] 11 | } 12 | 13 | func NewConfigMap(name string, labeller *labeller2.Labeller, apiProxy apiproxy.APIProxy) *ConfigMap { 14 | return &ConfigMap{ 15 | BaseManagedResource: BaseManagedResource[*corev1.ConfigMap]{ 16 | proxy: apiProxy, 17 | labeller: labeller, 18 | name: name, 19 | oldObject: &corev1.ConfigMap{}, 20 | newObject: &corev1.ConfigMap{}, 
21 | }, 22 | } 23 | } 24 | 25 | func (s *ConfigMap) Build() *corev1.ConfigMap { 26 | s.newObject.ObjectMeta = s.labeller.GetObjectMeta(s.name) 27 | s.newObject.Data = make(map[string]string) 28 | return s.newObject 29 | } 30 | -------------------------------------------------------------------------------- /pkg/resources/headless_service.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 5 | labeller2 "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/labeller" 6 | corev1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | type HeadlessService struct { 10 | BaseManagedResource[*corev1.Service] 11 | } 12 | 13 | func NewHeadlessService(name string, labeller *labeller2.Labeller, apiProxy apiproxy.APIProxy) *HeadlessService { 14 | return &HeadlessService{ 15 | BaseManagedResource: BaseManagedResource[*corev1.Service]{ 16 | proxy: apiProxy, 17 | labeller: labeller, 18 | name: name, 19 | oldObject: &corev1.Service{}, 20 | newObject: &corev1.Service{}, 21 | }, 22 | } 23 | } 24 | 25 | func (s *HeadlessService) Build() *corev1.Service { 26 | s.newObject.ObjectMeta = s.labeller.GetObjectMeta(s.name) 27 | s.newObject.Spec = corev1.ServiceSpec{ 28 | ClusterIP: "None", 29 | Selector: s.labeller.GetSelectorLabelMap(), 30 | } 31 | 32 | return s.newObject 33 | } 34 | -------------------------------------------------------------------------------- /pkg/resources/http_service.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 5 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 6 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 7 | labeller "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/labeller" 8 | corev1 "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/util/intstr" 10 | ) 11 | 12 | type HTTPService struct { 13 | 
// NewHTTPService constructs a managed Service for HTTP proxies. A nil
// transport is treated as an empty spec (plain HTTP enabled, no HTTPS).
func NewHTTPService(name string, transport *ytv1.HTTPTransportSpec, labeller *labeller.Labeller, apiProxy apiproxy.APIProxy) *HTTPService {
	if transport == nil {
		transport = &ytv1.HTTPTransportSpec{}
	}
	return &HTTPService{
		BaseManagedResource: BaseManagedResource[*corev1.Service]{
			proxy:     apiProxy,
			labeller:  labeller,
			name:      name,
			oldObject: &corev1.Service{},
			newObject: &corev1.Service{},
		},
		transport: transport,
	}
}

// SetHttpNodePort sets an explicit NodePort for the plain HTTP port
// (nil means let k8s assign one).
func (s *HTTPService) SetHttpNodePort(port *int32) {
	s.httpNodePort = port
}

// SetHttpsNodePort sets an explicit NodePort for the HTTPS port
// (nil means let k8s assign one).
func (s *HTTPService) SetHttpsNodePort(port *int32) {
	s.httpsNodePort = port
}

// Build fills the desired Service: an "http" port unless HTTP is disabled in
// the transport spec, and an "https" port only when an HTTPS secret is set.
func (s *HTTPService) Build() *corev1.Service {
	s.newObject.ObjectMeta = s.labeller.GetObjectMeta(s.name)
	s.newObject.Spec = corev1.ServiceSpec{
		Selector: s.labeller.GetSelectorLabelMap(),
	}

	// At most two ports: http and https.
	s.newObject.Spec.Ports = make([]corev1.ServicePort, 0, 2)
	if !s.transport.DisableHTTP {
		port := corev1.ServicePort{
			Name:       "http",
			Port:       consts.HTTPProxyHTTPPort,
			TargetPort: intstr.IntOrString{IntVal: consts.HTTPProxyHTTPPort},
		}
		if s.httpNodePort != nil {
			port.NodePort = *s.httpNodePort
		}
		s.newObject.Spec.Ports = append(s.newObject.Spec.Ports, port)
	}

	if s.transport.HTTPSSecret != nil {
		port := corev1.ServicePort{
			Name:       "https",
			Port:       consts.HTTPProxyHTTPSPort,
			TargetPort: intstr.IntOrString{IntVal: consts.HTTPProxyHTTPSPort},
		}
		if s.httpsNodePort != nil {
			port.NodePort = *s.httpsNodePort
		}
		s.newObject.Spec.Ports = append(s.newObject.Spec.Ports, port)
	}

	return s.newObject
}
-------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | batchv1 "k8s.io/api/batch/v1" 5 | 6 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 7 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/labeller" 8 | ) 9 | 10 | type Job struct { 11 | BaseManagedResource[*batchv1.Job] 12 | } 13 | 14 | func NewJob(name string, l *labeller.Labeller, apiProxy apiproxy.APIProxy) *Job { 15 | return &Job{ 16 | BaseManagedResource: BaseManagedResource[*batchv1.Job]{ 17 | proxy: apiProxy, 18 | labeller: l, 19 | name: name, 20 | oldObject: &batchv1.Job{}, 21 | newObject: &batchv1.Job{}, 22 | }, 23 | } 24 | } 25 | 26 | func (j *Job) Completed() bool { 27 | return j.oldObject.Status.Succeeded > 0 28 | } 29 | 30 | func (j *Job) Build() *batchv1.Job { 31 | var ttlSeconds int32 = 600 32 | var backoffLimit int32 = 15 33 | j.newObject.ObjectMeta = j.labeller.GetObjectMeta(j.name) 34 | j.newObject.Spec = batchv1.JobSpec{ 35 | TTLSecondsAfterFinished: &ttlSeconds, 36 | BackoffLimit: &backoffLimit, 37 | } 38 | 39 | return j.newObject 40 | } 41 | -------------------------------------------------------------------------------- /pkg/resources/monitoring_service.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 7 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 8 | labeller2 "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/labeller" 9 | corev1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/util/intstr" 12 | ) 13 | 14 | type MonitoringService struct { 15 | BaseManagedResource[*corev1.Service] 16 | 17 | monitoringTargetPort int32 18 | } 19 | 20 | func NewMonitoringService(monitoringTargetPort int32, labeller *labeller2.Labeller, apiProxy apiproxy.APIProxy) *MonitoringService { 21 | return &MonitoringService{ 22 | 
// GetServiceMeta builds object metadata for the monitoring service; unlike
// the regular labeller object meta it carries the monitoring meta labels.
func (s *MonitoringService) GetServiceMeta(name string) metav1.ObjectMeta {
	return metav1.ObjectMeta{
		Name:      name,
		Namespace: s.labeller.GetNamespace(),
		Labels:    s.labeller.GetMonitoringMetaLabelMap(),
	}
}

// Build fills the desired Service that exposes the component's monitoring
// target port under the shared YT monitoring service port/name.
func (s *MonitoringService) Build() *corev1.Service {
	s.newObject.ObjectMeta = s.GetServiceMeta(s.name)
	s.newObject.Spec = corev1.ServiceSpec{
		Selector: s.labeller.GetSelectorLabelMap(),
		Ports: []corev1.ServicePort{
			{
				Name:       consts.YTMonitoringServicePortName,
				Port:       consts.YTMonitoringPort,
				TargetPort: intstr.IntOrString{IntVal: s.monitoringTargetPort},
			},
		},
	}

	return s.newObject
}
labeller *labeller.Labeller 39 | 40 | name string 41 | oldObject T 42 | newObject T 43 | } 44 | 45 | func (r *BaseManagedResource[T]) OldObject() T { 46 | return r.oldObject 47 | } 48 | 49 | func (r *BaseManagedResource[T]) NewObject() T { 50 | return r.newObject 51 | } 52 | 53 | func (r *BaseManagedResource[T]) Name() string { 54 | return r.name 55 | } 56 | 57 | func (r *BaseManagedResource[T]) Fetch(ctx context.Context) error { 58 | return r.proxy.FetchObject(ctx, r.name, r.oldObject) 59 | } 60 | 61 | func (r *BaseManagedResource[T]) Exists() bool { 62 | return r.oldObject.GetResourceVersion() != "" 63 | } 64 | 65 | func (r *BaseManagedResource[T]) Sync(ctx context.Context) error { 66 | return r.proxy.SyncObject(ctx, r.oldObject, r.newObject) 67 | } 68 | 69 | func Exists[T ResourceObject](r ManagedResource[T]) bool { 70 | return r.Exists() 71 | } 72 | 73 | func Fetch(ctx context.Context, objects ...Fetchable) error { 74 | for _, obj := range objects { 75 | if obj == nil || reflect.ValueOf(obj).IsNil() { 76 | continue 77 | } 78 | err := obj.Fetch(ctx) 79 | if err != nil { 80 | return err 81 | } 82 | } 83 | return nil 84 | } 85 | 86 | func Sync(ctx context.Context, objects ...Syncable) error { 87 | for _, obj := range objects { 88 | if obj == nil || reflect.ValueOf(obj).IsNil() { 89 | continue 90 | } 91 | err := obj.Sync(ctx) 92 | if err != nil { 93 | return err 94 | } 95 | } 96 | return nil 97 | } 98 | -------------------------------------------------------------------------------- /pkg/resources/rpc_service.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 5 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 6 | labeller "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/labeller" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/util/intstr" 9 | ) 10 | 11 | type RPCService struct { 12 | 
BaseManagedResource[*corev1.Service] 13 | 14 | port *int32 15 | nodePort *int32 16 | } 17 | 18 | func NewRPCService(name string, labeller *labeller.Labeller, apiProxy apiproxy.APIProxy) *RPCService { 19 | return &RPCService{ 20 | BaseManagedResource: BaseManagedResource[*corev1.Service]{ 21 | proxy: apiProxy, 22 | labeller: labeller, 23 | name: name, 24 | oldObject: &corev1.Service{}, 25 | newObject: &corev1.Service{}, 26 | }, 27 | } 28 | } 29 | 30 | func (s *RPCService) SetNodePort(port *int32) { 31 | s.nodePort = port 32 | } 33 | 34 | func (s *RPCService) SetPort(port *int32) { 35 | s.port = port 36 | } 37 | 38 | func (s *RPCService) Build() *corev1.Service { 39 | var port int32 = consts.RPCProxyRPCPort 40 | if s.port != nil { 41 | port = *s.port 42 | } 43 | servicePort := corev1.ServicePort{ 44 | Name: "rpc", 45 | Port: port, 46 | TargetPort: intstr.IntOrString{IntVal: port}, 47 | } 48 | if s.nodePort != nil { 49 | servicePort.NodePort = *s.nodePort 50 | } 51 | 52 | s.newObject.ObjectMeta = s.labeller.GetObjectMeta(s.name) 53 | s.newObject.Spec = corev1.ServiceSpec{ 54 | Selector: s.labeller.GetSelectorLabelMap(), 55 | Ports: []corev1.ServicePort{servicePort}, 56 | } 57 | 58 | return s.newObject 59 | } 60 | -------------------------------------------------------------------------------- /pkg/resources/string_secret.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 5 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/labeller" 6 | corev1 "k8s.io/api/core/v1" 7 | ) 8 | 9 | type StringSecret struct { 10 | BaseManagedResource[*corev1.Secret] 11 | } 12 | 13 | func NewStringSecret(name string, reconciler *labeller.Labeller, apiProxy apiproxy.APIProxy) *StringSecret { 14 | return &StringSecret{ 15 | BaseManagedResource: BaseManagedResource[*corev1.Secret]{ 16 | proxy: apiProxy, 17 | labeller: reconciler, 18 | name: name, 19 | oldObject: 
&corev1.Secret{}, 20 | newObject: &corev1.Secret{}, 21 | }, 22 | } 23 | } 24 | 25 | func (s *StringSecret) GetValue(key string) (string, bool) { 26 | v, ok := s.oldObject.Data[key] 27 | if !ok { 28 | return "", ok 29 | } 30 | return string(v), ok 31 | } 32 | 33 | func (s *StringSecret) GetEnvSource() corev1.EnvFromSource { 34 | return corev1.EnvFromSource{ 35 | SecretRef: &corev1.SecretEnvSource{ 36 | LocalObjectReference: corev1.LocalObjectReference{ 37 | Name: s.Name(), 38 | }, 39 | }, 40 | } 41 | } 42 | 43 | func (s *StringSecret) Build() *corev1.Secret { 44 | s.newObject.ObjectMeta = s.labeller.GetObjectMeta(s.name) 45 | s.newObject.Type = corev1.SecretTypeOpaque 46 | return s.newObject 47 | } 48 | 49 | func (s *StringSecret) NeedSync(key, value string) bool { 50 | if !s.Exists() { 51 | return true 52 | } 53 | 54 | v, ok := s.GetValue(key) 55 | if !ok { 56 | return true 57 | } 58 | 59 | if value == "" { 60 | return false 61 | } 62 | 63 | return value != string(v) 64 | } 65 | -------------------------------------------------------------------------------- /pkg/resources/tcp_service.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/apiproxy" 7 | labeller2 "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/labeller" 8 | corev1 "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/util/intstr" 10 | ) 11 | 12 | type TCPService struct { 13 | BaseManagedResource[*corev1.Service] 14 | 15 | serviceType corev1.ServiceType 16 | portCount int32 17 | minPort int32 18 | } 19 | 20 | func NewTCPService(name string, 21 | serviceType corev1.ServiceType, 22 | portCount int32, 23 | minPort int32, 24 | labeller *labeller2.Labeller, 25 | apiProxy apiproxy.APIProxy) *TCPService { 26 | return &TCPService{ 27 | BaseManagedResource: BaseManagedResource[*corev1.Service]{ 28 | proxy: apiProxy, 29 | labeller: labeller, 30 | name: name, 31 | oldObject: 
&corev1.Service{}, 32 | newObject: &corev1.Service{}, 33 | }, 34 | serviceType: serviceType, 35 | portCount: portCount, 36 | minPort: minPort, 37 | } 38 | } 39 | 40 | func (s *TCPService) Build() *corev1.Service { 41 | s.newObject.ObjectMeta = s.labeller.GetObjectMeta(s.name) 42 | 43 | var ports = make([]corev1.ServicePort, 0) 44 | for port := s.minPort; port < s.minPort+s.portCount; port++ { 45 | servicePort := corev1.ServicePort{ 46 | Name: fmt.Sprintf("tcp-%d", port), 47 | Port: port, 48 | TargetPort: intstr.IntOrString{IntVal: port}, 49 | } 50 | if s.serviceType == corev1.ServiceTypeNodePort { 51 | servicePort.NodePort = port 52 | } 53 | ports = append(ports, servicePort) 54 | } 55 | 56 | s.newObject.Spec = corev1.ServiceSpec{ 57 | Selector: s.labeller.GetSelectorLabelMap(), 58 | Type: s.serviceType, 59 | Ports: ports, 60 | } 61 | 62 | return s.newObject 63 | } 64 | -------------------------------------------------------------------------------- /pkg/resources/tls_secret.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | ) 6 | 7 | // TLSSecret represents mounted kubernetes.io/tls secret 8 | // https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets 9 | type TLSSecret struct { 10 | SecretName string 11 | VolumeName string 12 | MountPath string 13 | } 14 | 15 | func NewTLSSecret(secretName string, volumeName string, mountPath string) *TLSSecret { 16 | return &TLSSecret{ 17 | SecretName: secretName, 18 | VolumeName: volumeName, 19 | MountPath: mountPath, 20 | } 21 | } 22 | 23 | func (t *TLSSecret) AddVolume(podSpec *corev1.PodSpec) { 24 | podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ 25 | Name: t.VolumeName, 26 | VolumeSource: corev1.VolumeSource{ 27 | Secret: &corev1.SecretVolumeSource{ 28 | SecretName: t.SecretName, 29 | }, 30 | }, 31 | }) 32 | } 33 | 34 | func (t *TLSSecret) AddVolumeMount(container *corev1.Container) { 35 | 
container.VolumeMounts = append(container.VolumeMounts, corev1.VolumeMount{ 36 | Name: t.VolumeName, 37 | MountPath: t.MountPath, 38 | ReadOnly: true, 39 | }) 40 | } 41 | -------------------------------------------------------------------------------- /pkg/testutil/builders.go: -------------------------------------------------------------------------------- 1 | package testutil 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | 7 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 8 | ) 9 | 10 | const ( 11 | testYtsaurusImage = "test-ytsaurus-image" 12 | dndsNameOne = "dn-1" 13 | ) 14 | 15 | func BuildMinimalYtsaurus(namespace, name string) ytv1.Ytsaurus { 16 | return ytv1.Ytsaurus{ 17 | ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, 18 | Spec: ytv1.YtsaurusSpec{ 19 | CommonSpec: ytv1.CommonSpec{ 20 | CoreImage: testYtsaurusImage, 21 | UseShortNames: true, 22 | }, 23 | IsManaged: true, 24 | EnableFullUpdate: false, 25 | 26 | Discovery: ytv1.DiscoverySpec{ 27 | InstanceSpec: ytv1.InstanceSpec{ 28 | InstanceCount: 3, 29 | }, 30 | }, 31 | PrimaryMasters: ytv1.MastersSpec{ 32 | InstanceSpec: ytv1.InstanceSpec{ 33 | InstanceCount: 3, 34 | Locations: []ytv1.LocationSpec{ 35 | { 36 | LocationType: "MasterChangelogs", 37 | Path: "/yt/master-data/master-changelogs", 38 | }, 39 | { 40 | LocationType: "MasterSnapshots", 41 | Path: "/yt/master-data/master-snapshots", 42 | }, 43 | }, 44 | }, 45 | MasterConnectionSpec: ytv1.MasterConnectionSpec{ 46 | CellTag: 1, 47 | }, 48 | }, 49 | HTTPProxies: []ytv1.HTTPProxiesSpec{ 50 | { 51 | InstanceSpec: ytv1.InstanceSpec{InstanceCount: 3}, 52 | ServiceType: corev1.ServiceTypeNodePort, 53 | }, 54 | }, 55 | DataNodes: []ytv1.DataNodesSpec{ 56 | { 57 | InstanceSpec: ytv1.InstanceSpec{ 58 | InstanceCount: 5, 59 | Locations: []ytv1.LocationSpec{ 60 | { 61 | LocationType: "ChunkStore", 62 | Path: "/yt/node-data/chunk-store", 63 | }, 64 | }, 65 | }, 66 | 
ClusterNodesSpec: ytv1.ClusterNodesSpec{}, 67 | Name: dndsNameOne, 68 | }, 69 | }, 70 | }, 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /pkg/testutil/combined_watcher.go: -------------------------------------------------------------------------------- 1 | package testutil 2 | 3 | import ( 4 | "context" 5 | 6 | "k8s.io/apimachinery/pkg/watch" 7 | "sigs.k8s.io/controller-runtime/pkg/client" 8 | ) 9 | 10 | // CombinedKubeWatcher is a watcher that combines multiple watchers into a single channel preserving watcher interface. 11 | type CombinedKubeWatcher struct { 12 | watchers []watch.Interface 13 | stopCh chan struct{} 14 | muxResultChan chan watch.Event 15 | } 16 | 17 | func NewCombinedKubeWatcher(ctx context.Context, kubecli client.WithWatch, namespace string, lists []client.ObjectList) (*CombinedKubeWatcher, error) { 18 | muxResultChan := make(chan watch.Event) 19 | stopCh := make(chan struct{}) 20 | 21 | var watchers []watch.Interface 22 | for _, objList := range lists { 23 | watcher, err := kubecli.Watch(ctx, objList, &client.ListOptions{ 24 | Namespace: namespace, 25 | }) 26 | if err != nil { 27 | return nil, err 28 | } 29 | go func(ch <-chan watch.Event) { 30 | for { 31 | select { 32 | case msg, ok := <-ch: 33 | if !ok { 34 | return 35 | } 36 | muxResultChan <- msg 37 | case <-stopCh: 38 | return 39 | } 40 | } 41 | }(watcher.ResultChan()) 42 | watchers = append(watchers, watcher) 43 | } 44 | return &CombinedKubeWatcher{ 45 | watchers: watchers, 46 | stopCh: stopCh, 47 | muxResultChan: muxResultChan, 48 | }, nil 49 | } 50 | 51 | func (w *CombinedKubeWatcher) Stop() { 52 | close(w.stopCh) 53 | for _, watcher := range w.watchers { 54 | watcher.Stop() 55 | } 56 | } 57 | 58 | func (w *CombinedKubeWatcher) ResultChan() <-chan watch.Event { 59 | return w.muxResultChan 60 | } 61 | -------------------------------------------------------------------------------- 
/pkg/ytconfig/canondata/TestGetContainerdConfig/exec-node.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 50 2 | jobEnvironment: 3 | cri: 4 | apiRetryTimeoutSeconds: 120 5 | baseCgroup: /yt 6 | criNamespace: yt 7 | sandboxImage: registry.k8s.io/pause:3.8 8 | doNotSetUserId: true 9 | isolated: false 10 | useArtifactBinds: true 11 | userSlots: 42 12 | jobProxyLoggers: 13 | - categoriesFilter: 14 | type: exclude 15 | values: 16 | - Bus 17 | - Concurrency 18 | compression: zstd 19 | format: plain_text 20 | minLogLevel: debug 21 | name: debug 22 | rotationPolicy: 23 | maxTotalSizeToKeep: "3145728" 24 | rotationPeriodMilliseconds: 900000 25 | useTimestampSuffix: false 26 | writerType: file 27 | locations: 28 | - locationType: ChunkCache 29 | path: /yt/hdd1/chunk-cache 30 | - locationType: Slots 31 | path: /yt/hdd2/slots 32 | quota: 5Gi 33 | - locationType: ImageCache 34 | path: /yt/hdd1/images 35 | quota: 4Gi 36 | name: end-a 37 | privileged: false 38 | rack: fake 39 | resources: 40 | limits: 41 | cpu: "20" 42 | memory: 5Gi 43 | tags: 44 | - rack:xn-a 45 | volumeClaimTemplates: 46 | - metadata: 47 | name: hdd1 48 | spec: 49 | accessModes: 50 | - ReadWriteOnce 51 | resources: 52 | requests: 53 | storage: 10Gi 54 | storageClassName: yc-network-hdd 55 | volumeMounts: 56 | - mountPath: /yt/hdd1 57 | name: hdd1 58 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetContainerdConfig/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | grpc={ 3 | address="/yt/hdd1/images/containerd.sock"; 4 | gid=0; 5 | uid=0; 6 | }; 7 | plugins={ 8 | "io.containerd.grpc.v1.cri"={ 9 | cni={ 10 | "bin_dir"="/usr/local/lib/cni"; 11 | "conf_dir"="/etc/cni/net.d"; 12 | }; 13 | containerd={ 14 | "default_runtime_name"=runc; 15 | runtimes={ 16 | runc={ 17 | options={ 18 | SystemdCgroup=%false; 19 | }; 20 | 
"runtime_type"="io.containerd.runc.v2"; 21 | "sandbox_mode"=podsandbox; 22 | }; 23 | }; 24 | }; 25 | "image_pull_progress_timeout"="5m0s"; 26 | registry={ 27 | "config_path"=#; 28 | }; 29 | "restrict_oom_score_adj"=%true; 30 | "sandbox_image"="registry.k8s.io/pause:3.8"; 31 | }; 32 | }; 33 | root="/yt/hdd1/images"; 34 | version=2; 35 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetDataNodeConfig/with-trash-ttl/data-node-with-trash-ttl.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 20 2 | locations: 3 | - locationType: ChunkStore 4 | maxTrashMilliseconds: 60000 5 | medium: nvme 6 | path: /yt/hdd1/chunk-store 7 | name: dn-a 8 | rack: fake 9 | resources: 10 | limits: 11 | cpu: "20" 12 | memory: 5Gi 13 | tags: 14 | - rack:xn-a 15 | volumeClaimTemplates: 16 | - metadata: 17 | name: hdd1 18 | spec: 19 | accessModes: 20 | - ReadWriteOnce 21 | resources: 22 | requests: 23 | storage: 10Gi 24 | storageClassName: yc-network-hdd 25 | volumeMounts: 26 | - mountPath: /yt/hdd1 27 | name: hdd1 28 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetDataNodeConfig/with-watermark/data-node-with-watermark.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 20 2 | locations: 3 | - locationType: ChunkStore 4 | lowWatermark: 50Gi 5 | medium: nvme 6 | path: /yt/hdd1/chunk-store 7 | quota: 1Ti 8 | name: dn-a 9 | rack: fake 10 | resources: 11 | limits: 12 | cpu: "20" 13 | memory: 5Gi 14 | tags: 15 | - rack:xn-a 16 | volumeClaimTemplates: 17 | - metadata: 18 | name: hdd1 19 | spec: 20 | accessModes: 21 | - ReadWriteOnce 22 | resources: 23 | requests: 24 | storage: 10Gi 25 | storageClassName: yc-network-hdd 26 | volumeMounts: 27 | - mountPath: /yt/hdd1 28 | name: hdd1 29 | -------------------------------------------------------------------------------- 
/pkg/ytconfig/canondata/TestGetDataNodeConfig/without-trash-ttl/data-node-without-trash-ttl.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 20 2 | locations: 3 | - locationType: ChunkStore 4 | medium: nvme 5 | path: /yt/hdd1/chunk-store 6 | name: dn-a 7 | rack: fake 8 | resources: 9 | limits: 10 | cpu: "20" 11 | memory: 5Gi 12 | tags: 13 | - rack:xn-a 14 | volumeClaimTemplates: 15 | - metadata: 16 | name: hdd1 17 | spec: 18 | accessModes: 19 | - ReadWriteOnce 20 | resources: 21 | requests: 22 | storage: 10Gi 23 | storageClassName: yc-network-hdd 24 | volumeMounts: 25 | - mountPath: /yt/hdd1 26 | name: hdd1 27 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetDataNodeWithoutYtsaurusConfig/data-node.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 20 2 | locations: 3 | - locationType: ChunkStore 4 | medium: nvme 5 | path: /yt/hdd1/chunk-store 6 | name: dn-a 7 | rack: fake 8 | resources: 9 | limits: 10 | cpu: "20" 11 | memory: 5Gi 12 | tags: 13 | - rack:xn-a 14 | volumeClaimTemplates: 15 | - metadata: 16 | name: hdd1 17 | spec: 18 | accessModes: 19 | - ReadWriteOnce 20 | resources: 21 | requests: 22 | storage: 10Gi 23 | storageClassName: yc-network-hdd 24 | volumeMounts: 25 | - mountPath: /yt/hdd1 26 | name: hdd1 27 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetExecNodeConfig/with-job-resources/exec-node-with-job-resources.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 50 2 | jobProxyLoggers: 3 | - categoriesFilter: 4 | type: exclude 5 | values: 6 | - Bus 7 | - Concurrency 8 | compression: zstd 9 | format: plain_text 10 | minLogLevel: debug 11 | name: debug 12 | rotationPolicy: 13 | maxTotalSizeToKeep: "3145728" 14 | rotationPeriodMilliseconds: 900000 15 | 
useTimestampSuffix: false 16 | writerType: file 17 | jobResources: 18 | limits: 19 | cpu: "99" 20 | memory: 99Gi 21 | locations: 22 | - locationType: ChunkCache 23 | path: /yt/hdd1/chunk-cache 24 | - locationType: Slots 25 | path: /yt/hdd2/slots 26 | quota: 5Gi 27 | name: end-a 28 | privileged: false 29 | rack: fake 30 | resources: 31 | limits: 32 | cpu: "20" 33 | memory: 5Gi 34 | tags: 35 | - rack:xn-a 36 | volumeClaimTemplates: 37 | - metadata: 38 | name: hdd1 39 | spec: 40 | accessModes: 41 | - ReadWriteOnce 42 | resources: 43 | requests: 44 | storage: 10Gi 45 | storageClassName: yc-network-hdd 46 | volumeMounts: 47 | - mountPath: /yt/hdd1 48 | name: hdd1 49 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetExecNodeConfig/without-job-resources/exec-node-without-job-resources.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 50 2 | jobProxyLoggers: 3 | - categoriesFilter: 4 | type: exclude 5 | values: 6 | - Bus 7 | - Concurrency 8 | compression: zstd 9 | format: plain_text 10 | minLogLevel: debug 11 | name: debug 12 | rotationPolicy: 13 | maxTotalSizeToKeep: "3145728" 14 | rotationPeriodMilliseconds: 900000 15 | useTimestampSuffix: false 16 | writerType: file 17 | locations: 18 | - locationType: ChunkCache 19 | path: /yt/hdd1/chunk-cache 20 | - locationType: Slots 21 | path: /yt/hdd2/slots 22 | quota: 5Gi 23 | name: end-a 24 | privileged: false 25 | rack: fake 26 | resources: 27 | limits: 28 | cpu: "20" 29 | memory: 5Gi 30 | tags: 31 | - rack:xn-a 32 | volumeClaimTemplates: 33 | - metadata: 34 | name: hdd1 35 | spec: 36 | accessModes: 37 | - ReadWriteOnce 38 | resources: 39 | requests: 40 | storage: 10Gi 41 | storageClassName: yc-network-hdd 42 | volumeMounts: 43 | - mountPath: /yt/hdd1 44 | name: hdd1 45 | -------------------------------------------------------------------------------- 
/pkg/ytconfig/canondata/TestGetExecNodeConfigWithCri/isolated-containers-with-job-resources/exec-node-isolated-containers-with-job-resources.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 50 2 | jobEnvironment: 3 | cri: 4 | apiRetryTimeoutSeconds: 120 5 | baseCgroup: /yt 6 | criNamespace: yt 7 | sandboxImage: registry.k8s.io/pause:3.8 8 | doNotSetUserId: true 9 | isolated: true 10 | useArtifactBinds: true 11 | userSlots: 42 12 | jobProxyLoggers: 13 | - categoriesFilter: 14 | type: exclude 15 | values: 16 | - Bus 17 | - Concurrency 18 | compression: zstd 19 | format: plain_text 20 | minLogLevel: debug 21 | name: debug 22 | rotationPolicy: 23 | maxTotalSizeToKeep: "3145728" 24 | rotationPeriodMilliseconds: 900000 25 | useTimestampSuffix: false 26 | writerType: file 27 | jobResources: 28 | limits: 29 | cpu: "99" 30 | memory: 99Gi 31 | locations: 32 | - locationType: ChunkCache 33 | path: /yt/hdd1/chunk-cache 34 | - locationType: Slots 35 | path: /yt/hdd2/slots 36 | quota: 5Gi 37 | - locationType: ImageCache 38 | path: /yt/hdd1/images 39 | quota: 4Gi 40 | name: end-a 41 | privileged: false 42 | rack: fake 43 | resources: 44 | limits: 45 | cpu: "20" 46 | memory: 5Gi 47 | tags: 48 | - rack:xn-a 49 | volumeClaimTemplates: 50 | - metadata: 51 | name: hdd1 52 | spec: 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 10Gi 58 | storageClassName: yc-network-hdd 59 | volumeMounts: 60 | - mountPath: /yt/hdd1 61 | name: hdd1 62 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetExecNodeConfigWithCri/isolated-containers-without-job-resources/exec-node-isolated-containers-without-job-resources.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 50 2 | jobEnvironment: 3 | cri: 4 | apiRetryTimeoutSeconds: 120 5 | baseCgroup: /yt 6 | criNamespace: yt 7 | sandboxImage: 
registry.k8s.io/pause:3.8 8 | doNotSetUserId: true 9 | isolated: true 10 | useArtifactBinds: true 11 | userSlots: 42 12 | jobProxyLoggers: 13 | - categoriesFilter: 14 | type: exclude 15 | values: 16 | - Bus 17 | - Concurrency 18 | compression: zstd 19 | format: plain_text 20 | minLogLevel: debug 21 | name: debug 22 | rotationPolicy: 23 | maxTotalSizeToKeep: "3145728" 24 | rotationPeriodMilliseconds: 900000 25 | useTimestampSuffix: false 26 | writerType: file 27 | locations: 28 | - locationType: ChunkCache 29 | path: /yt/hdd1/chunk-cache 30 | - locationType: Slots 31 | path: /yt/hdd2/slots 32 | quota: 5Gi 33 | - locationType: ImageCache 34 | path: /yt/hdd1/images 35 | quota: 4Gi 36 | name: end-a 37 | privileged: false 38 | rack: fake 39 | resources: 40 | limits: 41 | cpu: "20" 42 | memory: 5Gi 43 | tags: 44 | - rack:xn-a 45 | volumeClaimTemplates: 46 | - metadata: 47 | name: hdd1 48 | spec: 49 | accessModes: 50 | - ReadWriteOnce 51 | resources: 52 | requests: 53 | storage: 10Gi 54 | storageClassName: yc-network-hdd 55 | volumeMounts: 56 | - mountPath: /yt/hdd1 57 | name: hdd1 58 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetExecNodeConfigWithCri/single-container-with-job-resources/exec-node-single-container-with-job-resources.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 50 2 | jobEnvironment: 3 | cri: 4 | apiRetryTimeoutSeconds: 120 5 | baseCgroup: /yt 6 | criNamespace: yt 7 | sandboxImage: registry.k8s.io/pause:3.8 8 | doNotSetUserId: true 9 | isolated: false 10 | useArtifactBinds: true 11 | userSlots: 42 12 | jobProxyLoggers: 13 | - categoriesFilter: 14 | type: exclude 15 | values: 16 | - Bus 17 | - Concurrency 18 | compression: zstd 19 | format: plain_text 20 | minLogLevel: debug 21 | name: debug 22 | rotationPolicy: 23 | maxTotalSizeToKeep: "3145728" 24 | rotationPeriodMilliseconds: 900000 25 | useTimestampSuffix: false 26 | 
writerType: file 27 | jobResources: 28 | limits: 29 | cpu: "99" 30 | memory: 99Gi 31 | locations: 32 | - locationType: ChunkCache 33 | path: /yt/hdd1/chunk-cache 34 | - locationType: Slots 35 | path: /yt/hdd2/slots 36 | quota: 5Gi 37 | - locationType: ImageCache 38 | path: /yt/hdd1/images 39 | quota: 4Gi 40 | name: end-a 41 | privileged: false 42 | rack: fake 43 | resources: 44 | limits: 45 | cpu: "20" 46 | memory: 5Gi 47 | tags: 48 | - rack:xn-a 49 | volumeClaimTemplates: 50 | - metadata: 51 | name: hdd1 52 | spec: 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 10Gi 58 | storageClassName: yc-network-hdd 59 | volumeMounts: 60 | - mountPath: /yt/hdd1 61 | name: hdd1 62 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetExecNodeConfigWithCri/single-container-without-job-resources/exec-node-single-container-without-job-resources.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 50 2 | jobEnvironment: 3 | cri: 4 | apiRetryTimeoutSeconds: 120 5 | baseCgroup: /yt 6 | criNamespace: yt 7 | sandboxImage: registry.k8s.io/pause:3.8 8 | doNotSetUserId: true 9 | isolated: false 10 | useArtifactBinds: true 11 | userSlots: 42 12 | jobProxyLoggers: 13 | - categoriesFilter: 14 | type: exclude 15 | values: 16 | - Bus 17 | - Concurrency 18 | compression: zstd 19 | format: plain_text 20 | minLogLevel: debug 21 | name: debug 22 | rotationPolicy: 23 | maxTotalSizeToKeep: "3145728" 24 | rotationPeriodMilliseconds: 900000 25 | useTimestampSuffix: false 26 | writerType: file 27 | locations: 28 | - locationType: ChunkCache 29 | path: /yt/hdd1/chunk-cache 30 | - locationType: Slots 31 | path: /yt/hdd2/slots 32 | quota: 5Gi 33 | - locationType: ImageCache 34 | path: /yt/hdd1/images 35 | quota: 4Gi 36 | name: end-a 37 | privileged: false 38 | rack: fake 39 | resources: 40 | limits: 41 | cpu: "20" 42 | memory: 5Gi 43 | tags: 44 | - rack:xn-a 
45 | volumeClaimTemplates: 46 | - metadata: 47 | name: hdd1 48 | spec: 49 | accessModes: 50 | - ReadWriteOnce 51 | resources: 52 | requests: 53 | storage: 10Gi 54 | storageClassName: yc-network-hdd 55 | volumeMounts: 56 | - mountPath: /yt/hdd1 57 | name: hdd1 58 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetExecNodeWithoutYtsaurusConfig/common.yaml: -------------------------------------------------------------------------------- 1 | hostNetwork: false 2 | useIpv4: false 3 | useIpv6: true 4 | usePorto: false 5 | useShortNames: false 6 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetExecNodeWithoutYtsaurusConfig/exec-node.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 50 2 | jobProxyLoggers: 3 | - categoriesFilter: 4 | type: exclude 5 | values: 6 | - Bus 7 | - Concurrency 8 | compression: zstd 9 | format: plain_text 10 | minLogLevel: debug 11 | name: debug 12 | rotationPolicy: 13 | maxTotalSizeToKeep: "3145728" 14 | rotationPeriodMilliseconds: 900000 15 | useTimestampSuffix: false 16 | writerType: file 17 | locations: 18 | - locationType: ChunkCache 19 | path: /yt/hdd1/chunk-cache 20 | - locationType: Slots 21 | path: /yt/hdd2/slots 22 | quota: 5Gi 23 | name: end-a 24 | privileged: false 25 | rack: fake 26 | resources: 27 | limits: 28 | cpu: "20" 29 | memory: 5Gi 30 | tags: 31 | - rack:xn-a 32 | volumeClaimTemplates: 33 | - metadata: 34 | name: hdd1 35 | spec: 36 | accessModes: 37 | - ReadWriteOnce 38 | resources: 39 | requests: 40 | storage: 10Gi 41 | storageClassName: yc-network-hdd 42 | volumeMounts: 43 | - mountPath: /yt/hdd1 44 | name: hdd1 45 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetExecNodeWithoutYtsaurusConfig/remote-ytsaurus.yaml: 
-------------------------------------------------------------------------------- 1 | metadata: 2 | creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | cellTag: 1000 7 | cellTagMasterCaches: 0 8 | hostAddresses: 9 | - host1.external.address 10 | - host2.external.address 11 | - host3.external.address 12 | hostAddressesMasterCaches: 13 | - host1.external.address 14 | - host2.external.address 15 | - host3.external.address 16 | instanceCount: 3 17 | resources: {} 18 | status: {} 19 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetHTTPProxyConfig/http-proxy.yaml: -------------------------------------------------------------------------------- 1 | httpNodePort: 10000 2 | httpsNodePort: 10001 3 | instanceCount: 3 4 | resources: {} 5 | role: control 6 | serviceType: NodePort 7 | transport: 8 | httpsSecret: 9 | name: yt-test-infra-wildcard 10 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetHTTPProxyConfigDisableCreateOauthUser/http-proxy.yaml: -------------------------------------------------------------------------------- 1 | httpNodePort: 10000 2 | httpsNodePort: 10001 3 | instanceCount: 3 4 | resources: {} 5 | role: control 6 | serviceType: NodePort 7 | transport: 8 | httpsSecret: 9 | name: yt-test-infra-wildcard 10 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetHTTPProxyConfigEnableCreateOauthUser/http-proxy.yaml: -------------------------------------------------------------------------------- 1 | httpNodePort: 10000 2 | httpsNodePort: 10001 3 | instanceCount: 3 4 | resources: {} 5 | role: control 6 | serviceType: NodePort 7 | transport: 8 | httpsSecret: 9 | name: yt-test-infra-wildcard 10 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetMasterWithFixedHostsConfig/ytsaurus.yaml: 
-------------------------------------------------------------------------------- 1 | metadata: 2 | creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | discovery: 7 | resources: {} 8 | enableFullUpdate: false 9 | hostNetwork: false 10 | isManaged: false 11 | primaryMasters: 12 | cellTag: 0 13 | hostAddresses: 14 | - host1.external.address 15 | - host2.external.address 16 | - host3.external.address 17 | instanceCount: 1 18 | locations: 19 | - locationType: MasterChangelogs 20 | path: /yt/master-data/master-changelogs 21 | - locationType: MasterSnapshots 22 | path: /yt/master-data/master-snapshots 23 | loggers: 24 | - compression: none 25 | format: plain_text 26 | minLogLevel: info 27 | name: info 28 | useTimestampSuffix: false 29 | writerType: file 30 | - compression: none 31 | format: plain_text 32 | minLogLevel: error 33 | name: error 34 | useTimestampSuffix: false 35 | writerType: file 36 | - categoriesFilter: 37 | type: exclude 38 | values: 39 | - Bus 40 | compression: zstd 41 | format: plain_text 42 | minLogLevel: debug 43 | name: debug 44 | rotationPolicy: 45 | maxTotalSizeToKeep: 10Gi 46 | rotationPeriodMilliseconds: 900000 47 | useTimestampSuffix: false 48 | writerType: file 49 | maxSnapshotCountToKeep: 1543 50 | resources: {} 51 | sidecars: 52 | - '{name: sleep, image: fakeimage:stable, command: [/bin/sleep], args: [inf]}' 53 | volumeClaimTemplates: 54 | - metadata: 55 | name: master-data 56 | spec: 57 | accessModes: 58 | - ReadWriteOnce 59 | resources: 60 | requests: 61 | storage: 20Gi 62 | volumeMounts: 63 | - mountPath: /yt/master-data 64 | name: master-data 65 | useIpv4: false 66 | useIpv6: true 67 | usePorto: false 68 | useShortNames: false 69 | status: 70 | updateStatus: {} 71 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetMasterWithMonitoringPortConfig/ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | 
creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | discovery: 7 | resources: {} 8 | enableFullUpdate: false 9 | hostNetwork: false 10 | isManaged: false 11 | primaryMasters: 12 | cellTag: 0 13 | instanceCount: 1 14 | locations: 15 | - locationType: MasterChangelogs 16 | path: /yt/master-data/master-changelogs 17 | - locationType: MasterSnapshots 18 | path: /yt/master-data/master-snapshots 19 | loggers: 20 | - compression: none 21 | format: plain_text 22 | minLogLevel: info 23 | name: info 24 | useTimestampSuffix: false 25 | writerType: file 26 | - compression: none 27 | format: plain_text 28 | minLogLevel: error 29 | name: error 30 | useTimestampSuffix: false 31 | writerType: file 32 | - categoriesFilter: 33 | type: exclude 34 | values: 35 | - Bus 36 | compression: zstd 37 | format: plain_text 38 | minLogLevel: debug 39 | name: debug 40 | rotationPolicy: 41 | maxTotalSizeToKeep: 10Gi 42 | rotationPeriodMilliseconds: 900000 43 | useTimestampSuffix: false 44 | writerType: file 45 | maxSnapshotCountToKeep: 1543 46 | monitoringPort: 20010 47 | resources: {} 48 | sidecars: 49 | - '{name: sleep, image: fakeimage:stable, command: [/bin/sleep], args: [inf]}' 50 | volumeClaimTemplates: 51 | - metadata: 52 | name: master-data 53 | spec: 54 | accessModes: 55 | - ReadWriteOnce 56 | resources: 57 | requests: 58 | storage: 20Gi 59 | volumeMounts: 60 | - mountPath: /yt/master-data 61 | name: master-data 62 | useIpv4: false 63 | useIpv6: true 64 | usePorto: false 65 | useShortNames: false 66 | status: 67 | updateStatus: {} 68 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetNativeClientConfig/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | "address_resolver"={ 3 | "enable_ipv4"=%true; 4 | "enable_ipv6"=%false; 5 | retries=1000; 6 | }; 7 | logging={ 8 | writers={ 9 | }; 10 | rules=[ 11 | ]; 12 | "flush_period"=0; 13 | }; 14 | driver={ 15 
| "timestamp_provider"={ 16 | addresses=[ 17 | "ms-test-0.masters-test.fake.svc.fake.zone:9010"; 18 | ]; 19 | }; 20 | "primary_master"={ 21 | addresses=[ 22 | "ms-test-0.masters-test.fake.svc.fake.zone:9010"; 23 | ]; 24 | peers=[ 25 | { 26 | address="ms-test-0.masters-test.fake.svc.fake.zone:9010"; 27 | voting=%true; 28 | }; 29 | ]; 30 | "cell_id"="65726e65-ad6b7562-259-79747361"; 31 | }; 32 | "api_version"=4; 33 | }; 34 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetRPCProxyConfig/rpc-proxy.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 3 2 | resources: {} 3 | role: default 4 | transport: {} 5 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetRPCProxyWithoutOauthConfig/rpc-proxy.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 3 2 | resources: {} 3 | role: default 4 | transport: {} 5 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetRPCProxyWithoutOauthConfig/ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | discovery: 7 | resources: {} 8 | enableFullUpdate: false 9 | hostNetwork: false 10 | isManaged: false 11 | primaryMasters: 12 | cellTag: 0 13 | instanceCount: 1 14 | locations: 15 | - locationType: MasterChangelogs 16 | path: /yt/master-data/master-changelogs 17 | - locationType: MasterSnapshots 18 | path: /yt/master-data/master-snapshots 19 | loggers: 20 | - compression: none 21 | format: plain_text 22 | minLogLevel: info 23 | name: info 24 | useTimestampSuffix: false 25 | writerType: file 26 | - compression: none 27 | format: plain_text 28 | minLogLevel: error 29 | name: error 30 | useTimestampSuffix: false 31 | 
writerType: file 32 | - categoriesFilter: 33 | type: exclude 34 | values: 35 | - Bus 36 | compression: zstd 37 | format: plain_text 38 | minLogLevel: debug 39 | name: debug 40 | rotationPolicy: 41 | maxTotalSizeToKeep: 10Gi 42 | rotationPeriodMilliseconds: 900000 43 | useTimestampSuffix: false 44 | writerType: file 45 | maxSnapshotCountToKeep: 1543 46 | resources: {} 47 | sidecars: 48 | - '{name: sleep, image: fakeimage:stable, command: [/bin/sleep], args: [inf]}' 49 | volumeClaimTemplates: 50 | - metadata: 51 | name: master-data 52 | spec: 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 20Gi 58 | volumeMounts: 59 | - mountPath: /yt/master-data 60 | name: master-data 61 | useIpv4: false 62 | useIpv6: true 63 | usePorto: false 64 | useShortNames: false 65 | status: 66 | updateStatus: {} 67 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetSchedulerWithFixedMasterHostsConfig/ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | discovery: 7 | resources: {} 8 | enableFullUpdate: false 9 | hostNetwork: false 10 | isManaged: false 11 | primaryMasters: 12 | cellTag: 0 13 | hostAddresses: 14 | - host1.external.address 15 | - host2.external.address 16 | - host3.external.address 17 | instanceCount: 1 18 | locations: 19 | - locationType: MasterChangelogs 20 | path: /yt/master-data/master-changelogs 21 | - locationType: MasterSnapshots 22 | path: /yt/master-data/master-snapshots 23 | loggers: 24 | - compression: none 25 | format: plain_text 26 | minLogLevel: info 27 | name: info 28 | useTimestampSuffix: false 29 | writerType: file 30 | - compression: none 31 | format: plain_text 32 | minLogLevel: error 33 | name: error 34 | useTimestampSuffix: false 35 | writerType: file 36 | - categoriesFilter: 37 | type: exclude 38 | values: 39 | - Bus 40 | 
compression: zstd 41 | format: plain_text 42 | minLogLevel: debug 43 | name: debug 44 | rotationPolicy: 45 | maxTotalSizeToKeep: 10Gi 46 | rotationPeriodMilliseconds: 900000 47 | useTimestampSuffix: false 48 | writerType: file 49 | maxSnapshotCountToKeep: 1543 50 | resources: {} 51 | sidecars: 52 | - '{name: sleep, image: fakeimage:stable, command: [/bin/sleep], args: [inf]}' 53 | volumeClaimTemplates: 54 | - metadata: 55 | name: master-data 56 | spec: 57 | accessModes: 58 | - ReadWriteOnce 59 | resources: 60 | requests: 61 | storage: 20Gi 62 | volumeMounts: 63 | - mountPath: /yt/master-data 64 | name: master-data 65 | schedulers: 66 | instanceCount: 3 67 | resources: {} 68 | useIpv4: false 69 | useIpv6: true 70 | usePorto: false 71 | useShortNames: false 72 | status: 73 | updateStatus: {} 74 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetStrawberryControllerConfig/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | "location_proxies"=[ 3 | "http-proxies-lb-test.fake.svc.fake.zone"; 4 | ]; 5 | strawberry={ 6 | root="//sys/strawberry"; 7 | stage=production; 8 | "robot_username"="robot-strawberry-controller"; 9 | }; 10 | controllers={ 11 | chyt={ 12 | "address_resolver"={ 13 | "enable_ipv4"=%true; 14 | "enable_ipv6"=%false; 15 | retries=1000; 16 | }; 17 | }; 18 | jupyt={ 19 | }; 20 | }; 21 | "http_api_endpoint"=":80"; 22 | "http_location_aliases"={ 23 | "http-proxies-lb-test.fake.svc.fake.zone"=[ 24 | test; 25 | ]; 26 | }; 27 | "http_controller_mappings"={ 28 | "*"=chyt; 29 | }; 30 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetStrawberryControllerConfigWithCustomFamilies/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | "location_proxies"=[ 3 | "http-proxies-lb-test.fake.svc.fake.zone"; 4 | ]; 5 | strawberry={ 6 | 
root="//sys/strawberry"; 7 | stage=production; 8 | "robot_username"="robot-strawberry-controller"; 9 | }; 10 | controllers={ 11 | superservice1={ 12 | }; 13 | superservice2={ 14 | }; 15 | superservice3={ 16 | }; 17 | }; 18 | "http_api_endpoint"=":80"; 19 | "http_location_aliases"={ 20 | "http-proxies-lb-test.fake.svc.fake.zone"=[ 21 | test; 22 | ]; 23 | }; 24 | "http_controller_mappings"={ 25 | "*"=superservice2; 26 | "superservice1.some.domain"=superservice1; 27 | "superservice3.some.domain"=superservice3; 28 | }; 29 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetStrawberryControllerConfigWithExtendedHTTPMapping/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | "location_proxies"=[ 3 | "http-proxies-lb-test.fake.svc.fake.zone"; 4 | ]; 5 | strawberry={ 6 | root="//sys/strawberry"; 7 | stage=production; 8 | "robot_username"="robot-strawberry-controller"; 9 | }; 10 | controllers={ 11 | chyt={ 12 | "address_resolver"={ 13 | "enable_ipv4"=%true; 14 | "enable_ipv6"=%false; 15 | retries=1000; 16 | }; 17 | }; 18 | jupyt={ 19 | }; 20 | }; 21 | "http_api_endpoint"=":80"; 22 | "http_location_aliases"={ 23 | "http-proxies-lb-test.fake.svc.fake.zone"=[ 24 | test; 25 | ]; 26 | }; 27 | "http_controller_mappings"={ 28 | "*"=chyt; 29 | "jupyt.some.domain"=jupyt; 30 | }; 31 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetStrawberryInitClusterConfig/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | proxy="http-proxies-lb-test.fake.svc.fake.zone"; 3 | "strawberry_root"="//sys/strawberry"; 4 | families=[ 5 | chyt; 6 | jupyt; 7 | ]; 8 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetTCPProxyConfig/tcp-proxy.yaml: 
-------------------------------------------------------------------------------- 1 | instanceCount: 3 2 | minPort: 0 3 | portCount: 0 4 | resources: {} 5 | role: default 6 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetTabletNodeConfig/tablet-node.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 100 2 | locations: 3 | - locationType: ChunkCache 4 | path: /yt/hdd1/chunk-cache 5 | - locationType: Slots 6 | path: /yt/hdd2/slots 7 | quota: 5Gi 8 | rack: fake 9 | resources: 10 | limits: 11 | cpu: "20" 12 | memory: 5Gi 13 | tags: 14 | - rack:xn-a 15 | volumeClaimTemplates: 16 | - metadata: 17 | name: hdd1 18 | spec: 19 | accessModes: 20 | - ReadWriteOnce 21 | resources: 22 | requests: 23 | storage: 10Gi 24 | storageClassName: yc-network-hdd 25 | volumeMounts: 26 | - mountPath: /yt/hdd1 27 | name: hdd1 28 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetTabletNodeWithoutYtsaurusConfig/common.yaml: -------------------------------------------------------------------------------- 1 | hostNetwork: false 2 | useIpv4: false 3 | useIpv6: true 4 | usePorto: false 5 | useShortNames: false 6 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetTabletNodeWithoutYtsaurusConfig/remote-ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | cellTag: 1000 7 | cellTagMasterCaches: 0 8 | hostAddresses: 9 | - host1.external.address 10 | - host2.external.address 11 | - host3.external.address 12 | hostAddressesMasterCaches: 13 | - host1.external.address 14 | - host2.external.address 15 | - host3.external.address 16 | instanceCount: 3 17 | resources: {} 18 | status: {} 19 | 
-------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetTabletNodeWithoutYtsaurusConfig/tablet-node.yaml: -------------------------------------------------------------------------------- 1 | instanceCount: 100 2 | locations: 3 | - locationType: ChunkCache 4 | path: /yt/hdd1/chunk-cache 5 | - locationType: Slots 6 | path: /yt/hdd2/slots 7 | quota: 5Gi 8 | rack: fake 9 | resources: 10 | limits: 11 | cpu: "20" 12 | memory: 5Gi 13 | tags: 14 | - rack:xn-a 15 | volumeClaimTemplates: 16 | - metadata: 17 | name: hdd1 18 | spec: 19 | accessModes: 20 | - ReadWriteOnce 21 | resources: 22 | requests: 23 | storage: 10Gi 24 | storageClassName: yc-network-hdd 25 | volumeMounts: 26 | - mountPath: /yt/hdd1 27 | name: hdd1 28 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetUIClustersConfig/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | clusters=[ 3 | { 4 | id=test; 5 | name=test; 6 | proxy="http-proxies-lb-test.fake.svc.fake.zone"; 7 | secure=%false; 8 | authentication=basic; 9 | group="My YTsaurus clusters"; 10 | theme=""; 11 | environment=""; 12 | description="My first YTsaurus. Handle with care."; 13 | primaryMaster={ 14 | cellTag=0; 15 | }; 16 | }; 17 | ]; 18 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetUIClustersConfigWithSettings/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | clusters=[ 3 | { 4 | id=test; 5 | name=test; 6 | proxy="http-proxies-lb-test.fake.svc.fake.zone"; 7 | externalProxy="https://my-external-proxy.example.com"; 8 | secure=%true; 9 | authentication=basic; 10 | group="My YTsaurus clusters"; 11 | theme=""; 12 | environment=""; 13 | description="My first YTsaurus. 
Handle with care."; 14 | primaryMaster={ 15 | cellTag=0; 16 | }; 17 | }; 18 | ]; 19 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetUIClustersConfigWithSettings/ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | discovery: 7 | resources: {} 8 | enableFullUpdate: false 9 | hostNetwork: false 10 | isManaged: false 11 | primaryMasters: 12 | cellTag: 0 13 | instanceCount: 1 14 | locations: 15 | - locationType: MasterChangelogs 16 | path: /yt/master-data/master-changelogs 17 | - locationType: MasterSnapshots 18 | path: /yt/master-data/master-snapshots 19 | loggers: 20 | - compression: none 21 | format: plain_text 22 | minLogLevel: info 23 | name: info 24 | useTimestampSuffix: false 25 | writerType: file 26 | - compression: none 27 | format: plain_text 28 | minLogLevel: error 29 | name: error 30 | useTimestampSuffix: false 31 | writerType: file 32 | - categoriesFilter: 33 | type: exclude 34 | values: 35 | - Bus 36 | compression: zstd 37 | format: plain_text 38 | minLogLevel: debug 39 | name: debug 40 | rotationPolicy: 41 | maxTotalSizeToKeep: 10Gi 42 | rotationPeriodMilliseconds: 900000 43 | useTimestampSuffix: false 44 | writerType: file 45 | maxSnapshotCountToKeep: 1543 46 | resources: {} 47 | sidecars: 48 | - '{name: sleep, image: fakeimage:stable, command: [/bin/sleep], args: [inf]}' 49 | volumeClaimTemplates: 50 | - metadata: 51 | name: master-data 52 | spec: 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 20Gi 58 | volumeMounts: 59 | - mountPath: /yt/master-data 60 | name: master-data 61 | ui: 62 | externalProxy: https://my-external-proxy.example.com 63 | odinBaseUrl: http://odin-webservice.odin.svc.cluster.local 64 | resources: {} 65 | secure: true 66 | useInsecureCookies: false 67 | useIpv4: false 68 | useIpv6: true 69 | 
usePorto: false 70 | useShortNames: false 71 | status: 72 | updateStatus: {} 73 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetUICustomConfig/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | odinBaseUrl="http://odin-webservice.odin.svc.cluster.local"; 3 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetUICustomConfig/ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | discovery: 7 | resources: {} 8 | enableFullUpdate: false 9 | hostNetwork: false 10 | isManaged: false 11 | primaryMasters: 12 | cellTag: 0 13 | instanceCount: 1 14 | locations: 15 | - locationType: MasterChangelogs 16 | path: /yt/master-data/master-changelogs 17 | - locationType: MasterSnapshots 18 | path: /yt/master-data/master-snapshots 19 | loggers: 20 | - compression: none 21 | format: plain_text 22 | minLogLevel: info 23 | name: info 24 | useTimestampSuffix: false 25 | writerType: file 26 | - compression: none 27 | format: plain_text 28 | minLogLevel: error 29 | name: error 30 | useTimestampSuffix: false 31 | writerType: file 32 | - categoriesFilter: 33 | type: exclude 34 | values: 35 | - Bus 36 | compression: zstd 37 | format: plain_text 38 | minLogLevel: debug 39 | name: debug 40 | rotationPolicy: 41 | maxTotalSizeToKeep: 10Gi 42 | rotationPeriodMilliseconds: 900000 43 | useTimestampSuffix: false 44 | writerType: file 45 | maxSnapshotCountToKeep: 1543 46 | resources: {} 47 | sidecars: 48 | - '{name: sleep, image: fakeimage:stable, command: [/bin/sleep], args: [inf]}' 49 | volumeClaimTemplates: 50 | - metadata: 51 | name: master-data 52 | spec: 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 20Gi 58 | volumeMounts: 59 | - mountPath: 
/yt/master-data 60 | name: master-data 61 | ui: 62 | externalProxy: https://my-external-proxy.example.com 63 | odinBaseUrl: http://odin-webservice.odin.svc.cluster.local 64 | resources: {} 65 | secure: true 66 | useInsecureCookies: false 67 | useIpv4: false 68 | useIpv6: true 69 | usePorto: false 70 | useShortNames: false 71 | status: 72 | updateStatus: {} 73 | -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetUICustomConfigWithSettings/test.canondata: -------------------------------------------------------------------------------- 1 | { 2 | odinBaseUrl="http://odin-webservice.odin.svc.cluster.local"; 3 | } -------------------------------------------------------------------------------- /pkg/ytconfig/canondata/TestGetUICustomConfigWithSettings/ytsaurus.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | creationTimestamp: null 3 | name: test 4 | namespace: fake 5 | spec: 6 | discovery: 7 | resources: {} 8 | enableFullUpdate: false 9 | hostNetwork: false 10 | isManaged: false 11 | primaryMasters: 12 | cellTag: 0 13 | instanceCount: 1 14 | locations: 15 | - locationType: MasterChangelogs 16 | path: /yt/master-data/master-changelogs 17 | - locationType: MasterSnapshots 18 | path: /yt/master-data/master-snapshots 19 | loggers: 20 | - compression: none 21 | format: plain_text 22 | minLogLevel: info 23 | name: info 24 | useTimestampSuffix: false 25 | writerType: file 26 | - compression: none 27 | format: plain_text 28 | minLogLevel: error 29 | name: error 30 | useTimestampSuffix: false 31 | writerType: file 32 | - categoriesFilter: 33 | type: exclude 34 | values: 35 | - Bus 36 | compression: zstd 37 | format: plain_text 38 | minLogLevel: debug 39 | name: debug 40 | rotationPolicy: 41 | maxTotalSizeToKeep: 10Gi 42 | rotationPeriodMilliseconds: 900000 43 | useTimestampSuffix: false 44 | writerType: file 45 | maxSnapshotCountToKeep: 1543 46 | resources: {} 47 | 
sidecars: 48 | - '{name: sleep, image: fakeimage:stable, command: [/bin/sleep], args: [inf]}' 49 | volumeClaimTemplates: 50 | - metadata: 51 | name: master-data 52 | spec: 53 | accessModes: 54 | - ReadWriteOnce 55 | resources: 56 | requests: 57 | storage: 20Gi 58 | volumeMounts: 59 | - mountPath: /yt/master-data 60 | name: master-data 61 | ui: 62 | odinBaseUrl: http://odin-webservice.odin.svc.cluster.local 63 | resources: {} 64 | secure: false 65 | useInsecureCookies: false 66 | useIpv4: false 67 | useIpv6: true 68 | usePorto: false 69 | useShortNames: false 70 | status: 71 | updateStatus: {} 72 | -------------------------------------------------------------------------------- /pkg/ytconfig/cell_id.go: -------------------------------------------------------------------------------- 1 | package ytconfig 2 | 3 | import ( 4 | "crypto/rand" 5 | "encoding/base64" 6 | "fmt" 7 | "strings" 8 | 9 | "github.com/google/uuid" 10 | ) 11 | 12 | func generateCellID(cellTag int16) string { 13 | cellID, err := uuid.NewRandomFromReader(strings.NewReader("ytsaurus-kubernetes-operator")) 14 | if err != nil { 15 | panic(err) 16 | } 17 | uuidBytes, err := cellID.MarshalBinary() 18 | if err != nil { 19 | panic(err) 20 | } 21 | 22 | uuidBytes[4] = byte(cellTag >> 8) 23 | uuidBytes[5] = byte(cellTag & 0xff) 24 | 25 | masterCellType := 601 26 | uuidBytes[6] = byte(masterCellType >> 8) 27 | uuidBytes[7] = byte(masterCellType & 0xff) 28 | 29 | getGUIDPart := func(data []byte) string { 30 | format := strings.Repeat("%02x", len(data)) 31 | args := make([]any, 0, len(data)) 32 | for _, value := range data { 33 | args = append(args, value) 34 | } 35 | 36 | part := fmt.Sprintf(format, args...) 
37 | return strings.TrimLeft(part, "0") 38 | } 39 | return fmt.Sprintf("%s-%s-%s-%s", getGUIDPart(uuidBytes[12:]), getGUIDPart(uuidBytes[8:12]), getGUIDPart(uuidBytes[4:8]), getGUIDPart(uuidBytes[:4])) 40 | } 41 | 42 | func RandString(n int) string { 43 | b := make([]byte, n) 44 | _, err := rand.Read(b) 45 | if err != nil { 46 | panic(err) 47 | } 48 | return base64.RawURLEncoding.EncodeToString(b) 49 | } 50 | -------------------------------------------------------------------------------- /pkg/ytconfig/cri.go: -------------------------------------------------------------------------------- 1 | package ytconfig 2 | 3 | import ( 4 | "path" 5 | 6 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 7 | 8 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 9 | ) 10 | 11 | func GetContainerdSocketPath(spec *ytv1.ExecNodesSpec) string { 12 | if location := ytv1.FindFirstLocation(spec.Locations, ytv1.LocationTypeImageCache); location != nil { 13 | return path.Join(location.Path, consts.ContainerdSocketName) 14 | } 15 | // In non-overlayfs setup CRI could work without own location. 
16 | return path.Join(consts.ConfigMountPoint, consts.ContainerdSocketName) 17 | } 18 | 19 | func (g *NodeGenerator) GetContainerdConfig(spec *ytv1.ExecNodesSpec) ([]byte, error) { 20 | criSpec := spec.JobEnvironment.CRI 21 | 22 | var rootPath *string 23 | if location := ytv1.FindFirstLocation(spec.Locations, ytv1.LocationTypeImageCache); location != nil { 24 | rootPath = &location.Path 25 | } 26 | 27 | config := map[string]any{ 28 | "version": 2, 29 | "root": rootPath, 30 | 31 | "grpc": map[string]any{ 32 | "address": GetContainerdSocketPath(spec), 33 | "uid": 0, 34 | "gid": 0, 35 | }, 36 | 37 | "plugins": map[string]any{ 38 | "io.containerd.grpc.v1.cri": map[string]any{ 39 | "sandbox_image": criSpec.SandboxImage, 40 | "restrict_oom_score_adj": true, 41 | "image_pull_progress_timeout": "5m0s", 42 | 43 | "cni": map[string]any{ 44 | "conf_dir": "/etc/cni/net.d", 45 | "bin_dir": "/usr/local/lib/cni", 46 | }, 47 | 48 | "containerd": map[string]any{ 49 | "default_runtime_name": "runc", 50 | "runtimes": map[string]any{ 51 | "runc": map[string]any{ 52 | "runtime_type": "io.containerd.runc.v2", 53 | "sandbox_mode": "podsandbox", 54 | "options": map[string]any{ 55 | "SystemdCgroup": false, 56 | }, 57 | }, 58 | }, 59 | }, 60 | 61 | "registry": map[string]any{ 62 | "config_path": criSpec.RegistryConfigPath, 63 | }, 64 | }, 65 | }, 66 | } 67 | 68 | // TODO(khlebnikov): Refactor and remove this mess with formats. 69 | return marshallYsonConfig(config) 70 | } 71 | -------------------------------------------------------------------------------- /pkg/ytconfig/discovery.go: -------------------------------------------------------------------------------- 1 | package ytconfig 2 | 3 | import ( 4 | "k8s.io/utils/ptr" 5 | 6 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 7 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 8 | ) 9 | 10 | type Discovery struct { 11 | // Unfortunately AddressList is not applicable here, since 12 | // config field is named differently. 
13 | Addresses []string `yson:"server_addresses"` 14 | } 15 | 16 | type DiscoveryServer struct { 17 | CommonServer 18 | 19 | BusClient *Bus `yson:"bus_client,omitempty"` 20 | DiscoveryServer Discovery `yson:"discovery_server"` 21 | } 22 | 23 | func getDiscoveryLogging(spec *ytv1.DiscoverySpec) Logging { 24 | return createLogging( 25 | &spec.InstanceSpec, 26 | "discovery", 27 | []ytv1.TextLoggerSpec{defaultInfoLoggerSpec(), defaultStderrLoggerSpec()}) 28 | } 29 | 30 | func getDiscoveryServerCarcass(spec *ytv1.DiscoverySpec) (DiscoveryServer, error) { 31 | var c DiscoveryServer 32 | 33 | c.MonitoringPort = ptr.Deref(spec.InstanceSpec.MonitoringPort, consts.DiscoveryMonitoringPort) 34 | c.RPCPort = consts.DiscoveryRPCPort 35 | 36 | c.Logging = getDiscoveryLogging(spec) 37 | 38 | return c, nil 39 | } 40 | -------------------------------------------------------------------------------- /pkg/ytconfig/master_caches.go: -------------------------------------------------------------------------------- 1 | package ytconfig 2 | 3 | import ( 4 | "k8s.io/utils/ptr" 5 | 6 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 7 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 8 | ) 9 | 10 | type MasterCacheServer struct { 11 | CommonServer 12 | 13 | BusClient *Bus `yson:"bus_client,omitempty"` 14 | } 15 | 16 | func getMasterCachesLogging(spec *ytv1.MasterCachesSpec) Logging { 17 | return createLogging( 18 | &spec.InstanceSpec, 19 | "master-cache", 20 | []ytv1.TextLoggerSpec{defaultInfoLoggerSpec(), defaultStderrLoggerSpec()}) 21 | } 22 | 23 | func getMasterCachesCarcass(spec *ytv1.MasterCachesSpec) (MasterCacheServer, error) { 24 | var mcs MasterCacheServer 25 | mcs.RPCPort = consts.MasterCachesRPCPort 26 | mcs.MonitoringPort = ptr.Deref(spec.MonitoringPort, consts.MasterCachesMonitoringPort) 27 | mcs.Logging = getMasterCachesLogging(spec) 28 | 29 | return mcs, nil 30 | } 31 | -------------------------------------------------------------------------------- 
/pkg/ytconfig/query_tracker.go: -------------------------------------------------------------------------------- 1 | package ytconfig 2 | 3 | import ( 4 | "k8s.io/utils/ptr" 5 | 6 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 7 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 8 | ) 9 | 10 | type QueryTrackerServer struct { 11 | CommonServer 12 | User string `yson:"user"` 13 | CreateStateTablesOnStartup bool `yson:"create_state_tables_on_startup"` 14 | } 15 | 16 | func getQueryTrackerLogging(spec *ytv1.QueryTrackerSpec) Logging { 17 | return createLogging( 18 | &spec.InstanceSpec, 19 | "query-tracker", 20 | []ytv1.TextLoggerSpec{defaultInfoLoggerSpec(), defaultStderrLoggerSpec()}) 21 | } 22 | 23 | func getQueryTrackerServerCarcass(spec *ytv1.QueryTrackerSpec) (QueryTrackerServer, error) { 24 | var c QueryTrackerServer 25 | c.RPCPort = consts.QueryTrackerRPCPort 26 | c.MonitoringPort = ptr.Deref(spec.InstanceSpec.MonitoringPort, consts.QueryTrackerMonitoringPort) 27 | c.User = "query_tracker" 28 | c.CreateStateTablesOnStartup = true 29 | 30 | c.Logging = getQueryTrackerLogging(spec) 31 | 32 | return c, nil 33 | } 34 | -------------------------------------------------------------------------------- /pkg/ytconfig/queue_agent.go: -------------------------------------------------------------------------------- 1 | package ytconfig 2 | 3 | import ( 4 | "k8s.io/utils/ptr" 5 | 6 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 7 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 8 | ) 9 | 10 | type QueueAgent struct { 11 | Stage string `yson:"stage"` 12 | } 13 | 14 | type QueueAgentServer struct { 15 | CommonServer 16 | 17 | BusClient *Bus `yson:"bus_client,omitempty"` 18 | User string `yson:"user"` 19 | QueueAgent QueueAgent `yson:"queue_agent"` 20 | } 21 | 22 | func getQueueAgentLogging(spec *ytv1.QueueAgentSpec) Logging { 23 | return createLogging( 24 | &spec.InstanceSpec, 25 | "queue-agent", 26 | 
[]ytv1.TextLoggerSpec{defaultInfoLoggerSpec(), defaultStderrLoggerSpec()}) 27 | } 28 | 29 | func getQueueAgentServerCarcass(spec *ytv1.QueueAgentSpec) (QueueAgentServer, error) { 30 | var c QueueAgentServer 31 | c.RPCPort = consts.QueueAgentRPCPort 32 | 33 | c.MonitoringPort = ptr.Deref(spec.InstanceSpec.MonitoringPort, consts.QueueAgentMonitoringPort) 34 | c.User = "queue_agent" 35 | c.QueueAgent.Stage = "production" 36 | 37 | c.Logging = getQueueAgentLogging(spec) 38 | 39 | return c, nil 40 | } 41 | -------------------------------------------------------------------------------- /pkg/ytconfig/scheduler.go: -------------------------------------------------------------------------------- 1 | package ytconfig 2 | 3 | import ( 4 | "k8s.io/utils/ptr" 5 | 6 | ytv1 "github.com/ytsaurus/ytsaurus-k8s-operator/api/v1" 7 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/consts" 8 | ) 9 | 10 | type OperationsCleaner struct { 11 | EnableOperationArchivation *bool `yson:"enable_operation_archivation,omitempty"` 12 | } 13 | 14 | type Scheduler struct { 15 | OperationsCleaner OperationsCleaner `yson:"operations_cleaner"` 16 | } 17 | 18 | type SchedulerServer struct { 19 | CommonServer 20 | Scheduler Scheduler `yson:"scheduler"` 21 | } 22 | 23 | type AlertManager struct { 24 | LowCpuUsageAlertStatisics []string `yson:"low_cpu_usage_alert_statistics,omitempty"` 25 | } 26 | 27 | type ControllerAgent struct { 28 | EnableTmpfs bool `yson:"enable_tmpfs"` 29 | UseColumnarStatisticsDefault bool `yson:"use_columnar_statistics_default"` 30 | 31 | AlertManager AlertManager `yson:"alert_manager"` 32 | } 33 | 34 | type ControllerAgentServer struct { 35 | CommonServer 36 | ControllerAgent ControllerAgent `yson:"controller_agent"` 37 | } 38 | 39 | func getSchedulerLogging(spec *ytv1.SchedulersSpec) Logging { 40 | return createLogging( 41 | &spec.InstanceSpec, 42 | "scheduler", 43 | []ytv1.TextLoggerSpec{defaultInfoLoggerSpec(), defaultStderrLoggerSpec()}) 44 | } 45 | 46 | func 
getControllerAgentLogging(spec *ytv1.ControllerAgentsSpec) Logging { 47 | return createLogging( 48 | &spec.InstanceSpec, 49 | "controller-agent", 50 | []ytv1.TextLoggerSpec{defaultInfoLoggerSpec(), defaultStderrLoggerSpec()}) 51 | } 52 | 53 | func getSchedulerServerCarcass(spec *ytv1.SchedulersSpec) (SchedulerServer, error) { 54 | var c SchedulerServer 55 | c.RPCPort = consts.SchedulerRPCPort 56 | c.MonitoringPort = ptr.Deref(spec.MonitoringPort, consts.SchedulerMonitoringPort) 57 | c.Logging = getSchedulerLogging(spec) 58 | 59 | return c, nil 60 | } 61 | 62 | func getControllerAgentServerCarcass(spec *ytv1.ControllerAgentsSpec) (ControllerAgentServer, error) { 63 | var c ControllerAgentServer 64 | c.RPCPort = consts.ControllerAgentRPCPort 65 | c.MonitoringPort = ptr.Deref(spec.MonitoringPort, consts.ControllerAgentMonitoringPort) 66 | c.Logging = getControllerAgentLogging(spec) 67 | 68 | return c, nil 69 | } 70 | -------------------------------------------------------------------------------- /pkg/ytconfig/ui.go: -------------------------------------------------------------------------------- 1 | package ytconfig 2 | 3 | type UIAuthenticationType string 4 | 5 | const ( 6 | uiAuthenticationBasic UIAuthenticationType = "basic" 7 | ) 8 | 9 | type UIPrimaryMaster struct { 10 | CellTag int16 `yson:"cellTag"` 11 | } 12 | 13 | type UICluster struct { 14 | ID string `yson:"id"` 15 | Name string `yson:"name"` 16 | Proxy string `yson:"proxy"` 17 | ExternalProxy *string `yson:"externalProxy,omitempty"` 18 | Secure bool `yson:"secure"` 19 | Authentication UIAuthenticationType `yson:"authentication"` 20 | Group string `yson:"group"` 21 | Theme string `yson:"theme"` 22 | Environment string `yson:"environment"` 23 | Description string `yson:"description"` 24 | PrimaryMaster UIPrimaryMaster `yson:"primaryMaster"` 25 | } 26 | 27 | type UIClusters struct { 28 | Clusters []UICluster `yson:"clusters"` 29 | } 30 | 31 | func getUIClusterCarcass() UICluster { 32 | return UICluster{ 33 | 
Secure: false, 34 | Authentication: uiAuthenticationBasic, 35 | Group: "My YTsaurus clusters", 36 | Description: "My first YTsaurus. Handle with care.", 37 | } 38 | } 39 | 40 | type UICustomSettings struct { 41 | DirectDownload *bool `yson:"directDownload,omitempty"` 42 | } 43 | 44 | type UICustom struct { 45 | OdinBaseUrl *string `yson:"odinBaseUrl,omitempty"` 46 | Settings *UICustomSettings `yson:"uiSettings,omitempty"` 47 | } 48 | -------------------------------------------------------------------------------- /test/e2e/checks_test.go: -------------------------------------------------------------------------------- 1 | package controllers_test 2 | 3 | func getInitializingStageJobNames() []string { 4 | return []string{ 5 | "yt-master-init-job-default", 6 | "yt-client-init-job-user", 7 | "yt-scheduler-init-job-user", 8 | "yt-scheduler-init-job-op-archive", 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /test/e2e/watcher_test.go: -------------------------------------------------------------------------------- 1 | package controllers_test 2 | 3 | import ( 4 | "context" 5 | 6 | . 
"github.com/onsi/gomega" 7 | batchv1 "k8s.io/api/batch/v1" 8 | 9 | "k8s.io/apimachinery/pkg/watch" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | 12 | "github.com/ytsaurus/ytsaurus-k8s-operator/pkg/testutil" 13 | ) 14 | 15 | type NamespaceWatcher struct { 16 | kubeWatcher watch.Interface 17 | stopCh chan struct{} 18 | events []watch.Event 19 | } 20 | 21 | func NewNamespaceWatcher(ctx context.Context, namespace string) *NamespaceWatcher { 22 | watcher, err := testutil.NewCombinedKubeWatcher(ctx, k8sClient, namespace, []client.ObjectList{ 23 | &batchv1.JobList{}, 24 | }) 25 | Expect(err).ToNot(HaveOccurred()) 26 | return &NamespaceWatcher{ 27 | kubeWatcher: watcher, 28 | stopCh: make(chan struct{}), 29 | } 30 | } 31 | 32 | func (w *NamespaceWatcher) Start() { 33 | go w.loop() 34 | } 35 | 36 | func (w *NamespaceWatcher) Stop() { 37 | close(w.stopCh) 38 | w.kubeWatcher.Stop() 39 | } 40 | 41 | func (w *NamespaceWatcher) GetRawEvents() []watch.Event { 42 | return w.events 43 | } 44 | 45 | // TODO: not really generic, but good enough for the start. 46 | func (w *NamespaceWatcher) GetCompletedJobNames() []string { 47 | var result []string 48 | for _, ev := range w.events { 49 | if job, ok := ev.Object.(*batchv1.Job); ok { 50 | if job.Status.Succeeded == 0 { 51 | continue 52 | } 53 | result = append(result, job.Name) 54 | } 55 | } 56 | return result 57 | } 58 | 59 | func (w *NamespaceWatcher) loop() { 60 | for { 61 | select { 62 | case <-w.stopCh: 63 | return 64 | case ev := <-w.kubeWatcher.ResultChan(): 65 | w.events = append(w.events, ev) 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /ytop-chart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
--------------------------------------------------------------------------------
/ytop-chart/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: ytop-chart
description: A Helm chart deploying the YTsaurus Kubernetes operator
type: application
version: "0.0.0-alpha"
appVersion: "0.0.0-alpha"
sources:
  - https://github.com/ytsaurus/ytsaurus-k8s-operator
--------------------------------------------------------------------------------
/ytop-chart/templates/_helpers.tpl:
--------------------------------------------------------------------------------
{{/*
Expand the name of the chart.
*/}}
{{- define "ytop-chart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "ytop-chart.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
28 | */}} 29 | {{- define "ytop-chart.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "ytop-chart.labels" -}} 37 | helm.sh/chart: {{ include "ytop-chart.chart" . }} 38 | {{ include "ytop-chart.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "ytop-chart.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "ytop-chart.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "ytop-chart.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "ytop-chart.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /ytop-chart/templates/leader-election-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-leader-election-role 5 | labels: 6 | {{- include "ytop-chart.labels" . 
| nindent 4 }} 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - configmaps 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - create 17 | - update 18 | - patch 19 | - delete 20 | - apiGroups: 21 | - coordination.k8s.io 22 | resources: 23 | - leases 24 | verbs: 25 | - get 26 | - list 27 | - watch 28 | - create 29 | - update 30 | - patch 31 | - delete 32 | - apiGroups: 33 | - "" 34 | resources: 35 | - events 36 | verbs: 37 | - create 38 | - patch 39 | --- 40 | apiVersion: rbac.authorization.k8s.io/v1 41 | kind: RoleBinding 42 | metadata: 43 | name: {{ include "ytop-chart.fullname" . }}-leader-election-rolebinding 44 | labels: 45 | {{- include "ytop-chart.labels" . | nindent 4 }} 46 | roleRef: 47 | apiGroup: rbac.authorization.k8s.io 48 | kind: Role 49 | name: '{{ include "ytop-chart.fullname" . }}-leader-election-role' 50 | subjects: 51 | - kind: ServiceAccount 52 | name: '{{ include "ytop-chart.fullname" . }}-controller-manager' 53 | namespace: '{{ .Release.Namespace }}' -------------------------------------------------------------------------------- /ytop-chart/templates/manager-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-manager-config 5 | labels: 6 | {{- include "ytop-chart.labels" . | nindent 4 }} 7 | data: 8 | controller_manager_config.yaml: {{ .Values.managerConfig.controllerManagerConfigYaml 9 | | toYaml | indent 1 }} -------------------------------------------------------------------------------- /ytop-chart/templates/metrics-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-metrics-cert 5 | labels: 6 | {{- include "ytop-chart.labels" . | nindent 4 }} 7 | spec: 8 | dnsNames: 9 | - '{{ include "ytop-chart.fullname" . 
}}-controller-manager-metrics-service.{{ .Release.Namespace 10 | }}.svc' 11 | - '{{ include "ytop-chart.fullname" . }}-controller-manager-metrics-service.{{ .Release.Namespace 12 | }}.svc.{{ .Values.kubernetesClusterDomain }}' 13 | issuerRef: 14 | kind: Issuer 15 | name: '{{ include "ytop-chart.fullname" . }}-selfsigned-issuer' 16 | secretName: yt-operator-metrics-cert -------------------------------------------------------------------------------- /ytop-chart/templates/metrics-reader-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-metrics-reader 5 | labels: 6 | {{- include "ytop-chart.labels" . | nindent 4 }} 7 | rules: 8 | - nonResourceURLs: 9 | - /metrics 10 | verbs: 11 | - get -------------------------------------------------------------------------------- /ytop-chart/templates/metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-controller-manager-metrics-service 5 | labels: 6 | control-plane: controller-manager 7 | {{- include "ytop-chart.labels" . | nindent 4 }} 8 | spec: 9 | type: {{ .Values.metricsService.type }} 10 | selector: 11 | control-plane: controller-manager 12 | {{- include "ytop-chart.selectorLabels" . | nindent 4 }} 13 | ports: 14 | {{- .Values.metricsService.ports | toYaml | nindent 2 }} -------------------------------------------------------------------------------- /ytop-chart/templates/proxy-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-proxy-role 5 | labels: 6 | {{- include "ytop-chart.labels" . 
| nindent 4 }} 7 | rules: 8 | - apiGroups: 9 | - authentication.k8s.io 10 | resources: 11 | - tokenreviews 12 | verbs: 13 | - create 14 | - apiGroups: 15 | - authorization.k8s.io 16 | resources: 17 | - subjectaccessreviews 18 | verbs: 19 | - create 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: ClusterRoleBinding 23 | metadata: 24 | name: {{ include "ytop-chart.fullname" . }}-proxy-rolebinding 25 | labels: 26 | {{- include "ytop-chart.labels" . | nindent 4 }} 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: '{{ include "ytop-chart.fullname" . }}-proxy-role' 31 | subjects: 32 | - kind: ServiceAccount 33 | name: '{{ include "ytop-chart.fullname" . }}-controller-manager' 34 | namespace: '{{ .Release.Namespace }}' -------------------------------------------------------------------------------- /ytop-chart/templates/selfsigned-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Issuer 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-selfsigned-issuer 5 | labels: 6 | {{- include "ytop-chart.labels" . | nindent 4 }} 7 | spec: 8 | selfSigned: {} -------------------------------------------------------------------------------- /ytop-chart/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-controller-manager 5 | labels: 6 | {{- include "ytop-chart.labels" . 
| nindent 4 }} 7 | annotations: 8 | {{- toYaml .Values.controllerManager.serviceAccount.annotations | nindent 4 }} -------------------------------------------------------------------------------- /ytop-chart/templates/webhook-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-webhook-cert 5 | labels: 6 | {{- include "ytop-chart.labels" . | nindent 4 }} 7 | spec: 8 | dnsNames: 9 | - '{{ include "ytop-chart.fullname" . }}-webhook-service.{{ .Release.Namespace }}.svc' 10 | - '{{ include "ytop-chart.fullname" . }}-webhook-service.{{ .Release.Namespace }}.svc.{{ 11 | .Values.kubernetesClusterDomain }}' 12 | issuerRef: 13 | kind: Issuer 14 | name: '{{ include "ytop-chart.fullname" . }}-selfsigned-issuer' 15 | secretName: yt-operator-webhook-cert -------------------------------------------------------------------------------- /ytop-chart/templates/webhook-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "ytop-chart.fullname" . }}-webhook-service 5 | labels: 6 | app.kubernetes.io/component: webhook 7 | app.kubernetes.io/created-by: ytsaurus-k8s-operator 8 | app.kubernetes.io/part-of: ytsaurus-k8s-operator 9 | {{- include "ytop-chart.labels" . | nindent 4 }} 10 | spec: 11 | type: {{ .Values.webhookService.type }} 12 | selector: 13 | control-plane: controller-manager 14 | {{- include "ytop-chart.selectorLabels" . | nindent 4 }} 15 | ports: 16 | {{- .Values.webhookService.ports | toYaml | nindent 2 }} --------------------------------------------------------------------------------