├── .devcontainer ├── devcontainer.json └── devenv │ └── Dockerfile ├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── dev.yml │ ├── nightly.yml │ └── rust.yml ├── .gitignore ├── CODEOWNERS ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── Dockerfile.e2e ├── LICENSE.txt ├── Makefile ├── README-old.md ├── README.md ├── charts ├── .dockerignore ├── .gitignore ├── Dockerfile.tmpl ├── README.md ├── healthscope │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── deployment.yaml │ │ ├── rolebinding.yaml │ │ └── service.yaml │ └── values.yaml ├── porter.yaml ├── rudr │ ├── .helmignore │ ├── Chart.yaml │ ├── crds │ │ ├── appconfigs.yaml │ │ ├── componentinstances.yaml │ │ ├── componentschematics.yaml │ │ ├── healthscope.yaml │ │ ├── scopes.yaml │ │ ├── traits.yaml │ │ └── workloadtypes.yaml │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── deployment.yaml │ │ ├── rolebinding.yaml │ │ ├── scopes.yaml │ │ ├── traits.yaml │ │ └── workloadtypes.yaml │ └── values.yaml └── waitForCRDs.sh ├── docs ├── README.md ├── concepts │ ├── application-configuration.md │ ├── component-schematic.md │ ├── scopes.md │ ├── traits.md │ └── workloads.md ├── developer │ ├── debug.md │ └── writing_a_trait.md ├── faq.md ├── how-to │ ├── app │ │ ├── Dockerfile │ │ └── app.py │ ├── create_component_from_scratch.md │ ├── migrating.md │ └── using_helm_kustomize_manage_oam.md ├── media │ ├── app-operator-role.png │ ├── appconfigcomic.PNG │ ├── application.png │ ├── appscopecomic.PNG │ ├── component.png │ ├── componentcomic.PNG │ ├── developer-role.png │ ├── infra-operator-role.png │ ├── k8s_application_complexities.png │ ├── rudr-how-it-works.png │ ├── rudr-logo.png │ ├── runtime.png │ ├── scopes.png │ ├── traitcomic.PNG │ └── traits.png ├── setup │ ├── appendix.md │ ├── install.md │ └── install_windows.md └── tutorials │ ├── deploy_and_update.md │ ├── 
deploy_openfaas_workload.md │ └── deploy_prometheus_workload.md ├── examples ├── README.md ├── autoscaler.yaml ├── charts │ ├── README.md │ └── hello-rudr │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── appconfig.yaml │ │ └── component.yaml │ │ └── values.yaml ├── components.yaml ├── env-vars.yaml ├── first-app-config.yaml ├── health-scope-config.yaml ├── helloworld-python-component.yaml ├── image-pull-secret.yaml ├── kustomize │ └── hello-rudr │ │ ├── base │ │ ├── appconfig.yaml │ │ ├── component.yaml │ │ └── kustomization.yaml │ │ └── overlay │ │ └── production │ │ ├── kustomization.yaml │ │ └── patch.yaml ├── manual-scaler.yaml ├── multi-component.yaml ├── multi-server.yaml ├── nginx-component.yaml ├── openfaasapp.yaml ├── prometheusapp.yaml ├── replicable-task.yaml ├── singleton-nginx-config.yaml ├── task.yaml ├── volumes.yaml ├── voting │ ├── README.md │ ├── components.yaml │ └── configuration.yaml └── worker.yaml ├── governance.md ├── healthscope ├── Cargo.toml ├── README.md └── src │ ├── lib.rs │ └── main.rs └── src ├── instigator.rs ├── instigator_test.rs ├── kube_event.rs ├── lib.rs ├── lifecycle.rs ├── lifecycle_test.rs ├── main.rs ├── schematic.rs ├── schematic ├── component.rs ├── component_instance.rs ├── component_test.rs ├── configuration.rs ├── configuration_test.rs ├── parameter.rs ├── parameter_test.rs ├── scopes.rs ├── scopes │ ├── health.rs │ └── network.rs ├── traits.rs ├── traits │ ├── autoscaler.rs │ ├── autoscaler_test.rs │ ├── empty.rs │ ├── ingress.rs │ ├── ingress_test.rs │ ├── manual_scaler.rs │ ├── manual_scaler_test.rs │ ├── util.rs │ └── volume_mounter.rs ├── traits_test.rs └── variable.rs ├── trait_manager.rs ├── workload_type.rs ├── workload_type ├── extended_workload.rs ├── extended_workload │ ├── openfaas.rs │ └── others.rs ├── server.rs ├── statefulset_builder.rs ├── task.rs ├── worker.rs └── workload_builder.rs └── workload_type_test.rs /.devcontainer/devcontainer.json: 
-------------------------------------------------------------------------------- 1 | // See https://aka.ms/vscode-remote/devcontainer.json for format details. 2 | { 3 | "name": "rudr dev env", 4 | 5 | "image": "oamdev/rudr-dev", 6 | 7 | "service": "development", 8 | 9 | // Uncomment the next line if you want to publish any ports. 10 | //"appPort": [8080], 11 | 12 | // Uncomment the next line if you want to add in default container specific settings.json values 13 | // "settings": { "workbench.colorTheme": "Quiet Light" }, 14 | 15 | // Uncomment the next line to run commands after the container is created. 16 | // "postCreateCommand": "yarn install", 17 | 18 | "runArgs": ["-v","//var/run/docker.sock:/var/run/docker.sock"], 19 | 20 | // "extensions": [ 21 | // "dbaeumer.vscode-eslint", 22 | // "ms-vscode.vscode-typescript-tslint-plugin" 23 | // ], 24 | 25 | "remote.extensionKind": { 26 | "ms-azuretools.vscode-docker": "workspace" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /.devcontainer/devenv/Dockerfile: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------------------------------------- 2 | # Copyright (c) Microsoft Corporation. All rights reserved. 3 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. 
4 | #------------------------------------------------------------------------------------------------------------- 5 | 6 | FROM ubuntu:18.04 7 | 8 | # Avoid warnings by switching to noninteractive 9 | ENV DEBIAN_FRONTEND=noninteractive 10 | 11 | # Install prereqs 12 | RUN apt-get update && apt-get install -y apt-transport-https curl wget gnupg2 git procps 13 | 14 | # Install k8s 15 | RUN curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \ 16 | && echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | tee -a /etc/apt/sources.list.d/kubernetes.list \ 17 | && apt-get update \ 18 | && apt-get install -y kubectl 19 | 20 | # Install Helm 3 21 | RUN wget https://get.helm.sh/helm-v3.0.0-beta.5-linux-amd64.tar.gz \ 22 | && tar -zxvf helm-v3.0.0-beta.5-linux-amd64.tar.gz \ 23 | && mv linux-amd64/helm /usr/local/bin/helm 24 | 25 | # Install rust 26 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \ 27 | && apt install build-essential libssl-dev pkg-config -y \ 28 | && echo 'export PATH="$HOME/.cargo/bin:$PATH"' >> ~/.bashrc 29 | 30 | # Switch back to dialog for any ad-hoc use of apt-get 31 | ENV DEBIAN_FRONTEND=dialog 32 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target/ 2 | _scratch/ -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: 'issue title' 5 | labels: 'Type: Bug' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | Output of helm version: 12 | 13 | Output of kubectl version: 14 | 15 | Cloud Provider/Platform (AKS, GKE, Minikube etc.): 16 | 17 | 18 | 19 | 28 | 29 | **Describe the bug** 30 | A clear and concise description of what the bug is. 
31 | 32 | **OAM yaml files used** 33 | 34 | **What happened**: 35 | 36 | 39 | 40 | **What you expected to happen**: 41 | 42 | **Relevant screenshots**: 43 | 44 | 45 | **How to reproduce it** (as minimally and precisely as possible): 46 | 47 | 49 | 50 | **Anything else we need to know**: 51 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: 'issue title' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 18 | 19 | **OAM Spec Info** 20 | 21 | Rudr will always follow OAM spec, if this feature is related with spec definition, please make sure they are consistent. 22 | 23 | **Is your feature request related to a problem? Please describe.** 24 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 25 | 26 | **Describe the solution you'd like** 27 | A clear and concise description of what you want to happen. 28 | 29 | **Describe alternatives you've considered** 30 | A clear and concise description of any alternative solutions or features you've considered. 31 | 32 | **Additional context** 33 | Add any other context or screenshots about the feature request here. 
-------------------------------------------------------------------------------- /.github/workflows/dev.yml: -------------------------------------------------------------------------------- 1 | name: Dev 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v1 11 | - name: Build Container Images for dev environment 12 | run: make docker-build-dev 13 | - name: Publish Container Images 14 | env: 15 | hydraoss_secret: ${{ secrets.hydraoss }} 16 | run: make docker-publish-dev -------------------------------------------------------------------------------- /.github/workflows/nightly.yml: -------------------------------------------------------------------------------- 1 | name: Nightly 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - uses: actions/checkout@v1 13 | - name: Build Container Images for amd64 14 | run: make docker-build-amd64 15 | - name: Build Container Images for arm64 16 | run: make docker-build-arm64 17 | - name: Publish Container Images 18 | env: 19 | hydraoss_secret: ${{ secrets.hydraoss }} 20 | run: make docker-publish 21 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v1 11 | - name: Env check 12 | run: rustc --version 13 | - name: Build 14 | run: cargo build --verbose 15 | - name: Run tests 16 | run: cargo test --verbose 17 | - uses: engineerd/setup-kind@v0.1.0 18 | - uses: engineerd/configurator@v0.0.1 19 | with: 20 | name: helm 21 | url: https://get.helm.sh/helm-v3.0.0-beta.3-linux-amd64.tar.gz 22 | pathInArchive: linux-amd64/helm 23 | - name: Kubernetes E2E 24 | run: | 25 | export KUBECONFIG="$(kind get kubeconfig-path)" 26 | 
kubectl cluster-info 27 | kubectl get pods -n kube-system 28 | kubectl wait -n kube-system --for=condition=Ready -l k8s-app=kube-dns pods 29 | make kind-e2e 30 | - name: Style check 31 | run: cargo clippy 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | 4 | /vendor 5 | 6 | # editor 7 | /.vscode 8 | /.idea/ 9 | *.iml 10 | 11 | /healthscope/target -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | CODEOWNERS @technosophos @fibonacci1729 @suhuruli @wonderflow @hongchaodeng 2 | LICENSE @technosophos @fibonacci1729 @suhuruli @wonderflow @hongchaodeng -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rudr" 3 | version = "0.1.0" 4 | authors = ["Matt Butcher ", "Jianbo Sun "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | kube = { version = "0.16.1", features = ["openapi"] } 9 | k8s-openapi = { version = "0.5.1", features = ["v1_15"] } 10 | serde = "1.0" 11 | serde_derive = "1.0" 12 | serde_json = "1.0" 13 | failure = "0.1.5" 14 | spectral = "0.6" 15 | reqwest = "0.9" 16 | log = "0.4" 17 | env_logger = "0.6.1" 18 | hyper = "0.12" 19 | clap = "~2.33" 20 | regex = "1.0" 21 | lazy_static = "1.4.0" 22 | chrono = { version = "0.4", features = ["serde"] } 23 | 24 | [workspace] 25 | members = [ 26 | "healthscope", 27 | ] -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILDER_IMAGE=rust:1.38 2 | ARG BASE_IMAGE=debian:buster-slim 3 | ARG PACKAGE_NAME=rudr 4 | FROM ${BUILDER_IMAGE} AS builder 5 | WORKDIR 
/usr/src/rudr 6 | RUN mkdir healthscope 7 | 8 | COPY Cargo.toml . 9 | COPY Cargo.lock . 10 | COPY healthscope/Cargo.toml ./healthscope/ 11 | 12 | # Create dummy files to build the dependencies, cargo won't build without src/main.rs and src/lib.rs 13 | # then remove Rudr fingerprint for following rebuild 14 | RUN mkdir -p ./src/ && \ 15 | echo 'fn main() {}' > ./src/main.rs && \ 16 | echo '' > ./src/lib.rs && \ 17 | mkdir -p ./healthscope/src/ && \ 18 | echo 'fn main() {}' > ./healthscope/src/main.rs && \ 19 | echo '' > ./healthscope/src/lib.rs 20 | RUN cargo build --release && \ 21 | cargo build --package healthscope --release && \ 22 | rm -rf ./target/release/.fingerprint/rudr-* && \ 23 | rm -rf ./target/release/.fingerprint/healthscope-* 24 | 25 | # Build real binaries now 26 | COPY ./src ./src 27 | COPY ./healthscope/src ./healthscope/src 28 | RUN cargo build --release 29 | RUN cargo build --release --package healthscope 30 | 31 | FROM ${BASE_IMAGE} 32 | RUN apt-get update && apt-get install -y pkg-config libssl-dev openssl && rm -rf /var/lib/apt/lists/* 33 | WORKDIR /usr/app 34 | ARG PACKAGE_NAME 35 | COPY --from=builder /usr/src/rudr/target/release/${PACKAGE_NAME} . 36 | ENV RUST_LOG ${PACKAGE_NAME}=info 37 | RUN echo "./${PACKAGE_NAME}" > entrypoint.sh 38 | RUN chmod 0755 entrypoint.sh 39 | ENTRYPOINT ["/bin/sh", "./entrypoint.sh"] 40 | -------------------------------------------------------------------------------- /Dockerfile.e2e: -------------------------------------------------------------------------------- 1 | FROM debian:buster-slim 2 | WORKDIR /usr/app 3 | RUN apt-get update && apt-get install -y pkg-config libssl-dev openssl && rm -rf /var/lib/apt/lists/* 4 | COPY debug/rudr . 
5 | ENV RUST_LOG rudr=info 6 | CMD ["./rudr"] -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright © Microsoft Corporation. All rights reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | REPO = oamdev/rudr 2 | HEALTHREPO = oamdev/healthscope 3 | TAG ?= latest 4 | ARCHS := amd64 arm64 5 | LOG_LEVEL := rudr=debug,kube=info 6 | 7 | .PHONY: build 8 | build: 9 | cargo build 10 | 11 | foo: 12 | echo $(PWD) 13 | 14 | .PHONY: build-linux 15 | build-linux: docker-build-amd64 16 | build-linux: 17 | docker run -it --rm -v $(PWD)/_target:/dest $(REPO):$(TAG) cp /usr/app/rudr /dest/rudr-linux-x86_64 18 | 19 | .PHONY: test 20 | test: 21 | cargo test 22 | cargo clippy 23 | cargo test --package healthscope 24 | cargo clippy --package healthscope 25 | 26 | .PHONY: run 27 | run: 28 | RUST_LOG="$(LOG_LEVEL)" RUST_BACKTRACE=short cargo run 29 | 30 | .PHONY: healthscoperun 31 | healthscoperun: 32 | RUST_LOG="healthscope=debug" RUST_BACKTRACE=short cargo run --package healthscope 33 | 34 | GIT_VERSION = $(shell git describe --always --abbrev=7 --dirty) 35 | .PHONY: kind-e2e 36 | kind-e2e: 37 | make build && \ 38 | docker build -t $(REPO):$(GIT_VERSION) -f Dockerfile.e2e target/ && \ 39 | kind load docker-image $(REPO):$(GIT_VERSION) \ 40 | || { echo >&2 "kind not installed or error loading image: $(REPO):$(GIT_VERSION)"; exit 1; } && \ 41 | helm version && \ 42 | helm install rudr ./charts/rudr --set image.repository=$(REPO) --set image.tag=$(GIT_VERSION) --set image.pullPolicy=IfNotPresent --wait \ 43 | || { echo >&2 "helm install timeout"; kubectl logs `kubectl get pods -l "app.kubernetes.io/name=rudr,app.kubernetes.io/instance=rudr" -o jsonpath="{.items[0].metadata.name}"`; exit 1; } && \ 44 | kubectl get trait && \ 45 | kubectl 
apply -f examples/components.yaml && \ 46 | kubectl get componentschematics && \ 47 | kubectl get componentschematic alpine-task-v1 -o yaml 48 | 49 | 50 | .PHONY: docker-build-cx 51 | docker-build-cx: $(addprefix docker-build-, $(ARCHS)) 52 | 53 | .PHONY: docker-build-arm64 54 | docker-build-arm64: 55 | docker run --rm --privileged multiarch/qemu-user-static --reset -p yes 56 | docker build -t $(REPO)-arm64:$(TAG) --build-arg BUILDER_IMAGE=arm64v8/rust:1.38 --build-arg BASE_IMAGE=arm64v8/debian:buster-slim . 57 | docker build -t $(HEALTHREPO)-arm64:$(TAG) --build-arg BUILDER_IMAGE=arm64v8/rust:1.38 --build-arg BASE_IMAGE=arm64v8/debian:buster-slim --build-arg PACKAGE_NAME=healthscope . 58 | 59 | .PHONY: docker-build-amd64 60 | docker-build-amd64: 61 | docker build -t $(REPO)-amd64:$(TAG) . 62 | docker build -t $(HEALTHREPO)-amd64:$(TAG) --build-arg PACKAGE_NAME=healthscope . 63 | 64 | .PHONY: docker-publish 65 | docker-publish: docker-build-cx 66 | docker login -u hydraoss -p ${hydraoss_secret} 67 | docker push $(REPO)-amd64:$(TAG) 68 | docker push $(HEALTHREPO)-amd64:$(TAG) 69 | docker push $(REPO)-arm64:$(TAG) 70 | docker push $(HEALTHREPO)-arm64:$(TAG) 71 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(REPO):$(TAG) $(REPO)-amd64:$(TAG) $(REPO)-arm64:$(TAG) 72 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(REPO):$(TAG) 73 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(HEALTHREPO):$(TAG) $(HEALTHREPO)-amd64:$(TAG) $(HEALTHREPO)-arm64:$(TAG) 74 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(HEALTHREPO):$(TAG) 75 | 76 | # Temporary while we get the ARM64 build time sorted. 
77 | .PHONY: docker-publish-amd64 78 | docker-publish-amd64: 79 | docker push $(REPO)-amd64:$(TAG) 80 | docker push $(HEALTHREPO)-amd64:$(TAG) 81 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(REPO):$(TAG) $(REPO)-amd64:$(TAG) 82 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(REPO):$(TAG) 83 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create $(HEALTHREPO):$(TAG) $(HEALTHREPO)-amd64:$(TAG) 84 | DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push $(HEALTHREPO):$(TAG) 85 | 86 | .PHONY: docker-build-dev 87 | docker-build-dev: 88 | docker build -t $(REPO)-dev:$(TAG) .devcontainer/devenv/ 89 | 90 | .PHONY: docker-push-dev 91 | docker-publish-dev: 92 | docker login -u hydraoss -p ${hydraoss_secret} 93 | docker push $(REPO)-dev:$(TAG) -------------------------------------------------------------------------------- /README-old.md: -------------------------------------------------------------------------------- 1 | # Rudr: A Kubernetes Implementation of the Open Application Model 2 | 3 | ![](https://github.com/oam-dev/rudr/workflows/Rust/badge.svg) 4 | 5 | Rudr is an implementation of the [Open Application Model (OAM) 1.0.0-alpha1](https://github.com/oam-dev/spec/releases/tag/v1.0.0-alpha.1) for Kubernetes. 6 | 7 | ***Note: Rudr is a reference implementation for the initial working draft of the OAM specification. It does not reflect the most recent version of the OAM specification.*** 8 | 9 | ## Why Rudr? 10 | 11 | Kubernetes API resources focused on container infrastructure rather than the applications per se. Yet, application developers think in terms of application architecture, not of infrastructure. 12 | 13 | **Rudr provides application level primitives for Kubernetes that enable:** 14 | 15 | - The ability to define application (e.g., WordPress) in Kubernetes. 16 | - The ability to define operational capability (e.g., auto-scaling policy, rather than HPA) in Kubernetes. 
17 | - A portable and self-descriptive application description which includes every dependency and operational ability the application requires to run. 18 | - Building an application centric abstraction atop container infrastructure. 19 | 20 | **Rudr can be used by:** 21 | 22 | - Developers who want to describe application from developer's view, rather than learning low level primitives. 23 | - Operators who want to focus on strategies of operating the application, rather than infrastructure details. 24 | - Kubernetes engineers who want to define "application" in Kubernetes, or expose application level API to developers and operators, rather than full Kubernetes API. 25 | - PaaS engineers who want to build a serverless application platform atop Kubernetes, with minimal effort. 26 | - Software distributors who want to "define once, deploy everywhere", regardless of the differences of Kubernetes providers on multi-cloud. 27 | 28 | ## Get started 29 | 30 | Define and deploy a [helloworld-python](./docs/how-to/create_component_from_scratch.md) application with Rudr. 31 | 32 | ## How does Rudr work? 33 | 34 | ![rudr arch](./docs/media/rudr-how-it-works.png) 35 | 36 | Rudr defines [OAM primitives](https://github.com/oam-dev/spec/blob/master/2.overview_and_terminology.md) as Kubernetes Custom Resource Definitions (CRDs). Hence, Rudr is able to provide OAM style application level APIs including [Components](./docs/concepts/component-schematic.md) for developers to define applications, and [Traits](./docs/concepts/traits.md) for operators to define operational capabilities. Meanwhile, infra operators still work on Kubernetes itself. 37 | 38 | Rudr controllers will maintain the mapping between OAM CRDs (e.g., Component) and Kubernetes API resources (e.g., Deployment). 39 | 40 | ## Try more things out yourself 41 | 42 | Read the [documentation list](./docs/README.md) for more options. 
Some highlights: 43 | - [Getting started tutorials](https://github.com/oam-dev/rudr/tree/master/docs#get-started) 44 | - [Learn Open Application Model concepts in Kubernetes](https://github.com/oam-dev/rudr/tree/master/docs#concepts) 45 | - [Learn advanced How-To topics](https://github.com/oam-dev/rudr/tree/master/docs#how-tos) 46 | - For example, use Rudr with Helm and Kustomize and migrate existing Kubernetes resources to Rudr. 47 | - [Install and play with more workload types](https://github.com/oam-dev/rudr/tree/master/docs#extended-workloads) 48 | - [Developing Rudr](https://github.com/oam-dev/rudr/tree/master/docs#extended-workloads) 49 | - [FAQ](https://github.com/oam-dev/rudr/blob/master/docs/faq.md) 50 | 51 | ## More samples and demos 52 | 53 | - [OAM samples repository](https://github.com/oam-dev/samples) 54 | 55 | ## Roadmap 56 | 57 | Currently, Rudr relies on pre-installed workload types and traits to accomplish the task. In the next release, Rudr will provide a plugin mechanism to integrate any Kubernetes Operator as workload type or operational capability. The goal is to allow users to assemble Operators ecosystem as a serverless application platform by "one click". 58 | 59 | ## Contributing 60 | 61 | This project welcomes contributions and suggestions. See [CONTRIBUTING.md](CONTRIBUTING.md) for more details. Below are links to join the bi-weekly community meetings and our meeting notes. Community Slack channels & mailing lists will be added shortly (~ 10/1). 
62 | 63 | | Item | Value | 64 | |---------------------|---| 65 | | Mailing List | [oam-dev@googlegroups.com](https://groups.google.com/forum/#!forum/oam-dev) | 66 | | Meeting Information | [Bi-weekly (Starting Oct 22, 2019), Tuesdays 10:30AM PST](https://calendar.google.com/calendar?cid=dDk5YThyNGIwOWJyYTJxajNlbWI0a2FvdGtAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ) | 67 | | | [Bi-weekly APAC (Starting Dec 24, 2019), Tuesdays 1:00PM GMT+8](https://calendar.google.com/event?action=TEMPLATE&tmeid=MzJnbHR2b3R1bHYxMG0wc2YybDJjZmhuc2pfMjAxOTEyMjRUMDUwMDAwWiBmZW5namluZ2NoYW9AbQ&tmsrc=fengjingchao%40gmail.com&scp=ALL)| 68 | | Meeting Link | https://zoom.us/j/271516061 | 69 | | IM Channel | https://gitter.im/oam-dev/ | 70 | | Meeting Notes | [Notes](https://docs.google.com/document/d/1nqdFEyULekyksFHtFvgvFAYE-0AMHKoS3RMnaKsarjs/edit?usp=sharing) | 71 | | Twitter | [@oam_dev](https://twitter.com/oam_dev) | 72 | 73 | ## Governance 74 | 75 | This project follows governance structure of numerous other open source projects. See [governance.md](governance.md) for more details. 76 | 77 | ## License 78 | 79 | This project is available under the terms of the MIT license. See [LICENSE.txt](LICENSE.txt). 80 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rudr 2 | 3 | :rotating_light: **NOTE: Rudr is 4 | [deprecated](https://github.com/oam-dev/rudr/issues/559) in favor of the upcoming [open application platform](https://github.com/oam-dev/kubevela) project as its successor. There are no plans to produce future releases of this project.** :rotating_light: 5 | 6 | **The new project will come to its first release at Q4 2020. Stay tuned!** 7 | 8 | The original README can be found [here](./README-old.md) for historical 9 | purposes. 
10 | -------------------------------------------------------------------------------- /charts/.dockerignore: -------------------------------------------------------------------------------- 1 | # See https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Put files here that you don't want copied into your bundle's invocation image 3 | .gitignore 4 | Dockerfile.tmpl 5 | -------------------------------------------------------------------------------- /charts/.gitignore: -------------------------------------------------------------------------------- 1 | Dockerfile 2 | .cnab/ 3 | -------------------------------------------------------------------------------- /charts/Dockerfile.tmpl: -------------------------------------------------------------------------------- 1 | FROM quay.io/deis/lightweight-docker-go:v0.2.0 2 | FROM debian:stretch 3 | COPY --from=0 /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt 4 | 5 | ARG BUNDLE_DIR 6 | 7 | # This is a template Dockerfile for the bundle's invocation image 8 | # You can customize it to use different base images, install tools and copy configuration files. 9 | # 10 | # Porter will use it as a template and append lines to it for the mixins 11 | # and to set the CMD appropriately for the CNAB specification. 12 | # 13 | # Add the following line to porter.yaml to instruct Porter to use this template 14 | # dockerfile: Dockerfile.tmpl 15 | 16 | # Use the BUNDLE_DIR build argument to copy files into the bundle 17 | # COPY . $BUNDLE_DIR 18 | -------------------------------------------------------------------------------- /charts/README.md: -------------------------------------------------------------------------------- 1 | # rudr-install 2 | 3 | This bundle installs the CRDs necessary to install rudr, waits for the CRDs to rebuild the cache, and then installs rudr itself using the helm chart. 4 | 5 | To use, the easiest way will be to install https://porter.sh, and then: 6 | 7 | 1. 
Navigate and checkout this directory. 8 | 2. `porter build` 9 | 3. `porter creds generate` and select `file path` and enter the path to your .kube/config. 10 | 4. `porter install rudr-install -c rudr-install` 11 | 5. profit. 12 | 13 | # Contents 14 | 15 | ## porter.yaml 16 | 17 | This is the porter manifest. See https://porter.sh/authoring-bundles/ for 18 | details on every field and how to configure your bundle. This is a required 19 | file. 20 | 21 | ## README.md 22 | 23 | This explains the files created by `porter create`. It is not used by porter and 24 | can be deleted. 25 | 26 | ## Dockerfile.tmpl 27 | 28 | This is a template Dockerfile for the bundle's invocation image. You can 29 | customize it to use different base images, install tools and copy configuration 30 | files. Porter will use it as a template and append lines to it for the mixin and to set 31 | the CMD appropriately for the CNAB specification. You can delete this file if you don't 32 | need it. 33 | 34 | Add the following line to **porter.yaml** to enable the Dockerfile template: 35 | 36 | ```yaml 37 | dockerfile: Dockerfile.tmpl 38 | ``` 39 | 40 | By default, the Dockerfile template is disabled and Porter automatically copies 41 | all of the files in the current directory into the bundle's invocation image. When 42 | you use a custom Dockerfile template, you must manually copy files into the bundle 43 | using COPY statements in the Dockerfile template. 44 | 45 | ## .gitignore 46 | 47 | This is a default file that we provide to help remind you which files are 48 | generated by Porter, and shouldn't be committed to source control. You can 49 | delete it if you don't need it. 50 | 51 | ## .dockerignore 52 | 53 | This is a default file that controls which files are copied into the bundle's 54 | invocation image by default. You can delete it if you don't need it. 
-------------------------------------------------------------------------------- /charts/healthscope/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /charts/healthscope/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: healthscope 3 | description: A HealthScope implementation for OAM 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | version: 0.1.0 18 | 19 | # This is the version number of the application being deployed. This version number should be 20 | # incremented each time you make changes to the application. 
21 | appVersion: 1.16.0 22 | -------------------------------------------------------------------------------- /charts/healthscope/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if contains "NodePort" .Values.service.type }} 3 | export NODE_PORT=$(kubectl get -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "healthscope.fullname" . }}) 4 | export NODE_IP=$(kubectl get nodes -o jsonpath="{.items[0].status.addresses[0].address}") 5 | echo http://$NODE_IP:$NODE_PORT 6 | {{- else if contains "ClusterIP" .Values.service.type }} 7 | export POD_NAME=$(kubectl get pods -l "app.kubernetes.io/name={{ template "healthscope.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 8 | echo "Visit http://127.0.0.1:8080 to use your application" 9 | kubectl port-forward $POD_NAME 8080:80 10 | {{- end }} 11 | -------------------------------------------------------------------------------- /charts/healthscope/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "healthscope.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "healthscope.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "healthscope.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "healthscope.labels" -}} 38 | app.kubernetes.io/name: {{ include "healthscope.name" . }} 39 | helm.sh/chart: {{ include "healthscope.chart" . }} 40 | app.kubernetes.io/instance: {{ .Release.Name }} 41 | {{- if .Chart.AppVersion }} 42 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 43 | {{- end }} 44 | app.kubernetes.io/managed-by: {{ .Release.Service }} 45 | {{- end -}} 46 | -------------------------------------------------------------------------------- /charts/healthscope/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "healthscope.fullname" . }} 5 | labels: 6 | {{ include "healthscope.labels" . | indent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | app.kubernetes.io/name: {{ include "healthscope.name" . }} 12 | app.kubernetes.io/instance: {{ .Release.Name }} 13 | template: 14 | metadata: 15 | labels: 16 | app.kubernetes.io/name: {{ include "healthscope.name" . }} 17 | app.kubernetes.io/instance: {{ .Release.Name }} 18 | spec: 19 | serviceAccountName: {{ template "healthscope.fullname" . 
}} 20 | containers: 21 | - name: {{ .Chart.Name }} 22 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 23 | imagePullPolicy: {{ .Values.image.pullPolicy }} 24 | ports: 25 | - name: metrics 26 | containerPort: 8080 27 | protocol: TCP 28 | - name: http 29 | containerPort: 80 30 | protocol: TCP 31 | livenessProbe: 32 | httpGet: 33 | path: /health 34 | port: 8080 35 | readinessProbe: 36 | httpGet: 37 | path: /health 38 | port: 8080 39 | resources: 40 | {{ toYaml .Values.resources | indent 12 }} 41 | {{- with .Values.nodeSelector }} 42 | nodeSelector: 43 | {{ toYaml . | indent 8 }} 44 | {{- end }} 45 | {{- with .Values.affinity }} 46 | affinity: 47 | {{ toYaml . | indent 8 }} 48 | {{- end }} 49 | {{- with .Values.tolerations }} 50 | tolerations: 51 | {{ toYaml . | indent 8 }} 52 | {{- end }} 53 | -------------------------------------------------------------------------------- /charts/healthscope/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "healthscope.fullname" . }} 6 | labels: 7 | {{ include "healthscope.labels" . | indent 4 }} 8 | 9 | {{ if .Values.enableRBAC }} 10 | --- 11 | 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRole 14 | metadata: 15 | name: {{ template "healthscope.fullname" . }} 16 | labels: 17 | {{ include "healthscope.labels" . | indent 4 }} 18 | rules: 19 | - apiGroups: ["", "apps", "batch", "extensions", "core.oam.dev", "apiextensions.k8s.io"] 20 | resources: ["*"] 21 | verbs: ["*"] 22 | 23 | --- 24 | 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | name: {{ template "healthscope.fullname" . }} 29 | labels: 30 | {{ include "healthscope.labels" . | indent 4 }} 31 | subjects: 32 | - kind: ServiceAccount 33 | name: {{ template "healthscope.fullname" . 
}} 34 | namespace: {{ .Release.Namespace }} 35 | roleRef: 36 | kind: ClusterRole 37 | name: {{ template "healthscope.fullname" . }} 38 | apiGroup: "" 39 | {{ end }} -------------------------------------------------------------------------------- /charts/healthscope/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "healthscope.fullname" . }} 5 | labels: 6 | {{ include "healthscope.labels" . | indent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | selector: 15 | app.kubernetes.io/name: {{ include "healthscope.name" . }} 16 | app.kubernetes.io/instance: {{ .Release.Name }} 17 | -------------------------------------------------------------------------------- /charts/healthscope/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for healthscope. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: oamdev/healthscope 9 | tag: latest # We're in pre-release 10 | pullPolicy: Always 11 | 12 | nameOverride: "" 13 | fullnameOverride: "" 14 | 15 | service: 16 | type: ClusterIP 17 | port: 80 18 | 19 | # Turning this off will omit the Role and RoleBinding declarations. 20 | enableRBAC: true 21 | 22 | resources: {} 23 | # We usually recommend not to specify default resources and to leave this as a conscious 24 | # choice for the user. This also increases chances charts run on environments with little 25 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 26 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
27 | # limits: 28 | # cpu: 100m 29 | # memory: 128Mi 30 | # requests: 31 | # cpu: 100m 32 | # memory: 128Mi 33 | 34 | nodeSelector: {} 35 | 36 | tolerations: [] 37 | 38 | affinity: {} 39 | -------------------------------------------------------------------------------- /charts/porter.yaml: -------------------------------------------------------------------------------- 1 | # This is the configuration for Porter 2 | # You must define steps for each action, but the rest is optional 3 | # See https://porter.sh/authoring-bundles for documentation on how to configure your bundle 4 | # Uncomment out the sections below to take full advantage of what Porter can do! 5 | 6 | name: rudr-install 7 | version: 0.1.0 8 | description: "An example Porter configuration" 9 | invocationImage: squillace/rudr-install:latest 10 | tag: squillace/rudr-install-cnab:latest 11 | 12 | # Uncomment out the line below to use a template Dockerfile for your invocation image 13 | #dockerfile: Dockerfile.tmpl 14 | 15 | mixins: 16 | - exec 17 | - helm 18 | - kubernetes 19 | 20 | credentials: 21 | - name: KUBE_CONFIG 22 | path: /root/.kube/config 23 | 24 | parameters: 25 | - name: HELM_RELEASE 26 | type: string 27 | default: "rudr" 28 | 29 | install: 30 | - kubernetes: 31 | description: "Creating rudr CRDs..." 32 | manifests: 33 | - rudr/crds/ 34 | wait: true 35 | - exec: 36 | description: "Wait for CRDs to be cached...." 37 | command: "bash" 38 | arguments: 39 | - "waitForCRDs.sh" 40 | - helm: 41 | description: "Deploy rudr using helm...." 42 | name: "{{bundle.parameters.HELM_RELEASE}}" 43 | chart: rudr 44 | replace: true 45 | 46 | upgrade: 47 | - exec: 48 | description: "Not implemented." 49 | command: bash 50 | flags: 51 | c: echo Not implemented. 52 | 53 | uninstall: 54 | - helm: 55 | description: "Removing the rudr-install chart..." 56 | purge: true 57 | releases: 58 | - "{{bundle.parameters.HELM_RELEASE}}" 59 | - exec: 60 | description: "The CRDs remain. Please remove the CRDs manually." 
61 | command: bash 62 | flags: 63 | c: echo CRD removal not yet implemented. 64 | # See https://porter.sh/authoring-bundles/#dependencies 65 | #dependencies: 66 | # mysql: 67 | # tag: deislabs/porter-mysql:latest 68 | # parameters: 69 | # database-name: wordpress 70 | 71 | # See https://porter.sh/wiring/#credentials 72 | #credentials: 73 | # - name: kubeconfig 74 | # path: /root/.kube/config 75 | -------------------------------------------------------------------------------- /charts/rudr/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /charts/rudr/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: rudr 3 | description: The rudr implementation of OAM 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 
17 | version: 0.1.0 18 | 19 | # This is the version number of the application being deployed. This version number should be 20 | # incremented each time you make changes to the application. 21 | appVersion: 0.1.0 22 | -------------------------------------------------------------------------------- /charts/rudr/crds/appconfigs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: applicationconfigurations.core.oam.dev 5 | labels: 6 | app.kubernetes.io/part-of: core.oam.dev 7 | spec: 8 | group: core.oam.dev 9 | versions: 10 | - name: v1alpha1 11 | served: true 12 | storage: true 13 | scope: Namespaced 14 | names: 15 | plural: applicationconfigurations 16 | singular: applicationconfiguration 17 | kind: ApplicationConfiguration 18 | shortNames: 19 | - cfg 20 | - configuration 21 | - configurations 22 | -------------------------------------------------------------------------------- /charts/rudr/crds/componentinstances.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: componentinstances.core.oam.dev 5 | labels: 6 | app.kubernetes.io/part-of: core.oam.dev 7 | spec: 8 | group: core.oam.dev 9 | versions: 10 | - name: v1alpha1 11 | served: true 12 | storage: true 13 | scope: Namespaced 14 | names: 15 | plural: componentinstances 16 | singular: componentinstance 17 | kind: ComponentInstance 18 | shortNames: 19 | - compinst 20 | -------------------------------------------------------------------------------- /charts/rudr/crds/componentschematics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: componentschematics.core.oam.dev 5 | labels: 6 | 
app.kubernetes.io/part-of: core.oam.dev 7 | spec: 8 | group: core.oam.dev 9 | versions: 10 | - name: v1alpha1 11 | served: true 12 | storage: true 13 | scope: Namespaced 14 | names: 15 | plural: componentschematics 16 | singular: componentschematic 17 | kind: ComponentSchematic 18 | shortNames: 19 | - component 20 | - comp 21 | - components 22 | -------------------------------------------------------------------------------- /charts/rudr/crds/healthscope.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: healthscopes.core.oam.dev 5 | labels: 6 | app.kubernetes.io/part-of: core.oam.dev 7 | spec: 8 | group: core.oam.dev 9 | versions: 10 | - name: v1alpha1 11 | served: true 12 | storage: true 13 | scope: Namespaced 14 | names: 15 | plural: healthscopes 16 | singular: healthscope 17 | kind: HealthScope 18 | shortNames: 19 | - health -------------------------------------------------------------------------------- /charts/rudr/crds/scopes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: applicationscopes.core.oam.dev 5 | labels: 6 | app.kubernetes.io/part-of: core.oam.dev 7 | spec: 8 | group: core.oam.dev 9 | versions: 10 | - name: v1alpha1 11 | served: true 12 | storage: true 13 | scope: Namespaced 14 | names: 15 | plural: applicationscopes 16 | singular: applicationscope 17 | kind: ApplicationScope 18 | shortNames: 19 | - scope -------------------------------------------------------------------------------- /charts/rudr/crds/traits.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: traits.core.oam.dev 5 | labels: 6 | app.kubernetes.io/part-of: 
core.oam.dev 7 | spec: 8 | group: core.oam.dev 9 | versions: 10 | - name: v1alpha1 11 | served: true 12 | storage: true 13 | scope: Namespaced 14 | names: 15 | plural: traits 16 | singular: trait 17 | kind: Trait 18 | shortNames: 19 | - trait 20 | -------------------------------------------------------------------------------- /charts/rudr/crds/workloadtypes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: workloadtypes.core.oam.dev 5 | labels: 6 | app.kubernetes.io/part-of: core.oam.dev 7 | spec: 8 | group: core.oam.dev 9 | versions: 10 | - name: v1alpha1 11 | served: true 12 | storage: true 13 | scope: Namespaced 14 | names: 15 | plural: workloadtypes 16 | singular: workloadtype 17 | kind: WorkloadType 18 | shortNames: 19 | - workloadtype 20 | -------------------------------------------------------------------------------- /charts/rudr/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Rudr is a Kubernetes controller to manage Configuration CRDs. 2 | 3 | It has been successfully installed. -------------------------------------------------------------------------------- /charts/rudr/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "rudr.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "rudr.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "rudr.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "rudr.labels" -}} 38 | app.kubernetes.io/name: {{ include "rudr.name" . }} 39 | helm.sh/chart: {{ include "rudr.chart" . }} 40 | app.kubernetes.io/instance: {{ .Release.Name }} 41 | {{- if .Chart.AppVersion }} 42 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 43 | {{- end }} 44 | app.kubernetes.io/managed-by: {{ .Release.Service }} 45 | {{- end -}} 46 | -------------------------------------------------------------------------------- /charts/rudr/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "rudr.fullname" . }} 5 | labels: 6 | {{ include "rudr.labels" . | indent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | app.kubernetes.io/name: {{ include "rudr.name" . }} 12 | app.kubernetes.io/instance: {{ .Release.Name }} 13 | template: 14 | metadata: 15 | labels: 16 | app.kubernetes.io/name: {{ include "rudr.name" . }} 17 | app.kubernetes.io/instance: {{ .Release.Name }} 18 | spec: 19 | serviceAccountName: {{ template "rudr.fullname" . 
}} 20 | containers: 21 | - name: {{ .Chart.Name }} 22 | env: 23 | - name: RUST_LOG 24 | value: {{ .Values.logLevel | default "rudr=info" | quote}} 25 | - name: KUBERNETES_NAMESPACE 26 | valueFrom: 27 | fieldRef: 28 | fieldPath: metadata.namespace 29 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 30 | imagePullPolicy: {{ .Values.image.pullPolicy }} 31 | ports: 32 | - name: http 33 | containerPort: 8080 34 | protocol: TCP 35 | livenessProbe: 36 | httpGet: 37 | path: /health 38 | port: 8080 39 | readinessProbe: 40 | httpGet: 41 | path: /health 42 | port: 8080 43 | resources: 44 | {{ toYaml .Values.resources | indent 12 }} 45 | {{- with .Values.nodeSelector }} 46 | nodeSelector: 47 | {{ toYaml . | indent 8 }} 48 | {{- end }} 49 | {{- with .Values.affinity }} 50 | affinity: 51 | {{ toYaml . | indent 8 }} 52 | {{- end }} 53 | {{- with .Values.tolerations }} 54 | tolerations: 55 | {{ toYaml . | indent 8 }} 56 | {{- end }} 57 | -------------------------------------------------------------------------------- /charts/rudr/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "rudr.fullname" . }} 6 | labels: 7 | {{ include "rudr.labels" . | indent 4 }} 8 | 9 | {{ if .Values.enableRBAC }} 10 | --- 11 | 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRole 14 | metadata: 15 | name: {{ template "rudr.fullname" . }} 16 | labels: 17 | {{ include "rudr.labels" . | indent 4 }} 18 | rules: 19 | - apiGroups: ["", "apps", "batch", "extensions", "autoscaling", "core.oam.dev", "apiextensions.k8s.io"] 20 | resources: ["*"] 21 | verbs: ["*"] 22 | 23 | --- 24 | 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | name: {{ template "rudr.fullname" . }} 29 | labels: 30 | {{ include "rudr.labels" . 
| indent 4 }} 31 | subjects: 32 | - kind: ServiceAccount 33 | name: {{ template "rudr.fullname" . }} 34 | namespace: {{ .Release.Namespace }} 35 | roleRef: 36 | kind: ClusterRole 37 | name: {{ template "rudr.fullname" . }} 38 | apiGroup: "" 39 | {{ end }} 40 | -------------------------------------------------------------------------------- /charts/rudr/templates/scopes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationScope 3 | metadata: 4 | name: health 5 | annotations: 6 | version: v1.0.0 7 | description: "aggregated health state for a group of components." 8 | spec: 9 | type: core.oam.dev/v1alpha1.HealthScope 10 | allowComponentOverlap: true 11 | parameters: 12 | - name: probe-method 13 | description: The method to probe the components, e.g. 'httpGet'. 14 | type: string 15 | required: true 16 | - name: probe-endpoint 17 | description: The endpoint to probe from the components, e.g. '/v1/health'. 18 | type: string 19 | required: true 20 | - name: probe-timeout 21 | description: The amount of time in seconds to wait when receiving a response before marked failure. 22 | type: integer 23 | required: false 24 | - name: probe-interval 25 | description: The amount of time in seconds between probing tries. 26 | type: integer 27 | required: false 28 | - name: failure-rate-threshold 29 | description: If the rate of failure of total probe results is above this threshold, declared 'failed'. 30 | type: double 31 | required: false 32 | - name: healthy-rate-threshold 33 | description: If the rate of healthy of total probe results is above this threshold, declared 'healthy'. 
34 | type: double 35 | required: false 36 | - name: healthThresholdPercentage 37 | description: The % of healthy components required to upgrade scope 38 | type: double 39 | required: false 40 | - name: requiredHealthyComponents 41 | description: Comma-separated list of names of the components required to be healthy for the scope to be health. 42 | type: string 43 | required: false 44 | --- 45 | apiVersion: core.oam.dev/v1alpha1 46 | kind: ApplicationScope 47 | metadata: 48 | name: network 49 | annotations: 50 | version: v1.0.0 51 | description: "network boundary that a group components reside in" 52 | spec: 53 | type: core.oam.dev/v1.NetworkScope 54 | allowComponentOverlap: false 55 | parameters: 56 | - name: network-id 57 | description: The id of the network, e.g. vpc-id, VNet name. 58 | type: string 59 | required: Y 60 | - name: subnet-id 61 | description: The id of the subnet within the network. 62 | type: string 63 | required: Y 64 | - name: internet-gateway-type 65 | description: The type of the gateway, options are 'public', 'nat'. Empty string means no gateway. 66 | type: string 67 | required: N 68 | -------------------------------------------------------------------------------- /charts/rudr/templates/traits.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: Trait 3 | metadata: 4 | name: auto-scaler 5 | annotations: 6 | version: v1.0.0 7 | description: "Allow workload to auto scale by CPU/Memory, this is implemented by the Kubernetes Horizontal Pod Autoscaler." 
8 | spec: 9 | appliesTo: 10 | - core.oam.dev/v1alpha1.Server 11 | - core.oam.dev/v1alpha1.Task 12 | properties: | 13 | { 14 | "$schema": "http://json-schema.org/draft-07/schema#", 15 | "type": "object", 16 | "properties": { 17 | "minimum": { 18 | "type": "integer", 19 | "description": "Minumum number of replicas to start.", 20 | "default": 1 21 | }, 22 | "maximum": { 23 | "type": "integer", 24 | "description": "Maximum number of replicas to start.", 25 | "default": 10 26 | }, 27 | "memory": { 28 | "type": "integer", 29 | "description": "The memory consumption threshold (as percent) that will cause a scale event." 30 | }, 31 | "cpu": { 32 | "type": "integer", 33 | "description": "The CPU consumption threshold (as percent) that will cause a scale event" 34 | } 35 | } 36 | } 37 | 38 | --- 39 | apiVersion: core.oam.dev/v1alpha1 40 | kind: Trait 41 | metadata: 42 | name: manual-scaler 43 | annotations: 44 | version: v1.0.0 45 | description: "Allow operators to manually scale a workloads that allow multiple replicas." 46 | spec: 47 | appliesTo: 48 | - core.oam.dev/v1alpha1.Server 49 | - core.oam.dev/v1alpha1.Task 50 | - openfaas.com/v1alpha2.Function 51 | properties: | 52 | { 53 | "$schema": "http://json-schema.org/draft-07/schema#", 54 | "type": "object", 55 | "required": [ 56 | "replicaCount" 57 | ], 58 | "properties": { 59 | "replicaCount": { 60 | "type": "integer", 61 | "description": "the target number of replicas to scale a component to.", 62 | "minimum": 0 63 | } 64 | } 65 | } 66 | 67 | --- 68 | apiVersion: core.oam.dev/v1alpha1 69 | kind: Trait 70 | metadata: 71 | name: ingress 72 | annotations: 73 | version: v1.0.0 74 | description: "Ingress Trait used for components with service workloads and provides load balancing, SSL termination and name-based virtual hosting." 
75 | spec: 76 | appliesTo: 77 | - core.oam.dev/v1alpha1.Server 78 | - core.oam.dev/v1alpha1.SingletonServer 79 | properties: | 80 | { 81 | "$schema": "http://json-schema.org/draft-07/schema#", 82 | "type": "object", 83 | "required": [ 84 | "hostname", 85 | "service_port" 86 | ], 87 | "properties": { 88 | "hostname": { 89 | "type": "string", 90 | "description": "Host name for the ingress." 91 | }, 92 | "service_port": { 93 | "type": "integer", 94 | "description": "Port number on the service." 95 | }, 96 | "path": { 97 | "type": "string", 98 | "description": "Path to expose.", 99 | "default": "/" 100 | }, 101 | "tls_hosts": { 102 | "type": "string", 103 | "description": "Host names for TLS certificate." 104 | }, 105 | "tls_secret_name": { 106 | "type": "string", 107 | "description": "TLS's secret name in Kubernetes." 108 | }, 109 | } 110 | } 111 | 112 | --- 113 | apiVersion: core.oam.dev/v1alpha1 114 | kind: Trait 115 | metadata: 116 | name: volume-mounter 117 | annotations: 118 | version: v1.0.0 119 | description: "The volume mounter trait is responsible for attaching a Kubernetes PersistentVolume Claim(PVC) to a component." 120 | spec: 121 | appliesTo: 122 | - core.oam.dev/v1alpha1.Server 123 | - core.oam.dev/v1alpha1.SingletonServer 124 | - core.oam.dev/v1alpha1.Worker 125 | - core.oam.dev/v1alpha1.SingletonWorker 126 | - core.oam.dev/v1alpha1.Task 127 | - core.oam.dev/v1alpha1.SingletonTask 128 | properties: | 129 | { 130 | "$schema": "http://json-schema.org/draft-07/schema#", 131 | "type": "object", 132 | "required": [ 133 | "volumeName", 134 | "storageClass" 135 | ], 136 | "properties": { 137 | "volumeName": { 138 | "type": "string", 139 | "description": "The name of the volume this backs. This matches the volume name declared in the ComponentSchematic." 140 | }, 141 | "storageClass": { 142 | "type": "string", 143 | "description": "The storage class that a PVC requires." 
144 | } 145 | } 146 | } 147 | 148 | --- 149 | apiVersion: core.oam.dev/v1alpha1 150 | kind: Trait 151 | metadata: 152 | name: empty 153 | annotations: 154 | version: v1.0.0 155 | description: "An empty trait for exemplification purpose." 156 | spec: 157 | appliesTo: 158 | - "*" 159 | -------------------------------------------------------------------------------- /charts/rudr/templates/workloadtypes.yaml: -------------------------------------------------------------------------------- 1 | # TODO: add settings after spec was fixed 2 | 3 | apiVersion: core.oam.dev/v1alpha1 4 | kind: WorkloadType 5 | metadata: 6 | name: openfaas 7 | annotations: 8 | version: v1.0.0 9 | description: "OpenFaaS Workload" 10 | spec: 11 | group: openfaas.com 12 | version: v1alpha2 13 | names: 14 | kind: Function 15 | singular: function 16 | plural: functions 17 | 18 | --- 19 | apiVersion: core.oam.dev/v1alpha1 20 | kind: WorkloadType 21 | metadata: 22 | name: prometheus 23 | annotations: 24 | version: v1.0.0 25 | description: "Prometheus Workload" 26 | spec: 27 | group: monitoring.coreos.com 28 | version: v1 29 | names: 30 | kind: Prometheus 31 | singular: prometheus 32 | plural: prometheuses -------------------------------------------------------------------------------- /charts/rudr/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for rudr. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | logLevel: "rudr=info,kube=info" 7 | 8 | image: 9 | repository: oamdev/rudr 10 | tag: latest # We're in pre-release 11 | pullPolicy: Always 12 | 13 | # Turning this off will omit the Role and RoleBinding declarations. 14 | enableRBAC: true 15 | 16 | nameOverride: "" 17 | fullnameOverride: "" 18 | 19 | resources: 20 | {} 21 | # We usually recommend not to specify default resources and to leave this as a conscious 22 | # choice for the user. 
This also increases chances charts run on environments with little 23 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 24 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 25 | # limits: 26 | # cpu: 100m 27 | # memory: 128Mi 28 | # requests: 29 | # cpu: 100m 30 | # memory: 128Mi 31 | 32 | nodeSelector: {} 33 | 34 | tolerations: [] 35 | 36 | affinity: {} 37 | -------------------------------------------------------------------------------- /charts/waitForCRDs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RESPONSE_STRING='' 4 | while [[ $RESPONSE_STRING != "No resources found." ]]; do 5 | echo "Waiting for CRD persistence to finish..." 6 | RESPONSE_STRING=$((kubectl get trait) 2>&1 >/dev/null) 7 | sleep 10 8 | done 9 | echo "rudr CRDs have been persisted to the value cache." -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Rudr documentation 2 | 3 | *Rudr* is the Kubernetes reference implementation of the [Open Application Model (OAM) 1.0.0-alpha1](https://github.com/oam-dev/spec/releases/tag/v1.0.0-alpha.1) (OAM) specification, a team-centric standard for building cloud-native apps, where: 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 |
Developer roleDevelopers define application components,
Application operator roleApplication operators create instances of those components, assign them to application configurations, and designate their operational capabilities, and,
Infrastructure operator roleInfrastructure operators declare, install, and maintain the underlying services available on the platform.
19 | 20 | Components and applications are deployed in the Rudr runtime within your Kubernetes cluster. 21 | 22 | To get started, check out the *Install Rudr* guide and the *Tutorial*. The *Concepts* guides will walk you through all the available options Rudr provides to run and manage applications, and the *How-Tos* cover more advanced scenarios. 23 | 24 | ## Get started 25 | 26 | ### [Install Rudr](./setup/install.md) 27 | Install the Rudr runtime and its dependencies. 28 | 29 | ### [Tutorial](./tutorials/deploy_and_update.md) 30 | Learn how to deploy, inspect, and update a Rudr application. 31 | 32 | ## Concepts 33 | 34 | As a implementation of OAM, Rudr provides application centric abstractions to different roles in application management workflow. Learn more about the main application model abstractions: components (and their workloads), traits, and application configurations. 35 | 36 | ### [Component Schematic](./concepts/component-schematic.md) 37 | 38 | The component schematic defines the parameters, workload type, and containers of a unit of code. 39 | 40 | Learn how a developer can define the functional units that may be instantiated as part of a larger distributed application and their respective [**workloads**](concepts/workloads.md). 41 | 42 |
43 | 44 | ### [Application Configuration](./concepts/application-configuration.md) 45 | 46 | The application configuration defines the parameters, components, and other runtime properties of the overall application. 47 | 48 | Learn how an application operator can define how an overall application will be instantiated and configured. 49 | 50 |
51 | 52 | ### [Traits](./concepts/traits.md) 53 | 54 | Traits represent add-on runtime functionality assigned to component workloads from within the application configuration. 55 | 56 | Learn how an application operator can attach operational features to component workloads of an application. 57 | 58 |
59 |
60 |
61 | 62 | ### [Scopes](./concepts/scopes.md) 63 | 64 | Scopes are used to logically group components within an application. 65 | 66 | Learn how an application operator can define application boundaries by grouping components with common properties and dependencies. 67 | 68 | 69 | ## How-To's 70 | 71 | ### [Create a component from scratch](how-to/create_component_from_scratch.md) 72 | 73 | Build a component from source code to use for testing. 74 | 75 | ### [Manage OAM files with Helm/Kustomize](how-to/using_helm_kustomize_manage_oam.md) 76 | 77 | Learn how to use Helm/Kustomize tools to manage your OAM .yaml filese. 78 | 79 | ### [Migrate existing Kubernetes resources](./how-to/migrating.md) 80 | 81 | Here are tips and best practices for migrating exsiting Kubernetes applications to use Rudr. 82 | 83 | ### Extended Workloads 84 | 85 | Learn how to implement your own workload type, or import existing CRD Operator as [OAM Extended Workload Types](https://github.com/oam-dev/spec/blob/master/3.component_model.md#extended-workload-types). The current tutorial includes: 86 | 87 | 1. How to run [OpenFaaS](./tutorials/deploy_openfaas_workload.md) function as OAM `Component`. 88 | 2. How to provision [Prometheus](./tutorials/deploy_prometheus_workload.md) as OAM `Component`. 89 | 90 | > Note: eventually extended workloads will be moved to a separate repo under oam-dev org. 91 | 92 | ## Develop 93 | 94 | #### [Writing a Trait](./developer/writing_a_trait.md) 95 | 96 | Here's a walk through of the process for writing a new trait for Rudr. 97 | 98 | #### [Debug](./developer/debug.md) 99 | 100 | These are some useful tips for troubleshooting your Rudr deployments. 101 | 102 | #### [Admission Controller](https://github.com/oam-dev/admission-controller) 103 | 104 | Admission controller is used for mutating and validating OAM component, trait and application configuration spec. 
105 | 106 | ## [FAQ](./faq.md) 107 | 108 | Find answers to commonly asked questions about Rudr and the Open Application Model (OAM). 109 | -------------------------------------------------------------------------------- /docs/concepts/scopes.md: -------------------------------------------------------------------------------- 1 | # Scopes 2 | 3 | Scopes are used to logically group components within an application. 4 | 5 | Application [*scopes*](https://github.com/oam-dev/spec/blob/master/4.application_scopes.md) are used to logically group components together by providing application boundaries that represent common group behaviors and/or dependencies. Scopes are not mutually exclusive: a given component can belong to multiple application scope instances at the same time. Once installed to your Rudr runtime, a scope can be used (and reused) across any number of [application configurations](./application-configuration.md). In terms of implementation details, scopes are Rudr-defined Kubernetes CRDs. 6 | 7 | *Scopes* are assigned to component workloads by an [application operator](https://github.com/oam-dev/spec/blob/master/2.overview_and_terminology.md#roles-and-responsibilities). 8 | 9 | ![scope schematic comic](../media/appscopecomic.PNG) 10 | 11 | Currently, Rudr supports the following scope types: 12 | 13 | - [Health](#health-scope) 14 | 15 | An [application operator](https://github.com/oam-dev/spec/blob/master/2.overview_and_terminology.md#roles-and-responsibilities) configures and assigns scope instances to component workloads of an application in the [ApplicationConfiguration](application-configuration.md) file. For example, here's a custom configuration of the Rudr [health](#health) scope type: 16 | 17 |
 18 | apiVersion: core.oam.dev/v1alpha1
 19 | kind: ApplicationConfiguration
 20 | metadata:
 21 |   name: my-health-scope
 22 | spec:
 23 |   scopes:
 24 |     - name: health
 25 |       type: core.oam.dev/v1alpha1.HealthScope
 26 |       properties:
 27 |         - name: probe-method
 28 |           value: "kube-get"
 29 |         - name: probe-endpoint
 30 |           value: ".status"
 31 |         - name: probe-timeout
 32 |           value: 30
 33 |         - name: probe-interval
 34 |           value: 60
 35 |         - name: failure-rate-threshold
 36 |           value: 0
 37 |         - name: healthy-rate-threshold
 38 |           value: 100.0
 39 |         - name: healthThresholdPercentage
 40 |           value: 100.0
 41 | 
42 | 43 | And here's how a component would be added to that scope within the [application configuration](application-configuration.md) file: 44 | 45 |
 46 | apiVersion: core.oam.dev/v1alpha1
 47 | kind: ApplicationConfiguration
 48 | metadata:
 49 |   name: first-app
 50 | spec:
 51 |   components:
 52 |     - name: helloworld-python-v1
 53 |       instanceName: first-app-helloworld-python-v1
 54 |       parameterValues:
 55 |         - name: target
 56 |           value: Rudr
 57 |         - name: port
 58 |           value: "9999"
 59 |       traits:
 60 |         - name: ingress
 61 |         properties:
 62 |           hostname: example.com
 63 |           path: /
 64 |           servicePort: 9999
 65 |       applicationScopes:
 66 |         - my-health-scope
 67 | 
68 | 69 | For more on using specific scopes, refer to the sections below. 70 | 71 | ## Supported scopes 72 | 73 | Here's how to get info on the scopes supported on your Rudr installation. 74 | 75 | **List supported scope types**: 76 | 77 | ```console 78 | $ kubectl get scopes 79 | ``` 80 | 81 | **Show the schema details of a scope instance:** 82 | 83 | ```console 84 | $ kubectl get -o yaml 85 | ```` 86 | 87 | ... where `` is one of the Rudr-supported scopes types (per `kubectl get scopes`), and `` is the *name* of a particular scope configuration instance. For example: `kubectl get health my-health-scope -o yaml`. 88 | 89 | ## Health scope 90 | 91 | You can use the *HealthScope* controller to periodically check the aggregate health of components within your application. For a full walkthrough, see [Health Scope Controller](../../healthscope/README.md) 92 | 93 | ### Installation 94 | 95 | Use [Helm 3](https://v3.helm.sh/) to install the Rudr *HealthScope*: 96 | 97 | ```cmd 98 | helm install healthscope ./charts/healthscope 99 | ``` 100 | 101 | ### Properties 102 | 103 | | Name | Description | Allowable values | Required | Default | 104 | | :-- | :--| :-- | :-- | :-- | 105 | | **probe-method** | The method to probe the components, e.g. 'httpGet'. | string | ☑ | | 106 | | **probe-endpoint** | The endpoint to probe from the components, e.g. '/v1/health'. | string | ☑ | | 107 | | **probe-timeout** | The amount of time in seconds to wait when receiving a response before marked failure. | int | | | 108 | | **probe-interval** | The amount of time in seconds between probing tries. | int ||| 109 | | **failure-rate-threshold** | If the rate of failure of total probe results is above this threshold, declared 'failed'. | double ||| 110 | | **healthy-rate-threshold** | If the rate of healthy of total probe results is above this threshold, declared 'healthy'. | double ||| 111 | | **healthThresholdPercentage** | The % of healthy components required to upgrade scope. 
| double ||| 112 | | **requiredHealthyComponents** | Comma-separated list of names of the components required to be healthy for the scope to be health. | string ||| 113 | 114 | [Here's an example](../../examples/health-scope-config.yaml) of a health scope configuration. Once installed (`kubectl apply -f .yaml`) you would attach this to a component within the application configuration, similar to this [example](../../examples/first-app-config.yaml): 115 | 116 | ```yaml 117 | # Example component scope assignment 118 | apiVersion: core.oam.dev/v1alpha1 119 | kind: ApplicationConfiguration 120 | metadata: 121 | name: first-app 122 | spec: 123 | components: 124 | - name: helloworld-python-v1 125 | instanceName: first-app-helloworld-python-v1 126 | parameterValues: 127 | # ... 128 | traits: 129 | # ... 130 | applicationScopes: 131 | - my-health-scope 132 | ``` 133 | 134 | Once you deploy that application configuration, you can verify the health scope instance was created: 135 | 136 | ```console 137 | kubectl get health 138 | ``` 139 | 140 | And get further details about it with `kubectl get health` **`my-health-scope`** `-o yaml`. 141 | -------------------------------------------------------------------------------- /docs/developer/debug.md: -------------------------------------------------------------------------------- 1 | # How to Debug? 2 | 3 | This document outlines troubleshooting guides to help you debug things when deployments via Rudr are not successful. 4 | 5 | ## Check Rudr pod is running 6 | 7 | Make sure rudr is running successfully. 8 | 9 | ``` 10 | $ kubectl get pods -l app.kubernetes.io/name=rudr 11 | NAME READY STATUS RESTARTS AGE 12 | rudr-6b9b9c57cd-zxgfr 1/1 Running 0 30s 13 | ``` 14 | 15 | ## Check Rudr logs 16 | 17 | When you deploy an Application Configuration, you could find information about it from logs. 18 | 19 | ``` 20 | $ kubectl logs -f rudr-6b9b9c57cd-zxgfr 21 | ``` 22 | 23 | Currently, all workload implemented in Rudr will create pods. 
24 | So if your ApplicationConfiguration didn't create any pod, there must be some error information in the logs. 25 | Check the logs and file an issue with the log information. 26 | 27 | 28 | ## Check if the Component exists 29 | 30 | ```shell script 31 | $ kubectl get comp 32 | NAME AGE 33 | alpine-singleton-task-v1 3s 34 | alpine-singleton-worker-v1 3s 35 | alpine-task-v1 3s 36 | alpine-worker-v1 3s 37 | hpa-example-replicated-v1 3s 38 | nginx-replicated-v1 3s 39 | nginx-singleton-v1 3s 40 | ``` 41 | 42 | Don't forget to deploy component before using them. 43 | 44 | ## Check parameters 45 | 46 | Check the parameters and properties of ApplicationConfiguration are consistent with trait and component. 47 | 48 | ## Pods is created but unexpected behavior 49 | 50 | For example incorrect metadata, trait unavailable. Check the logs of Rudr pod, if you couldn't solve it, feel free to create an issue. 51 | -------------------------------------------------------------------------------- /docs/faq.md: -------------------------------------------------------------------------------- 1 | # Frequently Asked Questions (FAQ) 2 | 3 | ## What is the difference between Open Application Model and Rudr? 4 | 5 | *Open Application Model* is a specification for describing applications. 6 | *Rudr* is an implementation of the Open Application Model specification. Rudr runs on Kubernetes, though Open Application Model can be implemented on non-Kubernetes platforms. 7 | 8 | ## What do Open Application Model and Rudr add to the cloud native landscape? 9 | 10 | Open Application Model is designed to introduce separation of concerns (SoC) into Kubernetes. 11 | 12 | In Kubernetes today, developer information is freely intermingled with operator information. We wanted to create a way to distinguish between these two roles so that developers could deliver an artifact that describes their microservice, and operators could apply another artifact that configures and instantiates that microservice. 
13 | 14 | In the OAM model, a `ComponentSchematic` describes a developer's view of a microservice, and an `ApplicationConfiguration` describes an application operator's view of how a component is deployed into the cluster. 15 | 16 | ## How does this compare to Knative? 17 | 18 | Rudr implements core workload types defined by OAM spec on Kubernetes platform, and also provides necessary utilities for implementing extension workloads/traits. In this sense, Knative is a promising implementation candidate for "serverless workload type" and "scale to zero" trait in Rudr. See [related issue](https://github.com/oam-dev/rudr/issues/534) for more information. 19 | 20 | Essentially, the OAM spec defines a set of concepts/rules to makes it easier for different components from different parties to compose as an application. For example, a developer could easily deploy an application composed by a Knative Serving instance consumes a AWS RDS instance by simply defining two components in OAM world. 21 | 22 | ## Can I use Open Application Model/Rudr to deploy existing applications? 23 | 24 | You can describe existing applications as Open Application Model applications. See our [migration guide](./how-to/migrating.md). 25 | 26 | ## How does this compare to Helm or Kompose? 27 | 28 | [Helm](https://helm.sh) is a package manager for Kubernetes, and provides a way to package and distribute Kubernetes applications. 29 | 30 | Open Application Model is just an application model that, thanks to Rudr, runs on Kubernetes. You can bundle your Open Application Model applications as Helm charts and deploy them using Helm. Here is an example of [how to do that](how-to/using_helm_kustomize_manage_oam.md). 31 | 32 | [Kompose](http://kompose.io/) is a tool for manipulating Kubernetes YAML documents. It is also compatible with Open Application Model/Rudr. 33 | 34 | ## How does OAM compare with CNAB? 
35 | 36 | [Cloud Native Application Bundles (CNAB)](https://cnab.io) is a format for packaging and distributing distributed applications, including applications created using OAM. For its part, OAM does not define or prescribe a packaging format. But it works well with CNAB (as well as with Helm). 37 | 38 | When targeting more than one implementation of OAM, developers may find CNAB a better packaging fit than Helm. 39 | 40 | ## Can I write my own traits? 41 | 42 | Currently, all traits are built into Rudr. However, our plan is to make it possible for custom traits to be written and deployed into a Rudr cluster. 43 | 44 | If you write a custom trait and integrate it with Rudr, consider opening a pull request. We are interested in adding more traits. 45 | 46 | ## Can I write my own scopes? 47 | 48 | Currently, no. Scopes are fixed according to the Open Application Model spec. 49 | 50 | ## Does Rudr support "extended workload types" as described in the Open Application Model specification 51 | 52 | No. That section of the specification is a draft, and we are not yet supporting it. 53 | 54 | 55 | ## Why Rust? 56 | 57 | On occasion, we have been asked why Rudr is written in Rust instead of Go. There is no requirement in the Kubernetes world that Kubernetes controllers be written in Go. Many languages implement the Kubernetes API and can be used for creating controllers. We decided to write Rudr in Rust because the language allows us to write Kubernetes controllers with far less code. Rust's generics make it possible to quickly and succinctly describe custom Kubernetes API resources without requiring developers to run code generators. And Rust's Kubernetes library can easily switch between Kubernetes versions with ease. We recognize that Rust might not be to everyone's taste (and neither is Go). However, we are confident that Rust is a solid choice for writing maintainable and concise Kubernetes applications. 
58 | -------------------------------------------------------------------------------- /docs/how-to/app/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the official Python image. 2 | # https://hub.docker.com/_/python 3 | FROM python:3.7-slim-buster 4 | 5 | # Copy local code to the container image. 6 | ENV APP_HOME /app 7 | WORKDIR $APP_HOME 8 | # Install production dependencies. 9 | RUN pip install Flask gunicorn 10 | ENV PORT=8080 11 | COPY . . 12 | 13 | # Run the web service on container startup. Here we use the gunicorn webserver 14 | CMD exec gunicorn --bind :$PORT app:app -------------------------------------------------------------------------------- /docs/how-to/app/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from flask import Flask 4 | 5 | app = Flask(__name__) 6 | 7 | @app.route('/') 8 | def hello_world(): 9 | target = os.environ.get('TARGET', 'World') 10 | return 'Hello {}!\n'.format(target) 11 | 12 | if __name__ == "__main__": 13 | app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', "8080"))) 14 | -------------------------------------------------------------------------------- /docs/how-to/create_component_from_scratch.md: -------------------------------------------------------------------------------- 1 | # Create Component from Scratch 2 | 3 | In this tutorial, we will build a simple web app component written in Python that you can use for testing. 4 | It reads in an env variable TARGET and prints “Hello \${TARGET}!“. 5 | If TARGET is not specified, it will use “World” as the TARGET. 6 | 7 | ## Prerequisites 8 | 9 | * Follow the instructions in the [installation](../setup/install.md) document to get Rudr installed on your Kubernetes cluster. 
10 | * [Docker](https://www.docker.com/) installed and running on your local machine, and a [Docker Hub](https://hub.docker.com) account configured (we’ll use it for a container registry). 11 | 12 | ## Steps to build image 13 | 14 | The following instructions will lead you to build an image from source, you can get all the files mentioned here in the [app](./app) folder. 15 | 16 | 1. Create a new directory and cd into it: 17 | ```shell script 18 | mkdir app 19 | cd app 20 | ``` 21 | 2. Create a file named `app.py` and copy the code from [`app/app.py`](./app/app.py) 22 | 3. Create a file named `Dockerfile` and copy the code from [`app/Dockerfile`](./app/Dockerfile), See [official Python docker image](https://hub.docker.com/_/python/) for more details. 23 | 4. Use Docker to build the sample code into a container. To build and push with Docker Hub, run these commands replacing `oamdev` with your Docker Hub username: 24 | ```shell script 25 | # Build the container on your local machine 26 | docker build -t oamdev/helloworld-python:v1 . 27 | 28 | # Push the container to docker registry 29 | docker push oamdev/helloworld-python:v1 30 | ``` 31 | 32 | ## Create Component Schematics File 33 | 34 | Now we have a docker image named `oamdev/helloworld-python:v1`, so we can use this image to create a component schematics file. 35 | 36 | 1. Choose the workloadType: the `helloworld-python` is very typical web application, it is stateless, always running as a service, can be replicated. So we use `core.oam.dev/v1alpha1.Server` without doubt. 37 | 2. Fill the container spec and make ENV configurable: obviously we have two major environment variables in the image, one is TARGET and the other is PORT. 38 | 3. Make parameters so we could let Application Configuration configure these environments. 
39 | 40 | After these three concerns, we could figure out this basic component schematic yaml like below: 41 | 42 | ```yaml 43 | apiVersion: core.oam.dev/v1alpha1 44 | kind: ComponentSchematic 45 | metadata: 46 | name: helloworld-python-v1 47 | spec: 48 | workloadType: core.oam.dev/v1alpha1.Server 49 | containers: 50 | - name: foo 51 | image: oamdev/helloworld-python:v1 52 | env: 53 | - name: TARGET 54 | fromParam: target 55 | - name: PORT 56 | fromParam: port 57 | ports: 58 | - protocol: TCP 59 | containerPort: 9999 60 | name: http 61 | parameters: 62 | - name: target 63 | type: string 64 | default: World 65 | - name: port 66 | type: string 67 | default: '9999' 68 | ``` 69 | 70 | Let's name it `helloworld-python-component.yaml` and put it into [the `examples` folder](../../examples/helloworld-python-component.yaml). 71 | 72 | Finally we could apply this yaml to the platform and let our operators to deploy it. 73 | 74 | ```shell script 75 | $ kubectl apply -f examples/helloworld-python-component.yaml 76 | componentschematic.core.oam.dev/helloworld-python-v1 created 77 | ``` 78 | 79 | You can check if your component schematic is OK with: 80 | 81 | ```shell script 82 | $ kubectl get comp 83 | NAME AGE 84 | helloworld-python-v1 15s 85 | ``` 86 | 87 | Yeah, we have successfully built a component from source now. 88 | 89 | ## Upgrade the component 90 | 91 | We assume a component is immutable. If we want to upgrade a component, 92 | the easiest way is to modify the component and change the name suffix with a new version. 93 | 94 | ### Change the code 95 | 96 | For example, we change the code from `Hello` to `Goodbye`. 
97 | 98 | ```shell script 99 | import os 100 | 101 | from flask import Flask 102 | 103 | app = Flask(__name__) 104 | 105 | @app.route('/') 106 | def hello_world(): 107 | target = os.environ.get('TARGET', 'World') 108 | - return 'Hello {}!\n'.format(target) 109 | + return 'Goodbye {}!\n'.format(target) 110 | 111 | if __name__ == "__main__": 112 | app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', "8080"))) 113 | ``` 114 | 115 | Build and create image with a new tag. 116 | 117 | ```shell script 118 | docker build -t oamdev/helloworld-python:v2 . 119 | docker push oamdev/helloworld-python:v2 120 | ``` 121 | 122 | ### Change the component 123 | 124 | Change the component with a new name. 125 | 126 | ```diff 127 | apiVersion: core.oam.dev/v1alpha1 128 | kind: ComponentSchematic 129 | metadata: 130 | - name: helloworld-python-v1 131 | + name: helloworld-python-v2 132 | spec: 133 | workloadType: core.oam.dev/v1alpha1.Server 134 | containers: 135 | - name: foo 136 | - image: oamdev/helloworld-python:v1 137 | + image: oamdev/helloworld-python:v2 138 | 139 | env: 140 | - name: TARGET 141 | fromParam: target 142 | - name: PORT 143 | fromParam: port 144 | ports: 145 | - protocol: TCP 146 | containerPort: 9999 147 | name: http 148 | parameters: 149 | - name: target 150 | type: string 151 | default: World 152 | - name: port 153 | type: string 154 | default: '9999' 155 | ``` 156 | 157 | Apply the changed component: 158 | 159 | ```console 160 | $ kubectl apply -f examples/helloworld-python-component.yaml 161 | componentschematic.core.oam.dev/helloworld-python-v2 created 162 | ``` 163 | 164 | ### Check the result 165 | 166 | Now we have two components: 167 | 168 | ```console 169 | $ kubectl get comp 170 | NAME AGE 171 | helloworld-python-v1 1h 172 | helloworld-python-v2 27s 173 | ``` 174 | 175 | They could be used by operator in [application configuration](https://github.com/oam-dev/rudr/blob/master/docs/concepts/application-configuration.md) 176 | 
-------------------------------------------------------------------------------- /docs/how-to/migrating.md: -------------------------------------------------------------------------------- 1 | # Migrating Kubernetes Resources to Rudr 2 | 3 | This document explains, at a high level, how to represent your Kubernetes applications using the Open Application Model specification. 4 | 5 | ## Terms 6 | 7 | The terms here are described elsewhere, particularly in the [specification](https://github.com/oam-dev/spec). They are summarized here for convenience. 8 | 9 | - Component: A component describes a particular workload (or microservice) that can be deployed. 10 | - Component Instance: A particular instance of a component. If one Component is deployed in two Application Configurations, it will create two Component Instances, each of which is managed by its Application Configuration 11 | - Workload type: Open Application Model describes the basic behavior of a Component using workload types. There are 6 core workload types: 12 | - SingletonServer: This service listens on a network interface. Only one instance of the component's pod can run at a time. 13 | - Server: This service listens on a network interface, but multiple replicas of the component's pod can be running concurrently. 14 | - SingletonTask: This component only runs for a short period of time (it is not a daemon). It does not listen on a network interface. And at any given time, only one of these may run per application. 15 | - Task: This component only runs for a short period of time (it is not a daemon). It does not listen on a network interface. Many replicas of this task may run concurrently. 16 | - SingletonWorker: This component is long-running, but does not listen on a network interface. Only one pod can be running per application. 17 | - Worker: This component is long-running, but does not listen on a network interface. Multiple workers may run concurrently. 
18 | - Application Configuration: This resource describes an application as a list of components. Configuration information (parameter values) may be passed into components via the application configuration. Traits and scopes are attached to components in the Application Configuration 19 | - Traits: A trait describes an operational behavior that should be attached to a component at runtime. For example, a Server may have an Autoscaler trait attached. 20 | - Scopes: A scope is an arbitrary group of Component Instances that share a behavior. For example, the health check scope facilitates an aggregate health check of all Component Instances in that Scope. If any Component Instance's health check fails, the Scope's health check fails. 21 | 22 | ## Separation of Concerns 23 | 24 | One of the questions we occasionally hear from seasoned Kubernetes developers is _What do I gain from Open Application Model?_. To be clear, part of the design of Open Application Model was to make it easier for developers to work with Kubernetes without needing to understand the operational aspects of Kubernetes. But the real virtue we see in Open Application Model is its separation of concerns. 25 | 26 | Today's Kubernetes objects are built to _describe a concept_. A Deployment describes a replicable service that can have certain rollout strategies applied. 27 | 28 | But Open Application Model attempts to start with a different premise, and then describe things accordingly: 29 | 30 | > Cloud Native Applications have responsibilities described by three different roles: Developers, Application Operators, and Infrastructure Operators. 31 | 32 | Each of these roles has a different job, and different concerns. A developer is responsible for producing a runnable workload. An application operator is responsible for executing an application inside of Kubernetes. An infrastructure operator is responsible for configuring and running Kubernetes itself. 
33 | 34 | In our view, the developer is responsible for creating and maintaining Components. The application operator takes responsibility for the Application Configuration. And the infrastructure operator runs Rudr, and decides how Open Application Model's Traits and Scopes are used in practice. And Rudr's job is to take these various inputs and transform them into underlying Kubernetes types. 35 | 36 | ## Workflow 37 | 38 | Traits and Scopes are provided by Rudr. You can list them with `kubectl get traits` and `kubectl get scopes`. 39 | 40 | You may install your own components. 41 | 42 | To create a new Rudr application, create an `ApplicationConfiguration` YAML file and specify the components, traits, scopes, and parameters. 43 | 44 | Rudr watches for new Application Configurations. When one is created, Rudr will read it, load the component, trait, and scope definitions, and then create new resources in Kubernetes. 45 | 46 | Likewise, when Application Configurations are modified or deleted, Rudr will respond to those events, managing the resources for those components, scopes, and traits. 47 | 48 | ## Converting a Kubernetes Application to Rudr 49 | 50 | A major goal of the Open Application Model specification is to separate operational concerns from developer concerns. So a `ComponentSchematic` describes a component from a developer's view, while an `ApplicationConfiguration` creates instances of Components, and attaches configuration data to them. 51 | 52 | To convert a Kubernetes application to Rudr, you can follow these steps: 53 | 54 | 1. Describe your workloads (microservices) as Components. 
- A `Deployment` or `ReplicaSet` can be converted to one of `SingletonServer`, `Server`, `SingletonWorker`, or `Worker` depending on its runtime requirements 56 | - A `Job` can be converted to one of `SingletonTask` or `Task`
4 | In this tutorial, we will introduce using tools (such as helm, kustomize) to solve this problem. 5 | 6 | ## Using Helm 7 | 8 | We have introduced how to [create a helloworld-python component from scratch](./create_component_from_scratch.md) and [deploy it using Application Configuration](../tutorials/deploy_and_update.md). 9 | 10 | You could find the [hello-rudr](../../examples/charts/hello-rudr) in `examples` folder, this is the same `hellowworld-python`app built by helm chart. 11 | 12 | If you have installed rudr, you could easily install this hello-rudr chart by helm v3. 13 | 14 | Assume you have lots of parameters in appconfig, you could just use helm templates and make them configurable by values. 15 | 16 | In this example, we mainly make `target`, `port` configurable. 17 | 18 | ```yaml 19 | kind: ApplicationConfiguration 20 | apiVersion: core.oam.dev/v1alpha1 21 | metadata: 22 | name: "{{ .Release.Name }}" 23 | spec: 24 | components: 25 | - componentName: "{{ .Release.Name }}-{{ .Values.appVersion}}" 26 | instanceName: "{{ .Release.Name }}-{{ .Values.appVersion}}" 27 | parameterValues: 28 | - name: target 29 | value: "{{ .Values.target }}" 30 | - name: port 31 | value: "{{ .Values.port }}" 32 | traits: 33 | - name: ingress 34 | properties: 35 | hostname: example.com 36 | path: / 37 | servicePort: {{ .Values.port }} 38 | - name: manual-scaler 39 | properties: 40 | replicaCount: {{ .Values.replicaCount }} 41 | ``` 42 | 43 | So they could be configured in `values.yaml` like below: 44 | 45 | ```yaml 46 | # Default values for hello-rudr. 47 | # This is a YAML-formatted file. 48 | # Declare variables to be passed into your templates. 49 | 50 | replicaCount: 1 51 | appVersion: v1 52 | target: Rudr 53 | port: "9999" 54 | 55 | ``` 56 | 57 | The `values.yaml` file is clean and easy to understand. 
58 | 59 | You could even classify parameters to make them more clear, for example: 60 | 61 | ```yaml 62 | 63 | appVersion: v1 64 | 65 | scale: 66 | replicaCount: 1 67 | 68 | service: 69 | target: Rudr 70 | port: "9999" 71 | 72 | ``` 73 | 74 | Of cource, you should change your templates to: 75 | 76 | ```yaml 77 | kind: ApplicationConfiguration 78 | apiVersion: core.oam.dev/v1alpha1 79 | metadata: 80 | name: "{{ .Release.Name }}" 81 | spec: 82 | components: 83 | - componentName: "{{ .Release.Name }}-{{ .Values.appVersion}}" 84 | instanceName: "{{ .Release.Name }}-{{ .Values.appVersion}}" 85 | parameterValues: 86 | - name: target 87 | value: "{{ .Values.service.target }}" 88 | - name: port 89 | value: "{{ .Values.service.port }}" 90 | traits: 91 | - name: ingress 92 | properties: 93 | hostname: example.com 94 | path: / 95 | servicePort: {{ .Values.port }} 96 | - name: manual-scaler 97 | properties: 98 | replicaCount: {{ .Values.replicaCount }} 99 | ``` 100 | 101 | ## Using Kustomize 102 | 103 | [Kustomize](https://github.com/kubernetes-sigs/kustomize) is a tool to customize kubernetes YAML configurations. 104 | Using Kustomize is a little different, the way kustomize used is just like a patch. 105 | 106 | We also use [hello-rudr example](../../examples/kustomize/hello-rudr) as an example with the `helloworld-python` app. 107 | 108 | First we put the original yaml in [`base`](../../examples/kustomize/hello-rudr/base) directory, 109 | then we define our patch in [`overlay`](../../examples/kustomize/hello-rudr/overlay/production) like below: 110 | 111 | ```yaml 112 | apiVersion: core.oam.dev/v1alpha1 113 | kind: ApplicationConfiguration 114 | metadata: 115 | name: first-app 116 | spec: 117 | components: 118 | - componentName: helloworld-python-v1 119 | instanceName: patched-app 120 | parameterValues: 121 | - name: target 122 | value: Hello 123 | - name: port 124 | value: "8888" 125 | ``` 126 | 127 | In this example, we override 3 parameters here: 128 | 129 | 1. 
instanceName: change from `first-app-helloworld-python-v1` to `patched-app` 130 | 2. parameterValues(target): change from `Rudr` to `hello` 131 | 3. parameterValues(port): change from `9999` to `8888` 132 | 133 | Finally, you could use `kustomize build` to see the result: 134 | 135 | ```console 136 | $ kustomize build overlay/production 137 | apiVersion: core.oam.dev/v1alpha1 138 | kind: ApplicationConfiguration 139 | metadata: 140 | labels: 141 | variant: production 142 | name: production-first-app 143 | spec: 144 | components: 145 | - componentName: helloworld-python-v1 146 | instanceName: patched-app 147 | parameterValues: 148 | - name: target 149 | value: Hello 150 | - name: port 151 | value: "8888" 152 | --- 153 | apiVersion: core.oam.dev/v1alpha1 154 | kind: ComponentSchematic 155 | metadata: 156 | labels: 157 | variant: production 158 | name: production-helloworld-python-v1 159 | spec: 160 | containers: 161 | - env: 162 | - fromParam: target 163 | name: TARGET 164 | - fromParam: port 165 | name: PORT 166 | image: oamdev/helloworld-python:v1 167 | name: foo 168 | ports: 169 | - containerPort: 9999 170 | name: http 171 | protocol: TCP 172 | name: helloworld-python 173 | parameters: 174 | - default: World 175 | name: target 176 | type: string 177 | - default: "9999" 178 | name: port 179 | type: string 180 | workloadType: core.oam.dev/v1alpha1.Server 181 | ``` 182 | 183 | You could apply the result by: 184 | 185 | ```shell script 186 | kustomize build overlay/production | kubectl apply -f - 187 | ``` 188 | 189 | So next time you just need to change the `patch.yaml` to make things easier. 
-------------------------------------------------------------------------------- /docs/media/app-operator-role.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/app-operator-role.png -------------------------------------------------------------------------------- /docs/media/appconfigcomic.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/appconfigcomic.PNG -------------------------------------------------------------------------------- /docs/media/application.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/application.png -------------------------------------------------------------------------------- /docs/media/appscopecomic.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/appscopecomic.PNG -------------------------------------------------------------------------------- /docs/media/component.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/component.png -------------------------------------------------------------------------------- /docs/media/componentcomic.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/componentcomic.PNG -------------------------------------------------------------------------------- /docs/media/developer-role.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/developer-role.png -------------------------------------------------------------------------------- /docs/media/infra-operator-role.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/infra-operator-role.png -------------------------------------------------------------------------------- /docs/media/k8s_application_complexities.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/k8s_application_complexities.png -------------------------------------------------------------------------------- /docs/media/rudr-how-it-works.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/rudr-how-it-works.png -------------------------------------------------------------------------------- /docs/media/rudr-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/rudr-logo.png -------------------------------------------------------------------------------- /docs/media/runtime.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/runtime.png -------------------------------------------------------------------------------- /docs/media/scopes.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/scopes.png -------------------------------------------------------------------------------- /docs/media/traitcomic.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/traitcomic.PNG -------------------------------------------------------------------------------- /docs/media/traits.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oam-dev/rudr/f193ac54ca6d5a7ac330a8490fecd5d7cc8659e3/docs/media/traits.png -------------------------------------------------------------------------------- /docs/setup/appendix.md: -------------------------------------------------------------------------------- 1 | # Appendix Ⅰ: How to install a compatible version of Kubernetes on AKS? 2 | 3 | On AKS you may install a particular version of Kubernetes. However, you need to choose a recent version of Kubernetes. Often, the default is an older release (currently, 1.13.10). 4 | 5 | ```console 6 | $ az aks get-versions -l eastus -o table 7 | Unable to load extension 'eventgrid'. Use --debug for more information. 8 | KubernetesVersion Upgrades 9 | ------------------- ------------------------ 10 | 1.15.3(preview) None available 11 | 1.14.6 1.15.3(preview) 12 | 1.14.5 1.14.6, 1.15.3(preview) 13 | 1.13.10 1.14.5, 1.14.6 14 | 1.13.9 1.13.10, 1.14.5, 1.14.6 15 | 1.12.8 1.13.9, 1.13.10 16 | 1.12.7 1.12.8, 1.13.9, 1.13.10 17 | 1.11.10 1.12.7, 1.12.8 18 | 1.11.9 1.11.10, 1.12.7, 1.12.8 19 | 1.10.13 1.11.9, 1.11.10 20 | 1.10.12 1.10.13, 1.11.9, 1.11.10 21 | $ az group create -l eastus -n rudr 22 | ... 23 | $ az aks create --kubernetes-version 1.15.3 -n rudr -g rudr 24 | ... 
25 | $ az aks get-credentials -n rudr -g rudr 26 | ``` 27 | 28 | At the end of this process, verify that you are connected to this cluster with `kubectl config current-context`. 29 | -------------------------------------------------------------------------------- /docs/setup/install_windows.md: -------------------------------------------------------------------------------- 1 | # Installing Rudr on Windows 2 | 3 | ## Prerequisites 4 | 5 | Rudr has two dependencies, `kubectl` and `Helm 3`. 6 | 7 | Installing necessary software packages on Windows is made easier with the package manager [Chocolatey](https://chocolatey.org/). Rudr has tw 8 | 9 | 1. To create our local development environment we will use Minikube. Install via Chocolatey by running the following commands in an Administrator shell: 10 | 11 | choco install minikube 12 | 13 | choco install kubernetes-cli 14 | 15 | 2. In order for our development environment to be compatible with Docker for Windows (and other apps that use Hyper-V) we must use a virtual network switch. Configuring the switch is straightforward: open the Hyper-V manager and from the right pane select **Network Switch Manager**. From there, create a new virtual switch with type **External**. 16 | 17 | 3. Now we can start up our VM. Be aware that Rudr is compatible with Kubernetes 1.16 and 1.17 only. From an Administrator shell run: 18 | 19 | minikube start --vm-driver hyperv --hyperv-virtual-switch --v=7 --kubernetes-version v1.17.0 20 | 21 | Verify the installation is working properly with: 22 | 23 | kubectl get pods -n kube-system 24 | 25 | 26 | 4. To install Helm, we again can use Chocolatey: 27 | 28 | choco install kubernetes-helm 29 | 30 | Confirm the installation was successful by running: 31 | 32 | helm version 33 | 34 | You should see output displaying Helm's version info. Verify you are running version 3.x.x. 
35 | 36 | ## Installing Rudr Using Helm 3 37 | 38 | > Note: In its current version, Rudr will only listen for events in one namespace. This will change in the future. For now, though, you must install Rudr into the namespace into which you will deploy Rudr apps. You may install Rudr multiple times on the same cluster as long as you deploy to a different namespace each time. 39 | 40 | > Tip: As there are some breaking changes, such as Configuration => ApplicationConfiguration, Component => ComponentSchematic, if you reinstall Rudr make sure your old CRDs are deleted. You must do this with `kubectl delete crd -l app.kubernetes.io/part-of=core.oam.dev`. 41 | 42 | In an Administrator shell, run: 43 | 44 | helm install rudr ./charts/rudr 45 | 46 | 47 | ### Upgrading 48 | 49 | To upgrade Rudr, typically you only need to use Helm. 50 | 51 | > Tip: During the Alpha and Beta phase of Rudr, we recommend also deleting your CRDs manually. You must do this with `kubectl delete crd -l app.kubernetes.io/part-of=core.oam.devkubectl delete crd`. 52 | 53 | ```console 54 | helm upgrade rudr charts/rudr 55 | ``` 56 | 57 | The above will update your Rudr to the latest version. 58 | 59 | ### Uninstalling 60 | 61 | ```console 62 | helm delete rudr 63 | ``` 64 | 65 | This will leave the CRDs and configurations intact. 66 | 67 | **NOTE: When you delete the CRDs, it will delete everything touching Open Application Model from configurations to secrets.** 68 | 69 | ```console 70 | kubectl delete crd -l app.kubernetes.io/part-of=core.oam.dev 71 | ``` 72 | 73 | The above will delete the CRDs and clean up everything related with Open Application Model. 74 | 75 | ## Installing Implementations for Traits 76 | 77 | Rudr provides several traits, including ingress and autoscaler. However, it does not install default implementations of some of these. This is because they map to primitive Kubernetes features that can be fulfilled by different controllers. 
78 | 79 | The best place to find implementations for your traits is [Helm Hub](https://hub.helm.sh/). 80 | 81 | 82 | ### Manual Scaler 83 | 84 | The manual scaler trait has no external dependencies. 85 | 86 | ### Ingress 87 | 88 | To successfully use an `ingress` trait, you will need to install one of the Kubernetes Ingress controllers. We recommend [nginx-ingress](https://hub.helm.sh/charts/stable/nginx-ingress). 89 | 90 | 1. First, add the stable repo to your Helm installation. 91 | 92 | ```Powershell 93 | helm repo add stable https://kubernetes-charts.storage.googleapis.com/ 94 | ``` 95 | 96 | 2. Install the NGINX ingress using Helm 3. 97 | 98 | ```Powershell 99 | helm install nginx-ingress stable/nginx-ingress 100 | ``` 101 | 102 | *Note:* You still must manage your DNS configuration as well. Mapping an ingress to `example.com` will not work if you do not also control the domain mapping for `example.com`. 103 | 104 | ### Autoscaler 105 | 106 | To use the autoscaler trait, you must install a controller for Kubernetes `HorizontalPodAutoscaler`. We recommend [KEDA](https://hub.helm.sh/charts/kedacore/keda). 107 | 108 | 1. First, add the KEDA repo to your Helm installation. 109 | 110 | ```Powershell 111 | helm repo add kedacore https://kedacore.github.io/charts 112 | ``` 113 | 114 | 2. Update your Helm repo. 115 | 116 | ```Powershell 117 | helm repo update 118 | ``` 119 | 120 | 2. Install KEDA on your cluster. 121 | 122 | ```Powershell 123 | helm install keda kedacore/keda 124 | ``` 125 | 126 | ## Next Steps 127 | 128 | Deploy a sample Rudr application using the [tutorial](../tutorials/deploy_and_update.md). 129 | 130 | ## Appendix 131 | 132 | You could check the [appendix doc](appendix.md) to find more information. 
-------------------------------------------------------------------------------- /docs/tutorials/deploy_prometheus_workload.md: -------------------------------------------------------------------------------- 1 | # Prometheus Extended Workload 2 | 3 | ## Deploy Prometheus Operator 4 | 5 | ```shell script 6 | $ kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/master/bundle.yaml 7 | ``` 8 | 9 | ## Define the Prometheus component 10 | 11 | The component must have a workloadType combined with `GROUP/VERSION.KIND`, so the Non-Intrusive Workload will find which custom resource to create. 12 | 13 | Then put the whole spec in the workloadSettings value with a name called `spec` like below. 14 | 15 | ```yaml 16 | apiVersion: core.oam.dev/v1alpha1 17 | kind: ComponentSchematic 18 | metadata: 19 | name: prometheus 20 | spec: 21 | workloadType: monitoring.coreos.com/v1.Prometheus 22 | osType: linux 23 | workloadSettings: 24 | - name: spec 25 | type: object 26 | description: the spec of prometheus-operator 27 | required: true 28 | value: 29 | serviceAccountName: default 30 | serviceMonitorSelector: 31 | matchLabels: 32 | team: frontend 33 | resources: 34 | requests: 35 | memory: 400Mi 36 | enableAdminAPI: true 37 | ``` 38 | 39 | ## Prepare the application configuration 40 | 41 | The application configuration just need to use this component. 42 | 43 | ```yaml 44 | apiVersion: core.oam.dev/v1alpha1 45 | kind: ApplicationConfiguration 46 | metadata: 47 | name: prometheus 48 | spec: 49 | components: 50 | - componentName: prometheus 51 | instanceName: prometheus-app 52 | ``` 53 | 54 | ## Apply our configurations 55 | 56 | ```shell script 57 | $ kubectl apply -f examples/prometheusapp.yaml 58 | componentschematic.core.oam.dev/prometheus created 59 | applicationconfiguration.core.oam.dev/prometheus created 60 | ``` 61 | 62 | we could see that an Prometheus Operator CR was created by rudr. 
63 | 64 | ```shell script 65 | $ kubectl get prometheuses 66 | NAME AGE 67 | prometheus-app 37s 68 | ``` 69 | 70 | Then the Prometheus operator we create an real Prometheus app described by the CR. 71 | 72 | ```shell script 73 | $ kubectl get statefulset 74 | NAME READY AGE 75 | prometheus-prometheus-app 1/1 6m21s 76 | ``` 77 | 78 | You could change the component spec as you like if you want. 79 | 80 | ## TODO 81 | 82 | - [ ] Register and generate serving information for this Prometheus workload for other workload to consume. 83 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Overview of Samples 2 | 3 | This directory contains samples and examples used throughout the Rudr documentation. This document outlines what each sample attempts to show and can be used as a reference for. 4 | 5 | | File Name | Description 6 | |-|-| 7 | | voting.yaml | This folder contains manifests for the canonical voting sample. It is a multi-component application that uses environment variables and the ingress trait. | 8 | | autoscaler.yaml| This is an example of how to use the autoscaler trait. | 9 | | components.yaml| General components that are used in quickstarts, how-to's and instantiated by the app configurations in this folder. | 10 | | env-vars.yaml| This is an example of how to specify environment variables for your containers. | 11 | | first-app-config.yaml| This is the configuration applied to the `nginx-component` as part of the quickstarts. | 12 | | helloworld-python-component.yaml| This component is the complete version of the **Create Component from Scratch** [guide](../docs/how-to/create_component_from_scratch.md) | 13 | | image-pull-secret.yaml| This is an example of how to specify secrets for private registries| 14 | | manual-scaler.yaml| This is an example of to manually specify replica counts for components. 
| 15 | | multi-component.yaml| This is an example of how to specify multiple components in one manifest. | 16 | | multi-server.yaml| This is an example of an app configuration instantiating multiple components. | 17 | | nginx-component.yaml| Basic NGINX component used for quickstarts. | 18 | | replicable-task.yaml| This is an example of an app config instantiating replicable tasks.| 19 | | task.yaml| This is an example of an app config instantating a simple task to completion. | 20 | | volumes.yaml| This is an example of how to specify volumes required in the component schematics. | 21 | | worker.yaml| This is an example of how to instantiate a worker component. | 22 | 23 | -------------------------------------------------------------------------------- /examples/autoscaler.yaml: -------------------------------------------------------------------------------- 1 | # This will create a Horizontal Pod Autoscaler, but your cluster must have 2 | # an implementation of an HPA before this will actually cause autoscaling. 3 | apiVersion: core.oam.dev/v1alpha1 4 | kind: ApplicationConfiguration 5 | metadata: 6 | name: autoscaler-example 7 | spec: 8 | components: 9 | - componentName: hpa-example-replicated-v1 10 | instanceName: autoscaled-repsvc 11 | parameterValues: 12 | - name: poet 13 | value: Eliot 14 | - name: poem 15 | value: The Wasteland 16 | traits: 17 | - name: auto-scaler 18 | properties: 19 | maximum : 6 20 | minimum : 2 21 | cpu : 50 22 | memory : 50 23 | -------------------------------------------------------------------------------- /examples/charts/README.md: -------------------------------------------------------------------------------- 1 | # Example Charts of OAM Apps 2 | 3 | This directory contains example Helm charts that install Open Application Model apps. 4 | 5 | Helm is a useful tool for parameterizing AppConfig files. 
There are various strategies for installing ComponentSchematics: 6 | 7 | - They may be bundled into the same chart that manages them, and treated like standard resources 8 | - When this chart is upgraded or deleted, components will be updated or deleted 9 | - This can be bad if multiple apps share the same components 10 | - This can be good if your app configs and components are closely related 11 | - They may be bundled into the same chart that references them, but managed with hooks 12 | - You can configure hooks to not delete components 13 | - This solves some of the problems above 14 | - They may be kept in separate Helm charts 15 | - This model is best if you want to have lots of components that can be shared among different app configs 16 | 17 | 18 | ## Installing OAM Apps 19 | 20 | These examples are built for Helm 3. They can be installed with the following command: 21 | 22 | ```console 23 | $ helm install my-hello hello-rudr 24 | ``` 25 | 26 | They can be uninstalled with `helm delete my-hello`. 27 | -------------------------------------------------------------------------------- /examples/charts/hello-rudr/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /examples/charts/hello-rudr/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: hello-rudr 3 | description: Demonstration of using Helm with Rudr, using the Python example. 
4 | type: application 5 | version: 0.1.0 6 | appVersion: 0.1.0 7 | -------------------------------------------------------------------------------- /examples/charts/hello-rudr/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | This chart is an example of using Helm to install rudr applications. -------------------------------------------------------------------------------- /examples/charts/hello-rudr/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "hello-rudr.name" -}} 6 | {{/* - trunc 63 .Chart.Name | trimSuffix "-" - */}} 7 | {{ .Chart.name }} 8 | {{- end -}} 9 | 10 | {{/* 11 | Create a default fully qualified app name. 12 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 13 | If release name contains chart name it will be used as a full name. 14 | */}} 15 | {{- define "hello-rudr.fullname" -}} 16 | {{- if .Values.fullnameOverride -}} 17 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 18 | {{- else -}} 19 | {{- $name := default .Chart.Name .Values.nameOverride -}} 20 | {{- if contains $name .Release.Name -}} 21 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 22 | {{- else -}} 23 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 24 | {{- end -}} 25 | {{- end -}} 26 | {{- end -}} 27 | 28 | {{/* 29 | Create chart name and version as used by the chart label. 30 | */}} 31 | {{- define "hello-rudr.chart" -}} 32 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 33 | {{- end -}} 34 | 35 | {{/* 36 | Common labels 37 | */}} 38 | {{- define "hello-rudr.labels" -}} 39 | app.kubernetes.io/name: {{ include "hello-rudr.name" . }} 40 | helm.sh/chart: {{ include "hello-rudr.chart" . 
}} 41 | app.kubernetes.io/instance: {{ .Release.Name }} 42 | {{- if .Chart.AppVersion }} 43 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 44 | {{- end }} 45 | app.kubernetes.io/managed-by: {{ .Release.Service }} 46 | {{- end -}} 47 | -------------------------------------------------------------------------------- /examples/charts/hello-rudr/templates/appconfig.yaml: -------------------------------------------------------------------------------- 1 | kind: ApplicationConfiguration 2 | apiVersion: core.oam.dev/v1alpha1 3 | metadata: 4 | name: "{{ .Release.Name }}" 5 | spec: 6 | components: 7 | - componentName: "{{ .Release.Name }}-{{ .Values.appVersion}}" 8 | instanceName: "{{ .Release.Name }}-{{ .Values.appVersion}}" 9 | parameterValues: 10 | - name: target 11 | value: "{{ .Values.target }}" 12 | - name: port 13 | value: "{{ .Values.port }}" 14 | traits: 15 | - name: ingress 16 | properties: 17 | hostname: example.com 18 | path: / 19 | servicePort: {{ .Values.port }} 20 | - name: manual-scaler 21 | properties: 22 | replicaCount: {{ .Values.replicaCount }} 23 | -------------------------------------------------------------------------------- /examples/charts/hello-rudr/templates/component.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ComponentSchematic 3 | metadata: 4 | name: "{{.Release.Name}}-{{.Values.appVersion}}" 5 | spec: 6 | workloadType: core.oam.dev/v1alpha1.Server 7 | containers: 8 | - name: foo 9 | image: oamdev/helloworld-python:v1 10 | env: 11 | - name: TARGET 12 | fromParam: target 13 | - name: PORT 14 | fromParam: port 15 | ports: 16 | - protocol: TCP 17 | containerPort: 9999 18 | name: http 19 | parameters: 20 | - name: target 21 | type: string 22 | default: World 23 | - name: port 24 | type: string 25 | default: "9999" 26 | -------------------------------------------------------------------------------- /examples/charts/hello-rudr/values.yaml: 
-------------------------------------------------------------------------------- 1 | # Default values for hello-rudr. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | appVersion: v1 7 | target: Rudr 8 | port: "9999" 9 | -------------------------------------------------------------------------------- /examples/components.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ComponentSchematic 3 | metadata: 4 | name: hpa-example-replicated-v1 5 | spec: 6 | workloadType: core.oam.dev/v1alpha1.Server 7 | containers: 8 | - name: server 9 | image: k8s.gcr.io/hpa-example:latest 10 | ports: 11 | - name: http 12 | containerPort: 80 13 | protocol: TCP 14 | resources: 15 | cpu: 16 | required: 0.5 17 | memory: 18 | required: "128" 19 | --- 20 | apiVersion: core.oam.dev/v1alpha1 21 | kind: ComponentSchematic 22 | metadata: 23 | name: nginx-replicated-v1 24 | spec: 25 | workloadType: core.oam.dev/v1alpha1.Server 26 | containers: 27 | - name: server 28 | image: nginx:latest 29 | ports: 30 | - name: http 31 | containerPort: 80 32 | protocol: TCP 33 | resources: 34 | cpu: 35 | required: 0.1 36 | memory: 37 | required: "128" 38 | --- 39 | apiVersion: core.oam.dev/v1alpha1 40 | kind: ComponentSchematic 41 | metadata: 42 | name: nginx-singleton-v1 43 | spec: 44 | workloadType: core.oam.dev/v1alpha1.SingletonServer 45 | containers: 46 | - name: server 47 | image: nginx:latest 48 | ports: 49 | - name: http 50 | containerPort: 80 51 | protocol: TCP 52 | resources: 53 | cpu: 54 | required: 0.1 55 | memory: 56 | required: "128" 57 | --- 58 | apiVersion: core.oam.dev/v1alpha1 59 | kind: ComponentSchematic 60 | metadata: 61 | name: alpine-task-v1 62 | spec: 63 | workloadType: core.oam.dev/v1alpha1.Task 64 | osType: linux 65 | containers: 66 | - name: runner 67 | image: alpine:latest 68 | resources: 69 | cpu: 70 | required: 0.1 71 | memory: 
72 | required: "128" 73 | --- 74 | apiVersion: core.oam.dev/v1alpha1 75 | kind: ComponentSchematic 76 | metadata: 77 | name: alpine-singleton-task-v1 78 | spec: 79 | workloadType: core.oam.dev/v1alpha1.SingletonTask 80 | osType: linux 81 | containers: 82 | - name: runner 83 | image: alpine:latest 84 | resources: 85 | cpu: 86 | required: 0.1 87 | memory: 88 | required: "128" 89 | --- 90 | apiVersion: core.oam.dev/v1alpha1 91 | kind: ComponentSchematic 92 | metadata: 93 | name: alpine-singleton-worker-v1 94 | spec: 95 | workloadType: core.oam.dev/v1alpha1.SingletonWorker 96 | osType: linux 97 | containers: 98 | - name: worker 99 | image: nginx:latest 100 | resources: 101 | cpu: 102 | required: 0.1 103 | memory: 104 | required: "128" 105 | --- 106 | apiVersion: core.oam.dev/v1alpha1 107 | kind: ComponentSchematic 108 | metadata: 109 | name: alpine-worker-v1 110 | spec: 111 | workloadType: core.oam.dev/v1alpha1.Worker 112 | osType: linux 113 | containers: 114 | - name: worker 115 | image: nginx:latest 116 | resources: 117 | cpu: 118 | required: 0.1 119 | memory: 120 | required: "128" 121 | -------------------------------------------------------------------------------- /examples/env-vars.yaml: -------------------------------------------------------------------------------- 1 | ## This is an example of using parameters, parameter values, and env vars. 2 | ## The component definition defines the parameters and maps them to 3 | ## env vars. Then the application configuration overrides the component's 4 | ## default values. 5 | ## 6 | ## Once you have installed this, you can verify that the env vars were 7 | ## set correctly by doing: 8 | ## 9 | ## $ kubectl get pod -o yaml example-alpine-forever 10 | ## 11 | ## You can get that pod's logs to see the results of injecting the FOO 12 | ## variable. 
13 | apiVersion: core.oam.dev/v1alpha1 14 | kind: ComponentSchematic 15 | metadata: 16 | name: alpine-forever-v1 17 | spec: 18 | workloadType: core.oam.dev/v1alpha1.SingletonServer 19 | parameters: 20 | - name: message 21 | type: string 22 | required: false 23 | - name: unused_integer 24 | type: number 25 | required: false 26 | default: 5678 27 | containers: 28 | - name: runner 29 | image: technosophos/alpine-forever:latest 30 | env: 31 | - name: FOO 32 | value: bar 33 | fromParam: message 34 | - name: UNUSED 35 | value: "1234" 36 | fromParam: unused_integer 37 | --- 38 | apiVersion: core.oam.dev/v1alpha1 39 | kind: ApplicationConfiguration 40 | metadata: 41 | name: example-env-vars 42 | spec: 43 | components: 44 | - componentName: alpine-forever-v1 45 | instanceName: example-alpine-forever 46 | parameterValues: 47 | - name: message 48 | value: Hello World 49 | -------------------------------------------------------------------------------- /examples/first-app-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: first-app 5 | spec: 6 | components: 7 | - componentName: helloworld-python-v1 8 | instanceName: first-app-helloworld-python-v1 9 | parameterValues: 10 | - name: target 11 | value: Rudr 12 | - name: port 13 | value: "9999" 14 | traits: 15 | - name: ingress 16 | properties: 17 | hostname: example.com 18 | path: / 19 | servicePort: 9999 20 | -------------------------------------------------------------------------------- /examples/health-scope-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: my-health-scope 5 | spec: 6 | scopes: 7 | - name: health 8 | type: core.oam.dev/v1alpha1.HealthScope 9 | properties: 10 | - name: probe-method 11 | value: "kube-get" 12 | - name: probe-endpoint 13 
| value: ".status" 14 | - name: probe-timeout 15 | value: 30 16 | - name: probe-interval 17 | value: 60 18 | - name: failure-rate-threshold 19 | value: 0 20 | - name: healthy-rate-threshold 21 | value: 100.0 22 | - name: healthThresholdPercentage 23 | value: 100.0 -------------------------------------------------------------------------------- /examples/helloworld-python-component.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ComponentSchematic 3 | metadata: 4 | name: helloworld-python-v1 5 | spec: 6 | workloadType: core.oam.dev/v1alpha1.Server 7 | containers: 8 | - name: foo 9 | image: oamdev/helloworld-python:v1 10 | env: 11 | - name: TARGET 12 | fromParam: target 13 | - name: PORT 14 | fromParam: port 15 | ports: 16 | - protocol: TCP 17 | containerPort: 9999 18 | name: http 19 | resources: 20 | cpu: 21 | required: 0.1 22 | memory: 23 | required: "128" 24 | parameters: 25 | - name: target 26 | type: string 27 | default: World 28 | - name: port 29 | type: string 30 | default: "9999" 31 | -------------------------------------------------------------------------------- /examples/image-pull-secret.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # To run this example, first create an image pull secret named 'example-image-pull-secret' 3 | # https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 4 | # kubectl create secret docker-registry example-image-pull-secret --docker-server= --docker-username= --docker-password= --docker-email= 5 | # 6 | 7 | apiVersion: core.oam.dev/v1alpha1 8 | kind: ComponentSchematic 9 | metadata: 10 | name: image-pull-secret-example-v1 11 | spec: 12 | workloadType: core.oam.dev/v1alpha1.Task 13 | osType: linux 14 | containers: 15 | - name: runner 16 | image: alpine:latest 17 | imagePullSecret: example-image-pull-secret 18 | resources: 19 | cpu: 20 | required: "0.1" 21 | 
memory: 22 | required: "128" 23 | --- 24 | apiVersion: core.oam.dev/v1alpha1 25 | kind: ApplicationConfiguration 26 | metadata: 27 | name: example-image-pull-secret-task 28 | spec: 29 | components: 30 | - componentName: image-pull-secret-example-v1 31 | instanceName: one-image-pull-secret-task 32 | parameterValues: 33 | - name: message 34 | value: Hello World 35 | -------------------------------------------------------------------------------- /examples/kustomize/hello-rudr/base/appconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: first-app 5 | spec: 6 | components: 7 | - componentName: helloworld-python-v1 8 | instanceName: first-app-helloworld-python-v1 9 | parameterValues: 10 | - name: target 11 | value: Rudr 12 | - name: port 13 | value: "9999" 14 | traits: 15 | - name: ingress 16 | properties: 17 | hostname: example.com 18 | path: / 19 | servicePort: 9999 20 | applicationScopes: 21 | - my-health-scope 22 | -------------------------------------------------------------------------------- /examples/kustomize/hello-rudr/base/component.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ComponentSchematic 3 | metadata: 4 | name: helloworld-python-v1 5 | spec: 6 | workloadType: core.oam.dev/v1alpha1.Server 7 | containers: 8 | - name: foo 9 | image: oamdev/helloworld-python:v1 10 | env: 11 | - name: TARGET 12 | fromParam: target 13 | - name: PORT 14 | fromParam: port 15 | ports: 16 | - protocol: TCP 17 | containerPort: 9999 18 | name: http 19 | parameters: 20 | - name: target 21 | type: string 22 | default: World 23 | - name: port 24 | type: string 25 | default: "9999" 26 | -------------------------------------------------------------------------------- /examples/kustomize/hello-rudr/base/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | resources: 2 | - appconfig.yaml 3 | - component.yaml 4 | -------------------------------------------------------------------------------- /examples/kustomize/hello-rudr/overlay/production/kustomization.yaml: -------------------------------------------------------------------------------- 1 | namePrefix: production- 2 | commonLabels: 3 | variant: production 4 | resources: 5 | - ../../base/ 6 | patchesStrategicMerge: 7 | - patch.yaml 8 | -------------------------------------------------------------------------------- /examples/kustomize/hello-rudr/overlay/production/patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: first-app 5 | spec: 6 | components: 7 | - componentName: helloworld-python-v1 8 | instanceName: patched-app 9 | parameterValues: 10 | - name: target 11 | value: Hello 12 | - name: port 13 | value: "8888" 14 | -------------------------------------------------------------------------------- /examples/manual-scaler.yaml: -------------------------------------------------------------------------------- 1 | # Manual scaling is enabled by trait. 2 | # This example shows how to apply a manual scaler to a replicatable service. 
3 | apiVersion: core.oam.dev/v1alpha1 4 | kind: ApplicationConfiguration 5 | metadata: 6 | name: manual-scaler-example 7 | spec: 8 | components: 9 | - componentName: nginx-replicated-v1 10 | instanceName: scaled-repsvc 11 | parameterValues: 12 | - name: poet 13 | value: Eliot 14 | - name: poem 15 | value: The Wasteland 16 | traits: 17 | - name: manual-scaler 18 | properties: 19 | replicaCount: 3 20 | -------------------------------------------------------------------------------- /examples/multi-component.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: example-multi-component 5 | spec: 6 | components: 7 | - componentName: nginx-singleton-v1 8 | instanceName: example-multi-component-nginx 9 | parameterValues: 10 | - name: poet 11 | value: Eliot 12 | - name: poem 13 | value: The Wasteland 14 | traits: 15 | - name: ingress 16 | parameterValues: 17 | hostname: example.com 18 | path: / 19 | - componentName: alpine-task-v1 20 | instanceName: example-multi-component-alpine 21 | parameterValues: 22 | - name: message 23 | value: Hello World 24 | -------------------------------------------------------------------------------- /examples/multi-server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: example-multi-server 5 | spec: 6 | components: 7 | - componentName: nginx-singleton-v1 8 | instanceName: multi-singleton-1 9 | traits: 10 | - name: ingress 11 | parameterValues: 12 | hostname: example.com 13 | path: / 14 | - componentName: nginx-singleton-v1 15 | instanceName: multi-singleton-2 16 | -------------------------------------------------------------------------------- /examples/nginx-component.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
core.oam.dev/v1alpha1 2 | kind: ComponentSchematic 3 | metadata: 4 | name: nginx-component-v1 5 | spec: 6 | workloadType: core.oam.dev/v1alpha1.SingletonServer 7 | osType: linux 8 | arch: amd64 9 | containers: 10 | - name: foo 11 | image: nginx:latest 12 | cmd: 13 | - nginx-debug 14 | args: 15 | - "-g" 16 | - "daemon off;" 17 | env: 18 | - name: TEST 19 | value: FOO 20 | config: 21 | - path: "/etc/access/default_user.txt" 22 | value: "admin" 23 | - path: "/etc/run/db-data" 24 | fromParam: "poet" 25 | ports: 26 | - protocol: TCP 27 | containerPort: 80 28 | name: http 29 | resources: 30 | cpu: 31 | required: 0.1 32 | memory: 33 | required: "128" 34 | parameters: 35 | - name: poet 36 | type: string 37 | default: Yeats 38 | - name: poem 39 | type: string 40 | default: The Second Coming 41 | -------------------------------------------------------------------------------- /examples/openfaasapp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ComponentSchematic 3 | metadata: 4 | name: openfaas 5 | spec: 6 | workloadType: openfaas.com/v1alpha2.Function 7 | osType: linux 8 | parameters: 9 | - name: image 10 | type: string 11 | default: alexellisuk/go-echo:0.2.2 12 | - name: handler 13 | type: string 14 | default: "" 15 | - name: write_debug 16 | type: string 17 | default: "true" 18 | workloadSettings: 19 | - name: image 20 | type: string 21 | description: docker image name of this function 22 | fromParam: image 23 | value: alexellisuk/go-echo:0.2.2 24 | required: true 25 | - name: handler 26 | type: string 27 | description: entrypoint of your function, eg. 
node main.js 28 | required: false 29 | fromParam: handler 30 | - name: environment 31 | type: array 32 | description: environments of functions 33 | value: 34 | - name: write_debug 35 | type: string 36 | value: "true" 37 | fromParam: write_debug 38 | 39 | --- 40 | 41 | apiVersion: core.oam.dev/v1alpha1 42 | kind: ApplicationConfiguration 43 | metadata: 44 | name: openfaas 45 | spec: 46 | components: 47 | - componentName: openfaas 48 | instanceName: nodeinfo 49 | parameterValues: 50 | - name: image 51 | value: functions/nodeinfo 52 | - name: handler 53 | value: "node main.js" 54 | - name: write_debug 55 | value: "false" 56 | traits: 57 | - name: manual-scaler 58 | properties: 59 | replicaCount: 2 -------------------------------------------------------------------------------- /examples/prometheusapp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ComponentSchematic 3 | metadata: 4 | name: prometheus 5 | spec: 6 | workloadType: monitoring.coreos.com/v1.Prometheus 7 | osType: linux 8 | workloadSettings: 9 | - name: spec 10 | type: object 11 | description: the spec of prometheus-operator 12 | required: true 13 | value: 14 | serviceAccountName: default 15 | serviceMonitorSelector: 16 | matchLabels: 17 | team: frontend 18 | resources: 19 | requests: 20 | memory: 100Mi 21 | enableAdminAPI: true 22 | 23 | --- 24 | apiVersion: core.oam.dev/v1alpha1 25 | kind: ApplicationConfiguration 26 | metadata: 27 | name: prometheus 28 | spec: 29 | components: 30 | - componentName: prometheus 31 | instanceName: prometheus-ap -------------------------------------------------------------------------------- /examples/replicable-task.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: example-replicable-task 5 | spec: 6 | components: 7 | - componentName: alpine-task-v1 8 | 
instanceName: multi-alpine-task 9 | parameterValues: 10 | - name: message 11 | value: Hello World 12 | traits: 13 | - name: manual-scaler 14 | properties: 15 | replicaCount: 3 16 | -------------------------------------------------------------------------------- /examples/singleton-nginx-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: singleton-nginx-app 5 | spec: 6 | components: 7 | - componentName: alpine-singleton-worker-v1 8 | instanceName: singleton-nginx-app 9 | traits: 10 | - name: ingress 11 | parameterValues: 12 | hostname: example.com 13 | path: / 14 | -------------------------------------------------------------------------------- /examples/task.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: example-task 5 | spec: 6 | components: 7 | - componentName: alpine-singleton-task-v1 8 | instanceName: one-alpine-task 9 | parameterValues: 10 | - name: message 11 | value: Hello World 12 | -------------------------------------------------------------------------------- /examples/volumes.yaml: -------------------------------------------------------------------------------- 1 | # This is a stand-alone example of using a volume with a component. 
2 | apiVersion: core.oam.dev/v1alpha1 3 | kind: ComponentSchematic 4 | metadata: 5 | name: server-with-volume-v1 6 | spec: 7 | workloadType: core.oam.dev/v1alpha1.Server 8 | containers: 9 | - name: server 10 | image: nginx:latest 11 | resources: 12 | volumes: 13 | - name: myvol 14 | mountPath: /myvol 15 | disk: 16 | required: "50M" 17 | ephemeral: false 18 | cpu: 19 | required: 0.1 20 | memory: 21 | required: "128" 22 | --- 23 | apiVersion: core.oam.dev/v1alpha1 24 | kind: ApplicationConfiguration 25 | metadata: 26 | name: example-server-with-volume 27 | spec: 28 | components: 29 | - componentName: server-with-volume-v1 30 | instanceName: example-server-with-volume 31 | traits: 32 | - name: volume-mounter 33 | properties: 34 | volumeName: myvol 35 | storageClass: default 36 | # TODO: Add trait to bind a PVC 37 | -------------------------------------------------------------------------------- /examples/voting/README.md: -------------------------------------------------------------------------------- 1 | # Voting Example 2 | 3 | This is the Docker voting application exhibited using the OAM schematics. 4 | 5 | ## Installation 6 | 7 | The first step is to install all of the component descriptions: 8 | 9 | ```console 10 | $ kubectl create -f components.yaml 11 | $ kubectl get components 12 | NAME AGE 13 | postgres 13s 14 | redis 12s 15 | voting-admin 13s 16 | voting-frontend 12s 17 | voting-worker 12s 18 | ``` 19 | 20 | Next, create a configuration: 21 | 22 | ```console 23 | $ kubectl create -f configuration.yaml 24 | ``` 25 | 26 | In a few minutes, you should have the entire voting application running. 
-------------------------------------------------------------------------------- /examples/voting/components.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ComponentSchematic 3 | metadata: 4 | name: voting-admin-v1 5 | annotations: 6 | version: "1.0.0" 7 | description: Voting results interface 8 | spec: 9 | workloadType: core.oam.dev/v1alpha1.Server 10 | containers: 11 | - name: server 12 | ports: 13 | - containerPort: 5001 14 | name: http 15 | protocol: TCP 16 | image: dockersamples/examplevotingapp_result:before 17 | --- 18 | apiVersion: core.oam.dev/v1alpha1 19 | kind: ComponentSchematic 20 | metadata: 21 | name: postgres-v9 22 | annotations: 23 | version: "1.0.0" 24 | description: PostgreSQL Database 25 | spec: 26 | workloadType: core.oam.dev/v1alpha1.SingletonServer 27 | containers: 28 | - name: db 29 | ports: 30 | - containerPort: 5432 31 | name: pgsql 32 | protocol: TCP 33 | image: postgres:9.4 34 | --- 35 | apiVersion: core.oam.dev/v1alpha1 36 | kind: ComponentSchematic 37 | metadata: 38 | name: redis-v1 39 | annotations: 40 | version: "1.0.0" 41 | description: Redis single-node 42 | spec: 43 | workloadType: core.oam.dev/v1alpha1.SingletonServer 44 | containers: 45 | - name: cache 46 | ports: 47 | - containerPort: 6379 48 | name: redis 49 | protocol: TCP 50 | image: redis:alpine 51 | --- 52 | apiVersion: core.oam.dev/v1alpha1 53 | kind: ComponentSchematic 54 | metadata: 55 | name: voting-frontend-v1 56 | annotations: 57 | version: "1.0.0" 58 | description: Voting front-end webserver 59 | spec: 60 | workloadType: core.oam.dev/v1alpha1.Server 61 | containers: 62 | - name: server 63 | ports: 64 | - containerPort: 5000 65 | name: http 66 | protocol: TCP 67 | image: dockersamples/examplevotingapp_vote:before 68 | --- 69 | apiVersion: core.oam.dev/v1alpha1 70 | kind: ComponentSchematic 71 | metadata: 72 | name: voting-worker-v1 73 | annotations: 74 | version: "1.0.0" 75 | 
description: Worker for tallying voting results 76 | spec: 77 | workloadType: core.oam.dev/v1alpha1.Server 78 | containers: 79 | - name: server 80 | image: dockersamples/examplevotingapp_worker 81 | --- 82 | 83 | -------------------------------------------------------------------------------- /examples/voting/configuration.yaml: -------------------------------------------------------------------------------- 1 | # This is a single-shot configuration that creates the entire Docker Voting app 2 | # all in one go. 3 | apiVersion: core.oam.dev/v1alpha1 4 | kind: ApplicationConfiguration 5 | metadata: 6 | name: voter-app 7 | spec: 8 | components: 9 | - componentName: postgres-v9 10 | instanceName: voter-pg 11 | - componentName: redis-v1 12 | instanceName: voter-redis 13 | - componentName: voting-worker-v1 14 | instanceName: voter-worker 15 | - componentName: voting-frontend-v1 16 | instanceName: voter-fe 17 | traits: 18 | - name: ingress 19 | properties: 20 | hostname: voting.example.com 21 | path: / 22 | - componentName: voting-admin-v1 23 | instanceName: voter-admin 24 | traits: 25 | - name: ingress 26 | properties: 27 | hostname: admin.example.com 28 | path: / 29 | -------------------------------------------------------------------------------- /examples/worker.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: core.oam.dev/v1alpha1 2 | kind: ApplicationConfiguration 3 | metadata: 4 | name: worker-app 5 | spec: 6 | components: 7 | - componentName: alpine-worker-v1 8 | instanceName: worker-nginx 9 | parameterValues: 10 | - name: poet 11 | value: Eliot 12 | - name: poem 13 | value: The Wasteland 14 | traits: 15 | - name: manual-scaler 16 | properties: 17 | replicaCount: 2 18 | -------------------------------------------------------------------------------- /governance.md: -------------------------------------------------------------------------------- 1 | # Governance 2 | 3 | ## LGTM Policy 4 | 5 | Every code change 
committed to Rudr must be reviewed by at least one project maintainer who did not author the PR. When a PR has been marked `Approved` by a project maintainer and passes all mandatory gates (including the contributor license agreement verification), the PR can be merged by any project maintainer. 6 | 7 | ## Project Maintainers 8 | [Project maintainers](CODEOWNERS) are responsible for activities around maintaining and updating Rudr. Final decisions on the project reside with the project maintainers. 9 | 10 | Maintainers MUST remain active. If they are unresponsive for >3 months, they will be automatically removed unless a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) of the other project maintainers agrees to extend the period to be greater than 3 months. 11 | 12 | New maintainers can be added to the project by a [super-majority](https://en.wikipedia.org/wiki/Supermajority#Two-thirds_vote) vote of the existing maintainers. 13 | 14 | A maintainer may step down by submitting an [issue](https://github.com/oam-dev/rudr/issues/new) stating their intent. 15 | 16 | ## Code of Conduct 17 | This project has adopted the [Microsoft Open Source Code of conduct](https://opensource.microsoft.com/codeofconduct/). 18 | For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
19 | -------------------------------------------------------------------------------- /healthscope/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "healthscope" 3 | version = "0.1.0" 4 | authors = ["天元 "] 5 | edition = "2018" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | kube = { version = "0.16.1", features = ["openapi"] } 11 | k8s-openapi = { version = "0.5.1", features = ["v1_15"] } 12 | serde = "1.0" 13 | serde_derive = "1.0" 14 | serde_json = "1.0" 15 | failure = "0.1.5" 16 | futures = "0.1.21" 17 | spectral = "0.6" 18 | reqwest = "0.9.17" 19 | log = "0.4" 20 | env_logger = "0.6.1" 21 | hyper = "0.12" 22 | clap = "~2.33" 23 | chrono = "0.4" 24 | rudr = { path = '../' } 25 | -------------------------------------------------------------------------------- /healthscope/README.md: -------------------------------------------------------------------------------- 1 | # Health Scope Controller 2 | 3 | HealthScope controller is used to periodically check the health of all the components and update health scope custom resources. 4 | 5 | ## What will health scope controller do? 6 | 7 | 1. periodically check health status of components and update the HealthScope resource status. 8 | 2. serve as a http server, to output aggregated health information. 9 | 10 | ## How to install? 11 | 12 | Use helm to install: 13 | 14 | ```shell script 15 | helm install healthscope ./charts/healthscope 16 | ``` 17 | 18 | ## How to use? 19 | 20 | After HealthScope controller is installed by helm charts, setup an endpoint to it. 21 | By default the charts will use ClusterPort, we can use port-forward to access HealthScope controller. 
22 | 23 | ```shell script 24 | export POD_NAME=$(kubectl get pods -l "app.kubernetes.io/name=healthscope,app.kubernetes.io/instance=health" -o jsonpath="{.items[0].metadata.name}") 25 | kubectl port-forward $POD_NAME 8080:80 26 | ``` 27 | 28 | Then we'll be able to visit http://127.0.0.1:8080 to query status of health scope resource. 29 | 30 | ### Create health scope instance using application configuration 31 | 32 | If you want to use health scope, you should create a health scope instance first, using Application Configuration. 33 | 34 | A health scope instance is like below: 35 | 36 | ```yaml 37 | apiVersion: core.oam.dev/v1alpha1 38 | kind: ApplicationConfiguration 39 | metadata: 40 | name: my-health-scope 41 | spec: 42 | scopes: 43 | - name: health 44 | type: core.oam.dev/v1alpha1.HealthScope 45 | properties: 46 | - name: probe-method 47 | value: "kube-get" 48 | - name: probe-endpoint 49 | value: ".status" 50 | - name: probe-timeout 51 | value: 30 52 | - name: probe-interval 53 | value: 60 54 | - name: failure-rate-threshold 55 | value: 0 56 | - name: healthy-rate-threshold 57 | value: 100.0 58 | - name: healthThresholdPercentage 59 | value: 100.0 60 | ``` 61 | 62 | You can also find this example here: [examples/health-scope-config.yaml](../examples/health-scope-config.yaml). 
63 | 64 | Apply this yaml: 65 | 66 | ```shell script 67 | $ kubectl apply -f examples/health-scope-config.yaml 68 | applicationconfiguration.core.oam.dev/my-health-scope created 69 | ``` 70 | 71 | Then you will find a health scope instance was created: 72 | 73 | ```shell script 74 | $ kubectl get health 75 | NAME AGE 76 | my-health-scope 31s 77 | ``` 78 | 79 | You could get more details from: 80 | 81 | ```shell script 82 | $ kubectl get health my-health-scope -o yaml 83 | apiVersion: core.oam.dev/v1alpha1 84 | kind: HealthScope 85 | metadata: 86 | creationTimestamp: "2019-10-20T09:42:30Z" 87 | generation: 3 88 | name: my-health-scope 89 | namespace: default 90 | ownerReferences: 91 | - apiVersion: core.oam.dev/v1alpha1 92 | blockOwnerDeletion: true 93 | controller: true 94 | kind: ApplicationConfiguration 95 | name: my-health-scope 96 | uid: 1c113398-16f2-4aa1-9dc0-cd05a686d17d 97 | resourceVersion: "2465881" 98 | selfLink: /apis/core.oam.dev/v1alpha1/namespaces/default/healthscopes/my-health-scope 99 | uid: ba3fd01c-9ba6-4e1a-93c0-b96d102700af 100 | spec: 101 | failureRateThreshold: 0 102 | healthyRateThreshold: 100 103 | probeEndpoint: .status 104 | probeInterval: 60 105 | probeMethod: kube-get 106 | probeTimeout: 30 107 | status: 108 | lastAggregateTimestamp: "2019-10-20T09:43:31.541142387+00:00" 109 | ``` 110 | 111 | Then we could use this health scope in other Application Configuration. 112 | 113 | ### Add Component to health scope 114 | 115 | We add application scopes to our [`first-app-config.yaml`](../examples/first-app-config.yaml). 
116 | 117 | ```shell script 118 | apiVersion: core.oam.dev/v1alpha1 119 | kind: ApplicationConfiguration 120 | metadata: 121 | name: first-app 122 | spec: 123 | components: 124 | - componentName: helloworld-python-v1 125 | instanceName: first-app-helloworld-python-v1 126 | parameterValues: 127 | - name: target 128 | value: Rudr 129 | - name: port 130 | value: "9999" 131 | traits: 132 | - name: ingress 133 | properties: 134 | hostname: example.com 135 | path: / 136 | servicePort: 9999 137 | + applicationScopes: 138 | + - my-health-scope 139 | ``` 140 | 141 | Apply the config file: 142 | 143 | ```shell script 144 | $ kubectl apply -f examples/first-app-config.yaml 145 | applicationconfiguration.core.oam.dev/first-app created 146 | ``` 147 | 148 | You could check the scope like below: 149 | 150 | ```shell script 151 | $ kubectl get health -o yaml my-health-scope 152 | apiVersion: core.oam.dev/v1alpha1 153 | kind: HealthScope 154 | metadata: 155 | creationTimestamp: "2019-10-20T09:42:30Z" 156 | generation: 10 157 | name: my-health-scope 158 | namespace: default 159 | ownerReferences: 160 | - apiVersion: core.oam.dev/v1alpha1 161 | blockOwnerDeletion: true 162 | controller: true 163 | kind: ApplicationConfiguration 164 | name: my-health-scope 165 | uid: 1c113398-16f2-4aa1-9dc0-cd05a686d17d 166 | resourceVersion: "2466413" 167 | selfLink: /apis/core.oam.dev/v1alpha1/namespaces/default/healthscopes/my-health-scope 168 | uid: ba3fd01c-9ba6-4e1a-93c0-b96d102700af 169 | spec: 170 | failureRateThreshold: 0 171 | healthyRateThreshold: 100 172 | probeEndpoint: .status 173 | probeInterval: 60 174 | probeMethod: kube-get 175 | probeTimeout: 30 176 | status: 177 | components: 178 | - instanceName: first-app-helloworld-python-v1 179 | componentName: helloworld-python-v1 180 | status: healthy 181 | lastAggregateTimestamp: "2019-10-20T09:49:22.820141484+00:00" 182 | ``` 183 | 184 | The status indicates that we have successfully added our component to this scope. 
185 | 186 | ### Visit health scope instance to check health 187 | 188 | Do you still remember our port mapping in the first step? Visit that url with our health scope instance: 189 | ``` 190 | $ curl 127.0.0.1:8080/my-health-scope 191 | healthy 192 | ``` 193 | 194 | Then you will find it's healthy now。 -------------------------------------------------------------------------------- /healthscope/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate chrono; 2 | extern crate futures; 3 | extern crate rudr; 4 | -------------------------------------------------------------------------------- /src/instigator_test.rs: -------------------------------------------------------------------------------- 1 | use crate::instigator::*; 2 | use crate::schematic::configuration::ComponentConfiguration; 3 | use std::collections::BTreeMap; 4 | 5 | #[test] 6 | fn test_config_owner_reference() { 7 | let name = "configuration".to_string(); 8 | let uid = "87707f2d-9ddc-11e9-b1c3-4ec16ac9a10f".to_string(); 9 | 10 | config_owner_reference(name.clone(), Some(uid.clone())) 11 | .and_then(|owner| { 12 | assert_eq!(owner.name, name); 13 | assert_eq!(owner.uid, uid); 14 | Ok(owner) 15 | }) 16 | .expect("expected owner reference"); 17 | } 18 | 19 | #[test] 20 | fn test_record_ann() { 21 | let records = None; 22 | let ann = get_record_annotation(records).expect("get_record_annotation from none"); 23 | assert_eq!(ann.len(), 0); 24 | let mut one: RecordAnnotation = BTreeMap::new(); 25 | let cr = ComponentRecord { 26 | version: "123".to_string(), 27 | config: ComponentConfiguration { 28 | component_name: "n123".to_string(), 29 | instance_name: "inst123".to_string(), 30 | parameter_values: None, 31 | traits: None, 32 | application_scopes: None, 33 | }, 34 | }; 35 | let cr2 = ComponentRecord { 36 | version: "321".to_string(), 37 | config: ComponentConfiguration { 38 | component_name: "n321".to_string(), 39 | instance_name: "inst321".to_string(), 40 
| parameter_values: None, 41 | traits: None, 42 | application_scopes: None, 43 | }, 44 | }; 45 | one.insert("comp1".to_string(), cr.clone()); 46 | one.insert("comp2".to_string(), cr2.clone()); 47 | let json_str = &serde_json::to_string(&one).expect("record annotation json value"); 48 | let records = Some(json_str); 49 | let ann = get_record_annotation(records).expect("get_record_annotation from json"); 50 | assert_eq!(2, ann.len()); 51 | let crgot = ann.get("comp1").expect("comp1 ComponentRecord"); 52 | let crgot2 = ann.get("comp2").expect("comp2 ComponentRecord"); 53 | assert_eq!(crgot, &cr); 54 | assert_eq!(crgot2, &cr2); 55 | } 56 | 57 | #[test] 58 | fn test_check_diff() { 59 | let new_record = ComponentRecord { 60 | version: "123".to_string(), 61 | config: ComponentConfiguration { 62 | component_name: "test".to_string(), 63 | instance_name: "test_inst".to_string(), 64 | parameter_values: None, 65 | traits: None, 66 | application_scopes: None, 67 | }, 68 | }; 69 | let old_record = ComponentRecord { 70 | version: "123".to_string(), 71 | config: ComponentConfiguration { 72 | component_name: "test".to_string(), 73 | instance_name: "test_inst".to_string(), 74 | parameter_values: None, 75 | traits: None, 76 | application_scopes: None, 77 | }, 78 | }; 79 | 80 | assert_eq!(check_diff(None, &new_record), true); 81 | assert_eq!(check_diff(Some(old_record.clone()), &new_record), false); 82 | 83 | let new_record2 = ComponentRecord { 84 | version: "1234".to_string(), 85 | config: ComponentConfiguration { 86 | component_name: "test".to_string(), 87 | instance_name: "test_inst".to_string(), 88 | parameter_values: None, 89 | traits: None, 90 | application_scopes: None, 91 | }, 92 | }; 93 | assert_eq!(check_diff(Some(new_record2), &old_record), true); 94 | let new_record3 = ComponentRecord { 95 | version: "1234".to_string(), 96 | config: ComponentConfiguration { 97 | component_name: "test".to_string(), 98 | instance_name: "test_inst".to_string(), 99 | parameter_values: 
Some(vec![]), 100 | traits: None, 101 | application_scopes: None, 102 | }, 103 | }; 104 | assert_eq!(check_diff(Some(new_record3), &old_record), true); 105 | } 106 | 107 | #[test] 108 | fn test_combine_name() { 109 | let name = combine_name("component-a".to_string(), "instance-b".to_string()); 110 | assert_eq!("component-a-instance-b", name.as_str()) 111 | } 112 | -------------------------------------------------------------------------------- /src/kube_event.rs: -------------------------------------------------------------------------------- 1 | use failure::Error; 2 | use k8s_openapi::api::core::v1::ObjectReference; 3 | use kube::{api::Api, api::PostParams, client::APIClient}; 4 | use std::fmt; 5 | 6 | pub enum Type { 7 | Normal, 8 | Warning, 9 | } 10 | impl fmt::Display for Type { 11 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 12 | match self { 13 | Type::Normal => write!(f, "Normal"), 14 | Type::Warning => write!(f, "Warning"), 15 | } 16 | } 17 | } 18 | 19 | #[derive(Clone)] 20 | pub struct Event { 21 | pub client: APIClient, 22 | pub namespace: String, 23 | pub reporting_component: Option, // Name of the controller that emitted this Event,e.g. 
"oam.dev/rudr" 24 | pub reporting_instance: Option, //ID of the controller instance 25 | pub event_handle: Api, 26 | } 27 | 28 | pub struct Info { 29 | pub action: String, 30 | pub message: String, 31 | pub reason: String, 32 | } 33 | 34 | impl Event { 35 | pub fn new(client: APIClient, namespace: String) -> Self { 36 | Event { 37 | client: client.clone(), 38 | namespace: namespace.clone(), 39 | reporting_component: None, 40 | reporting_instance: None, 41 | event_handle: Api::v1Event(client).within(namespace.as_str()), 42 | } 43 | } 44 | fn make_event( 45 | now: chrono::DateTime, 46 | namespace: String, 47 | type_: Type, 48 | info: Info, 49 | involved_object: ObjectReference, 50 | reporting_component: Option, 51 | reporting_instance: Option, 52 | ) -> kube::api::v1Event { 53 | let name = format!( 54 | "{}.{:x}", 55 | involved_object.name.clone().unwrap(), 56 | now.timestamp_nanos(), 57 | ); 58 | kube::api::v1Event { 59 | metadata: kube::api::ObjectMeta { 60 | name, 61 | namespace: Some(namespace.clone()), 62 | ..Default::default() 63 | }, 64 | involvedObject: involved_object, 65 | reportingComponent: reporting_component.unwrap_or_else(|| "".to_string()), 66 | reportingInstance: reporting_instance.unwrap_or_else(|| "".to_string()), 67 | message: info.message, 68 | reason: info.reason, 69 | count: 1, 70 | type_: type_.to_string(), 71 | action: Some(info.action), 72 | eventTime: None, 73 | firstTimestamp: Some(k8s_openapi::apimachinery::pkg::apis::meta::v1::Time(now)), 74 | lastTimestamp: Some(k8s_openapi::apimachinery::pkg::apis::meta::v1::Time(now)), 75 | related: None, 76 | series: None, 77 | source: None, 78 | } 79 | } 80 | pub fn push_event_message( 81 | &self, 82 | type_: Type, 83 | info: Info, 84 | involved_object: ObjectReference, 85 | ) -> Result<(), Error> { 86 | let now = chrono::Utc::now(); 87 | let event = Event::make_event( 88 | now, 89 | self.namespace.clone(), 90 | type_, 91 | info, 92 | involved_object, 93 | self.reporting_component.clone(), 94 | 
self.reporting_instance.clone(), 95 | ); 96 | self.event_handle 97 | .create(&PostParams::default(), serde_json::to_vec(&event)?)?; 98 | Ok(()) 99 | } 100 | } 101 | 102 | #[test] 103 | fn test_make_event() { 104 | let now = chrono::Utc::now(); 105 | let ev = Event::make_event( 106 | now, 107 | "default".to_string(), 108 | Type::Normal, 109 | Info { 110 | action: "ac".to_string(), 111 | message: "ms".to_string(), 112 | reason: "re".to_string(), 113 | }, 114 | ObjectReference { 115 | api_version: Some("core.oam.dev/v1alpha1".to_string()), 116 | kind: None, 117 | name: Some("test".to_string()), 118 | field_path: None, 119 | namespace: None, 120 | resource_version: None, 121 | uid: None, 122 | }, 123 | None, 124 | None, 125 | ); 126 | assert_eq!(ev.count, 1); 127 | assert_eq!( 128 | ev.metadata.name, 129 | format!("test.{:x}", now.timestamp_nanos()) 130 | ); 131 | assert_eq!(ev.action, Some("ac".to_string())) 132 | } 133 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate failure; 3 | #[macro_use] 4 | extern crate serde_derive; 5 | #[macro_use] 6 | extern crate lazy_static; 7 | extern crate regex; 8 | 9 | pub mod instigator; 10 | pub mod kube_event; 11 | pub mod lifecycle; 12 | pub mod schematic; 13 | mod trait_manager; 14 | pub mod workload_type; 15 | 16 | #[cfg(test)] 17 | mod instigator_test; 18 | #[cfg(test)] 19 | mod lifecycle_test; 20 | #[cfg(test)] 21 | mod workload_type_test; 22 | -------------------------------------------------------------------------------- /src/lifecycle.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | /// Phase describes the lifecycle phase for an operation. 
4 | /// 5 | /// The order of operations is this: 6 | /// 7 | /// ADD 8 | /// - Kubernetes Add 9 | /// - Configuration 10 | /// - PreAdd (traits only): Before components are added 11 | /// - Component Configuration 12 | /// - Traits 13 | /// - Add: Resources added and initialized 14 | /// - Components 15 | /// - Traits 16 | /// 17 | /// MODIFY 18 | /// - Kubernetes Update 19 | /// - Configuration 20 | /// - PreModify (traits only): Before components are modified 21 | /// - Traits 22 | /// - Modify: Resources are modified 23 | /// - Components 24 | /// - Traits 25 | /// 26 | /// DELETE 27 | /// - Kubernetes Delete 28 | /// - Configuration 29 | /// - PreDelete: 30 | /// - Traits 31 | /// - Delete: 32 | /// - Components 33 | /// - Traits 34 | /// 35 | /// Note that in deletion operations, Kubernetes will delete by owner reference before PreDelete. This means 36 | /// that the components will likely be unavailable by the time PreDelete fires. It is only guaranteed to fire 37 | /// before the component's Delete operation is fired. 
38 | #[derive(Clone, Debug, PartialEq)] 39 | pub enum Phase { 40 | // PreAdd happens before resources are added 41 | PreAdd, 42 | Add, 43 | PreModify, 44 | Modify, 45 | PreDelete, 46 | Delete, 47 | } 48 | 49 | impl fmt::Display for Phase { 50 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 51 | match self { 52 | Phase::PreAdd => write!(f, "PreAdd"), 53 | Phase::Add => write!(f, "Add"), 54 | Phase::PreModify => write!(f, "PreModify"), 55 | Phase::Modify => write!(f, "Modify"), 56 | Phase::PreDelete => write!(f, "PreDelete"), 57 | Phase::Delete => write!(f, "Delete"), 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/lifecycle_test.rs: -------------------------------------------------------------------------------- 1 | use crate::lifecycle::Phase; 2 | 3 | #[test] 4 | fn test_lifecycle_phase() { 5 | let mut phase = Phase::Modify; 6 | assert_eq!("Modify", phase.to_string()); 7 | phase = Phase::Delete; 8 | assert_eq!("Delete", phase.to_string()); 9 | } 10 | -------------------------------------------------------------------------------- /src/schematic.rs: -------------------------------------------------------------------------------- 1 | use failure::err_msg; 2 | use std::collections::BTreeMap; 3 | 4 | pub mod component; 5 | pub mod component_instance; 6 | pub mod configuration; 7 | pub mod parameter; 8 | pub mod scopes; 9 | pub mod traits; 10 | pub mod variable; 11 | 12 | #[cfg(test)] 13 | mod component_test; 14 | #[cfg(test)] 15 | mod configuration_test; 16 | #[cfg(test)] 17 | mod parameter_test; 18 | #[cfg(test)] 19 | mod traits_test; 20 | 21 | /// Application defines an OAM application 22 | #[derive(Serialize, Deserialize, Clone, Debug)] 23 | #[serde(rename_all = "camelCase")] 24 | pub struct Application {} 25 | 26 | // TODO: This part is not specified in the spec b/c it is considered a runtime 27 | // detail of Kubernetes. Need to fill this in as we go. 
28 | 29 | /// OAMStatus is the status of an Open Application Model object, per Kubernetes. 30 | #[derive(Serialize, Deserialize, Clone, Debug)] 31 | #[serde(rename_all = "camelCase")] 32 | pub struct OAMStatus { 33 | pub phase: Option, 34 | pub components: Option>>, 35 | } 36 | impl Default for OAMStatus { 37 | fn default() -> Self { 38 | OAMStatus { 39 | phase: None, 40 | components: None, 41 | } 42 | } 43 | } 44 | impl OAMStatus { 45 | pub fn new( 46 | phase: Option, 47 | components: Option>>, 48 | ) -> OAMStatus { 49 | OAMStatus { phase, components } 50 | } 51 | } 52 | 53 | /// Status is a convenience for an optional OAMStatus. 54 | pub type Status = Option; 55 | 56 | /// GroupVersionKind represents a fully qualified identifier for a resource type. 57 | /// 58 | /// It is, as the name suggests, composed of three pieces of information: 59 | /// - Group is a namespace 60 | /// - Version is an API version 61 | /// - Kind is the actual type marker 62 | pub struct GroupVersionKind { 63 | pub group: String, 64 | pub version: String, 65 | pub kind: String, 66 | } 67 | 68 | /// GroupVersionKind represents a canonical name, composed of group, version, and (you guessed it) kind. 69 | /// 70 | /// Group is a dotted name. While the specification requires at least one dot in the group, we do not enforce. 71 | /// Version is an API version 72 | /// Kind the name of the type 73 | impl GroupVersionKind { 74 | /// Create a new GroupVersionKind from each component. 75 | /// 76 | /// This does not check the formatting of each part. 77 | pub fn new(group: &str, version: &str, kind: &str) -> GroupVersionKind { 78 | GroupVersionKind { 79 | group: group.into(), 80 | version: version.into(), 81 | kind: kind.into(), 82 | } 83 | } 84 | } 85 | impl std::str::FromStr for GroupVersionKind { 86 | type Err = failure::Error; 87 | /// Parse a string into a GroupVersionKind. 88 | fn from_str(gvp: &str) -> Result { 89 | // I suspect that this function could be made much more elegant. 
90 | let parts: Vec<&str> = gvp.splitn(2, '/').collect(); 91 | if parts.len() != 2 { 92 | return Err(err_msg("missing version and kind")); 93 | } 94 | 95 | let vk: Vec<&str> = parts.get(1).unwrap().splitn(2, '.').collect(); 96 | if vk.len() != 2 { 97 | return Err(err_msg("missing kind")); 98 | } 99 | 100 | Ok(GroupVersionKind { 101 | group: parts.get(0).unwrap().to_string(), 102 | version: vk.get(0).unwrap().to_string(), 103 | kind: vk.get(1).unwrap().to_string(), 104 | }) 105 | } 106 | } 107 | 108 | impl std::fmt::Display for GroupVersionKind { 109 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 110 | write!(f, "{}/{}.{}", self.group, self.version, self.kind) 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /src/schematic/component_instance.rs: -------------------------------------------------------------------------------- 1 | #[derive(Serialize, Deserialize, Clone, Debug)] 2 | #[serde(rename_all = "camelCase")] 3 | pub struct ComponentInstance { 4 | pub traits: Option>, 5 | } 6 | 7 | /// Convenience type for Kubernetes wrapped ComponentInstance. 8 | pub type KubeComponentInstance = kube::api::Object; 9 | -------------------------------------------------------------------------------- /src/schematic/configuration.rs: -------------------------------------------------------------------------------- 1 | use crate::schematic::{parameter::ParameterValue, traits::TraitBinding, variable::Variable}; 2 | 3 | /// Configuration creates an instance of a specified component, and attaches configuration to it. 4 | /// 5 | /// In OAM, an instance is a Component definition plus a Configuration. 
Practically speaking, a 6 | /// Configuration says "Create a component of type X in scopes A, B, and C, set the following 7 | /// parameters, and attach these traits" 8 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] 9 | #[serde(rename_all = "camelCase")] 10 | pub struct ComponentConfiguration { 11 | /// The name of the component to instantiate 12 | pub component_name: String, 13 | /// The name of the instance that is to be created 14 | pub instance_name: String, 15 | /// Values to substitute into the component 16 | pub parameter_values: Option>, 17 | /// Traits to attach to the component 18 | pub traits: Option>, 19 | /// Application Scopes which the component was involved 20 | pub application_scopes: Option>, 21 | } 22 | 23 | /// ApplicationConfiguration is the top-level configuration object in OAM. 24 | /// 25 | /// An ApplicationConfiguration can describe one or more components, a collection 26 | /// of related parameters, and the associated traits and scopes. 27 | #[derive(Serialize, Deserialize, Clone, Debug)] 28 | #[serde(rename_all = "camelCase")] 29 | pub struct ApplicationConfiguration { 30 | pub variables: Option>, 31 | pub scopes: Option>, 32 | pub components: Option>, 33 | } 34 | 35 | #[derive(Serialize, Deserialize, Clone, Debug)] 36 | #[serde(rename_all = "camelCase")] 37 | pub struct ScopeBinding { 38 | pub name: String, 39 | 40 | #[serde(rename(serialize = "type", deserialize = "type"))] 41 | pub scope_type: String, 42 | 43 | //TODO this should use Properties here, but we don't have Properties yet, keep consistent with TraitBinding. 
44 | #[serde(rename(serialize = "properties", deserialize = "properties"))] 45 | pub parameter_values: Option>, 46 | } 47 | -------------------------------------------------------------------------------- /src/schematic/configuration_test.rs: -------------------------------------------------------------------------------- 1 | use crate::schematic::configuration::*; 2 | 3 | #[test] 4 | fn test_component_configuration() { 5 | // Test that a configuration deserializes correctly. 6 | 7 | let conf: ComponentConfiguration = serde_json::from_str( 8 | r#"{ 9 | "componentName": "test", 10 | "instanceName": "squidgy", 11 | "parameterValues": [ 12 | { 13 | "name": "param1", 14 | "value": 1234 15 | } 16 | ] 17 | }"#, 18 | ) 19 | .expect("JSON must parse"); 20 | 21 | assert_eq!("test", conf.component_name); 22 | assert_eq!("squidgy", conf.instance_name); 23 | assert!(conf.parameter_values.is_some()); 24 | assert!(conf.traits.is_none()); 25 | } 26 | 27 | #[test] 28 | fn test_application_configuration() { 29 | // Test that an application configuration deserializes correctly. 
30 | 31 | let conf: ApplicationConfiguration = serde_json::from_str( 32 | r#"{ 33 | "variables": [ 34 | { 35 | "name": "var1", 36 | "value": 1234 37 | } 38 | ] 39 | }"#, 40 | ) 41 | .expect("JSON must parse"); 42 | 43 | assert!(conf.variables.is_some()); 44 | } -------------------------------------------------------------------------------- /src/schematic/scopes.rs: -------------------------------------------------------------------------------- 1 | // Re-exports 2 | pub mod health; 3 | pub use crate::schematic::scopes::health::Health; 4 | pub mod network; 5 | use crate::schematic::configuration::ComponentConfiguration; 6 | pub use crate::schematic::scopes::network::Network; 7 | use failure::Error; 8 | use k8s_openapi::apimachinery::pkg::apis::meta::v1 as meta; 9 | 10 | pub const HEALTH_SCOPE: &str = "core.oam.dev/v1alpha1.HealthScope"; 11 | pub const NETWORK_SCOPE: &str = "core.oam.dev/v1alpha1.NetworkScope"; 12 | 13 | /// Scopes describes Hydra application scopes. 14 | /// 15 | /// Application scopes are used to group components together into logical applications 16 | /// by providing different forms of application boundaries with common group behaviors. 17 | /// For example, a health scope will aggregate health states for components and determine whether it's healthy or not. 
18 | pub enum OAMScope { 19 | Health(Health), 20 | Network(Network), 21 | } 22 | 23 | fn convert_owner_ref(owner: meta::OwnerReference) -> kube::api::OwnerReference { 24 | kube::api::OwnerReference { 25 | controller: owner.controller.unwrap_or(false), 26 | blockOwnerDeletion: owner.block_owner_deletion.unwrap_or(false), 27 | name: owner.name, 28 | apiVersion: owner.api_version, 29 | kind: owner.kind, 30 | uid: owner.uid, 31 | } 32 | } 33 | 34 | impl OAMScope { 35 | pub fn allow_overlap(&self) -> bool { 36 | match self { 37 | OAMScope::Health(h) => h.allow_overlap(), 38 | OAMScope::Network(n) => n.allow_overlap(), 39 | } 40 | } 41 | pub fn scope_type(&self) -> String { 42 | match self { 43 | OAMScope::Health(h) => h.scope_type(), 44 | OAMScope::Network(n) => n.scope_type(), 45 | } 46 | } 47 | /// create will create a real scope instance 48 | pub fn create(&self, owner: meta::OwnerReference) -> Result<(), Error> { 49 | match self { 50 | OAMScope::Health(h) => h.create(convert_owner_ref(owner.clone())), 51 | OAMScope::Network(n) => n.create(owner.clone()), 52 | } 53 | } 54 | /// modify will modify the scope instance 55 | pub fn modify(&self) -> Result<(), Error> { 56 | match self { 57 | OAMScope::Health(h) => h.modify(), 58 | OAMScope::Network(n) => n.modify(), 59 | } 60 | } 61 | /// delete will delete the scope instance, we can depend on OwnerReference if only k8s objects were created 62 | pub fn delete(&self) -> Result<(), Error> { 63 | match self { 64 | OAMScope::Health(h) => h.delete(), 65 | OAMScope::Network(n) => n.delete(), 66 | } 67 | } 68 | /// add will add a component to this scope 69 | pub fn add(&self, spec: ComponentConfiguration) -> Result<(), Error> { 70 | match self { 71 | OAMScope::Health(h) => h.add(spec), 72 | OAMScope::Network(n) => n.add(spec), 73 | } 74 | } 75 | /// remove will remove component from this scope 76 | pub fn remove(&self, spec: ComponentConfiguration) -> Result<(), Error> { 77 | match self { 78 | OAMScope::Health(h) => 
h.remove(spec), 79 | OAMScope::Network(n) => n.remove(spec), 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/schematic/scopes/network.rs: -------------------------------------------------------------------------------- 1 | use crate::schematic::configuration::ComponentConfiguration; 2 | /// Network scope is defined as https://github.com/oam-dev/spec/blob/master/4.application_scopes.md#network-scope 3 | /// TODO: Now we don't really implement network scope, this is just a framework as the spec describe. 4 | use crate::schematic::parameter::{extract_string_params, ParameterValue}; 5 | use crate::schematic::scopes::NETWORK_SCOPE; 6 | use failure::{format_err, Error}; 7 | use k8s_openapi::apimachinery::pkg::apis::meta::v1 as meta; 8 | use kube::client::APIClient; 9 | 10 | #[derive(Clone)] 11 | pub struct Network { 12 | client: APIClient, 13 | namespace: String, 14 | pub name: String, 15 | pub allow_component_overlap: bool, 16 | pub network_id: String, 17 | pub subnet_id: String, 18 | pub internet_gateway_type: Option, 19 | } 20 | 21 | impl Network { 22 | pub fn from_params( 23 | name: String, 24 | namespace: String, 25 | client: APIClient, 26 | params: Vec, 27 | ) -> Result { 28 | let network_id = match extract_string_params("network-id", params.clone()) { 29 | Some(network_id) => network_id, 30 | None => return Err(format_err!("network-id is not exist")), 31 | }; 32 | let subnet_id = match extract_string_params("subnet-id", params.clone()) { 33 | Some(network_id) => network_id, 34 | None => return Err(format_err!("subnet-id is not exist")), 35 | }; 36 | Ok(Network { 37 | network_id, 38 | subnet_id, 39 | name, 40 | namespace, 41 | client, 42 | internet_gateway_type: extract_string_params("internet-gateway-type", params.clone()), 43 | allow_component_overlap: false, 44 | }) 45 | } 46 | pub fn allow_overlap(&self) -> bool { 47 | self.allow_component_overlap 48 | } 49 | pub fn scope_type(&self) -> String { 50 
| String::from(NETWORK_SCOPE) 51 | } 52 | pub fn create(&self, _owner: meta::OwnerReference) -> Result<(), Error> { 53 | Err(format_err!("network scope create not implemented")) 54 | } 55 | pub fn modify(&self) -> Result<(), Error> { 56 | Err(format_err!("network scope modify not implemented")) 57 | } 58 | /// could let OwnerReference delete 59 | pub fn delete(&self) -> Result<(), Error> { 60 | Err(format_err!("network scope delete not implemented")) 61 | } 62 | pub fn add(&self, _spec: ComponentConfiguration) -> Result<(), Error> { 63 | Err(format_err!("network scope add component not implemented")) 64 | } 65 | pub fn remove(&self, _spec: ComponentConfiguration) -> Result<(), Error> { 66 | Err(format_err!( 67 | "network scope remove component not implemented" 68 | )) 69 | } 70 | } 71 | 72 | #[cfg(test)] 73 | mod test { 74 | use crate::schematic::parameter::ParameterValue; 75 | use crate::schematic::scopes::{Network, NETWORK_SCOPE}; 76 | use kube::client::APIClient; 77 | use kube::config::Configuration; 78 | /// This mock builds a KubeConfig that will not be able to make any requests. 
79 | fn mock_kube_config() -> Configuration { 80 | Configuration { 81 | base_path: ".".into(), 82 | client: reqwest::Client::new(), 83 | } 84 | } 85 | 86 | #[test] 87 | fn test_create_network() { 88 | let mut params = vec![]; 89 | params.insert( 90 | 0, 91 | ParameterValue { 92 | name: "network-id".to_string(), 93 | value: Some("nid".into()), 94 | from_param: None, 95 | }, 96 | ); 97 | params.insert( 98 | 1, 99 | ParameterValue { 100 | name: "subnet-id".to_string(), 101 | value: Some("sid".into()), 102 | from_param: None, 103 | }, 104 | ); 105 | let net = Network::from_params( 106 | "test-net".to_string(), 107 | "namespace".to_string(), 108 | APIClient::new(mock_kube_config()), 109 | params, 110 | ) 111 | .unwrap(); 112 | assert_eq!(false, net.allow_overlap()); 113 | assert_eq!(NETWORK_SCOPE.to_string(), net.scope_type()); 114 | assert_eq!("nid".to_string(), net.network_id); 115 | assert_eq!("sid".to_string(), net.subnet_id); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/schematic/traits.rs: -------------------------------------------------------------------------------- 1 | use crate::lifecycle::Phase; 2 | use crate::schematic::parameter::ParameterValue; 3 | use kube::client::APIClient; 4 | use log::info; 5 | 6 | // Re-exports 7 | mod autoscaler; 8 | pub use crate::schematic::traits::autoscaler::Autoscaler; 9 | mod ingress; 10 | pub use crate::schematic::traits::ingress::Ingress; 11 | mod empty; 12 | pub use crate::schematic::traits::empty::Empty; 13 | mod manual_scaler; 14 | pub use crate::schematic::traits::manual_scaler::ManualScaler; 15 | mod volume_mounter; 16 | pub use crate::schematic::traits::volume_mounter::VolumeMounter; 17 | mod util; 18 | use crate::schematic::traits::util::*; 19 | use std::collections::BTreeMap; 20 | 21 | #[cfg(test)] 22 | mod autoscaler_test; 23 | #[cfg(test)] 24 | mod manual_scaler_test; 25 | #[cfg(test)] 26 | mod ingress_test; 27 | 28 | pub const INGRESS_V1ALPHA1: &str = 
"ingress"; 29 | pub const AUTOSCALER_V1ALPHA1: &str = "auto-scaler"; 30 | pub const MANUAL_SCALER_V1ALPHA1: &str = "manual-scaler"; 31 | pub const VOLUME_MOUNTER_V1ALPHA1: &str = "volume-mounter"; 32 | pub const EMPTY: &str = "empty"; 33 | 34 | /// Trait describes OAM traits. 35 | /// 36 | /// OAM traits are ops-oriented "add-ons" that can be attached to Components of the appropriate workloadType. 37 | /// For example, an autoscaler trait can attach to a workloadType (such as Server) that can be 38 | /// scaled up and down. 39 | #[derive(Serialize, Deserialize, Clone, Debug)] 40 | #[serde(rename_all = "camelCase")] 41 | pub struct Trait {} 42 | 43 | /// A TraitBinding attaches a trait to a component. 44 | /// 45 | /// Trait bindings appear in configuration stanzas for traits. 46 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] 47 | #[serde(rename_all = "camelCase")] 48 | pub struct TraitBinding { 49 | pub name: String, 50 | pub parameter_values: Option>, 51 | pub properties: Option, 52 | } 53 | 54 | /// OAMTrait is an enumeration of the known traits. 55 | /// 56 | /// This is a temporary solution. In the future, we really want to be able to proxy 57 | /// trait fulfillment down into Kubernetes and let individual trait controllers 58 | /// fulfill the contract. 
pub enum OAMTrait {
    Autoscaler(Autoscaler),
    ManualScaler(ManualScaler),
    Ingress(Ingress),
    // NOTE(review): the Box payload type was reconstructed from mangled source — confirm upstream.
    VolumeMounter(Box<VolumeMounter>),
    Empty(Empty),
}
impl OAMTrait {
    /// Dispatch a lifecycle phase to the concrete trait implementation.
    pub fn exec(&self, ns: &str, client: APIClient, phase: Phase) -> TraitResult {
        match self {
            OAMTrait::Autoscaler(a) => a.exec(ns, client, phase),
            OAMTrait::Ingress(i) => i.exec(ns, client, phase),
            OAMTrait::ManualScaler(m) => m.exec(ns, client, phase),
            OAMTrait::VolumeMounter(v) => v.exec(ns, client, phase),
            OAMTrait::Empty(e) => e.exec(ns, client, phase),
        }
    }
    /// Ask the concrete trait implementation for its status map, if it reports one.
    // NOTE(review): return-type generics reconstructed from mangled source — confirm upstream.
    pub fn status(&self, ns: &str, client: APIClient) -> Option<BTreeMap<String, String>> {
        match self {
            OAMTrait::Autoscaler(a) => a.status(ns, client),
            OAMTrait::Ingress(i) => i.status(ns, client),
            OAMTrait::ManualScaler(m) => m.status(ns, client),
            OAMTrait::Empty(e) => e.status(ns, client),
            OAMTrait::VolumeMounter(v) => v.status(ns, client),
        }
    }
}

/// A TraitImplementation is an implementation of an OAM Trait.
///
/// For example, Ingress is an implementation of an OAM Trait.
pub trait TraitImplementation {
    /// Route a lifecycle phase to the matching hook on this implementation.
    fn exec(&self, ns: &str, client: APIClient, phase: Phase) -> TraitResult {
        match phase {
            Phase::Add => self.add(ns, client),
            Phase::Modify => self.modify(ns, client),
            Phase::Delete => self.delete(ns, client),
            Phase::PreAdd => self.pre_add(ns, client),
            Phase::PreModify => self.pre_modify(ns, client),
            Phase::PreDelete => self.pre_delete(ns, client),
        }
    }
    /// Create the trait's backing resources. The only mandatory hook.
    fn add(&self, ns: &str, client: APIClient) -> TraitResult;
    /// Update backing resources; errors by default for traits that don't support updates.
    fn modify(&self, _ns: &str, _client: APIClient) -> TraitResult {
        Err(format_err!("Trait updates not implemented for this type"))
    }
    fn delete(&self, _ns: &str, _client: APIClient) -> TraitResult {
        // Often, owner references mean you don't need to do anything here.
        // But if we invoke this delete function standalone, that means we hope to delete this sub resource actively.
        Err(format_err!("Trait delete not implemented for this type"))
    }
    /// Whether this trait can attach to the given workload type; permissive by default.
    fn supports_workload_type(name: &str) -> bool {
        info!("Support {} by default", name);
        true
    }
    // The pre_* hooks run before the corresponding component operation; no-ops by default.
    fn pre_add(&self, _ns: &str, _client: APIClient) -> TraitResult {
        Ok(())
    }
    fn pre_modify(&self, _ns: &str, _client: APIClient) -> TraitResult {
        Ok(())
    }
    fn pre_delete(&self, _ns: &str, _client: APIClient) -> TraitResult {
        Ok(())
    }
    /// Optional status map for this trait; None by default.
    fn status(&self, _ns: &str, _client: APIClient) -> Option<BTreeMap<String, String>> {
        None
    }
}
--------------------------------------------------------------------------------
/src/schematic/traits/autoscaler_test.rs:
--------------------------------------------------------------------------------
use crate::schematic::traits::*;
use serde_json::json;
use serde_json::map::Map;

#[test]
fn test_autoscaler_defaults() {
    let autoscaler = Autoscaler {
        name: "release".into(),
        instance_name: "instance".into(),
        component_name: "component".into(),
        cpu: None,
        memory: None,
        minimum: None,
        maximum: None,
        owner_ref: None,
    };
    let kauto = autoscaler.to_horizontal_pod_autoscaler();
    assert_eq!(
        Some("instance-trait-autoscaler".to_string()),
        kauto.metadata.expect("metadata").name
    );
    let spec = kauto.spec.expect("spec");
    // With no explicit maximum, max_replicas falls back to the built-in default of 10.
    assert_eq!(10, spec.max_replicas);
}

#[test]
fn test_autoscaler_v1alpha1_cpu() {
    let autoscaler_alpha1_trait = TraitBinding {
        name: String::from("auto-scaler"),
        parameter_values: None,
        properties: Some(json!({
            "cpu": 42,
            "minimum": 6,
            "maximum": 7
        })),
    };

    // Round-trip through JSON to exercise TraitBinding (de)serialization as well.
    let serialized = serde_json::to_string(&autoscaler_alpha1_trait).unwrap();
    let deserialized_trait: TraitBinding = serde_json::from_str(&serialized).unwrap();
    // NOTE(review): Map's generic parameters reconstructed from mangled source — confirm upstream.
    let prop_map: Option<&Map<String, serde_json::Value>> =
        deserialized_trait.properties.as_ref().unwrap().as_object();

    let autoscaler = Autoscaler::from_properties(
        "release".into(),
        "instance".into(),
        "component".into(),
        prop_map,
        None,
    );
    let kauto = autoscaler.to_horizontal_pod_autoscaler();
    assert_eq!(
        Some("instance-trait-autoscaler".to_string()),
        kauto.metadata.expect("metadata").name
    );
    let spec = kauto.spec.expect("spec");
    assert_eq!(7, spec.max_replicas);
    assert_eq!(Some(6), spec.min_replicas);

    // cpu is added first so index for cpu is 0
    let metrics = spec.metrics.expect("metrics").clone();
    assert_eq!(
        Some(42),
        metrics[0]
            .clone()
            .resource
            .expect("a resource")
            .target_average_utilization
    );
}

#[test]
fn test_autoscaler_v1alpha1_memory() {
    let autoscaler_alpha1_trait = TraitBinding {
        name: String::from("auto-scaler"),
        parameter_values: None,
        properties: Some(json!({
            "memory": 50,
            "minimum": 6,
            "maximum": 7
        })),
    };

    let serialized = serde_json::to_string(&autoscaler_alpha1_trait).unwrap();
    let deserialized_trait: TraitBinding = serde_json::from_str(&serialized).unwrap();
    // NOTE(review): Map's generic parameters reconstructed from mangled source — confirm upstream.
    let prop_map: Option<&Map<String, serde_json::Value>> =
        deserialized_trait.properties.as_ref().unwrap().as_object();

    let autoscaler = Autoscaler::from_properties(
        "release".into(),
        "instance".into(),
        "component".into(),
        prop_map,
        None,
    );
    let kauto = autoscaler.to_horizontal_pod_autoscaler();
    assert_eq!(
        Some("instance-trait-autoscaler".to_string()),
        kauto.metadata.expect("metadata").name
    );
    let spec = kauto.spec.expect("spec");
    assert_eq!(7, spec.max_replicas);
    assert_eq!(Some(6), spec.min_replicas);

    // Only the memory metric is configured here, so the single metric is at index 0.
    let metrics = spec.metrics.expect("metrics").clone();
    assert_eq!(
        Some(50),
        metrics[0]
            .clone()
            .resource
            .expect("a resource")
            .target_average_utilization
    );
}

#[test]
fn test_autoscaler_v1alpha1_multi_metrics_resource() {
    let autoscaler_alpha1_trait = TraitBinding {
        name: String::from("auto-scaler"),
        parameter_values: None,
        properties: Some(json!({
            "cpu": 42,
            "memory": 50,
            "minimum": 6,
            "maximum": 7
        })),
    };

    let serialized = serde_json::to_string(&autoscaler_alpha1_trait).unwrap();
    let deserialized_trait: TraitBinding = serde_json::from_str(&serialized).unwrap();
    // NOTE(review): Map's generic parameters reconstructed from mangled source — confirm upstream.
    let prop_map: Option<&Map<String, serde_json::Value>> =
        deserialized_trait.properties.as_ref().unwrap().as_object();

    let autoscaler = Autoscaler::from_properties(
        "release".into(),
        "instance".into(),
        "component".into(),
        prop_map,
        None,
    );
    let kauto = autoscaler.to_horizontal_pod_autoscaler();
    assert_eq!(
        Some("instance-trait-autoscaler".to_string()),
        kauto.metadata.expect("metadata").name
    );
    let spec = kauto.spec.expect("spec");
    assert_eq!(7, spec.max_replicas);
    assert_eq!(Some(6), spec.min_replicas);

    // cpu is added first so index for cpu is 0
    let metrics = spec.metrics.expect("metrics").clone();
    assert_eq!(
        Some(42),
        metrics[0]
            .clone()
            .resource
            .expect("a resource")
            .target_average_utilization
    );

    // cpu is added first so index for memory is 1
    assert_eq!(
        Some(50),
        metrics[1]
            .clone()
            .resource
            .expect("a resource")
            .target_average_utilization
    );
}
--------------------------------------------------------------------------------
/src/schematic/traits/empty.rs:
--------------------------------------------------------------------------------
use crate::schematic::traits::{util::*, TraitImplementation};
use kube::client::APIClient;
use std::collections::BTreeMap;

/// A no-op trait implementation: every lifecycle hook succeeds without doing anything.
pub struct Empty {}

impl TraitImplementation for Empty {
    fn supports_workload_type(_name: &str) -> bool {
        true
    }
    fn add(&self, _ns: &str, _client: APIClient) -> TraitResult {
        Ok(())
    }
    fn modify(&self, _ns: &str, _client: APIClient) -> TraitResult {
        Ok(())
    }
    fn delete(&self, _ns: &str, _client: APIClient) -> TraitResult {
        Ok(())
    }
    // NOTE(review): return-type generics reconstructed from mangled source — confirm upstream.
    fn status(&self, _ns: &str, _client: APIClient) -> Option<BTreeMap<String, String>> {
        None
    }
}
--------------------------------------------------------------------------------
/src/schematic/traits/manual_scaler_test.rs:
--------------------------------------------------------------------------------
use k8s_openapi::api::{apps::v1 as apps, batch::v1 as batch};
use crate::{
    schematic::traits::*,
    workload_type::{SERVER_NAME, SINGLETON_SERVER_NAME, SINGLETON_TASK_NAME, TASK_NAME},
};
use serde_json::json;
use serde_json::map::Map;

#[test]
fn test_manual_scaler_workload_types() {
    // Replicable workload types can be manually scaled; singletons cannot.
    let matches = vec![SERVER_NAME, TASK_NAME];
    for m in matches {
        assert!(ManualScaler::supports_workload_type(m));
    }
    let no_matches = vec![SINGLETON_TASK_NAME, SINGLETON_SERVER_NAME];
    for m in no_matches {
        assert!(!ManualScaler::supports_workload_type(m));
    }
}

#[test]
fn test_scale_deployment() {
    let first = apps::Deployment {
        spec: Some(apps::DeploymentSpec {
            replicas: Some(5),
            ..Default::default()
        }),
        ..Default::default()
    };
    let ms = ManualScaler {
        name: "name".into(),
        instance_name: "inst_name".into(),
        component_name: "comp_name".into(),
        owner_ref: None,
        replica_count: 9,
        workload_type: SERVER_NAME.into(),
    };
    // The scaler must overwrite the deployment's replica count with its own.
    let second = ms.scale_deployment(first);
    assert_eq!(Some(9), second.spec.expect("spec is required").replicas);
}

#[test]
fn test_scale_job() {
    let first = batch::Job {
        spec: Some(batch::JobSpec {
            parallelism: Some(5),
            ..Default::default()
        }),
        ..Default::default()
    };
    let ms = ManualScaler {
        name: "name".into(),
        instance_name: "inst_name".into(),
        component_name: "comp_name".into(),
        owner_ref: None,
        replica_count: 9,
        workload_type: TASK_NAME.into(),
    };
    // For jobs, scaling is expressed through parallelism rather than replicas.
    let second = ms.scale_job(first);
    assert_eq!(Some(9), second.spec.expect("spec is required").parallelism);
}

#[test]
fn test_manual_scaler_v1alpha1_properties() {
    let first = batch::Job {
        spec: Some(batch::JobSpec {
            parallelism: Some(3),
            ..Default::default()
        }),
        ..Default::default()
    };

    let manualscaler_alpha1_trait = TraitBinding {
        name: String::from("manual-scaler"),
        parameter_values: None,
        properties: Some(json!({
            "replicaCount": 3
        })),
    };

    // Round-trip through JSON, then build the scaler from the properties map.
    let serialized = serde_json::to_string(&manualscaler_alpha1_trait).unwrap();
    let deserialized_trait: TraitBinding = serde_json::from_str(&serialized).unwrap();
    // NOTE(review): Map's generic parameters reconstructed from mangled source — confirm upstream.
    let prop_map: Option<&Map<String, serde_json::Value>> =
        deserialized_trait.properties.as_ref().unwrap().as_object();

    let ms = ManualScaler::from_properties(
        "release".into(),
        "instance".into(),
        "component".into(),
        prop_map,
        None,
        "core.oam.dev/v1alpha1.Task".into(),
    );

    let second = ms.scale_job(first);
    assert_eq!(Some(3), second.spec.expect("spec is required").parallelism);
}
--------------------------------------------------------------------------------
/src/schematic/traits/util.rs:
--------------------------------------------------------------------------------
use failure::Error;
use k8s_openapi::apimachinery::pkg::apis::meta::v1 as meta;
use std::collections::BTreeMap;

/// Alias for trait results.
6 | pub type TraitResult = Result<(), Error>; 7 | /// Alias for a vector of owner references. 8 | pub type OwnerRefs = Option>; 9 | /// Alias for a map of labels. 10 | type Labels = BTreeMap; 11 | 12 | /// Generate the common labels for a trait. 13 | pub fn trait_labels(name: String, inst_name: String) -> Labels { 14 | let mut labels: Labels = BTreeMap::new(); 15 | labels.insert("oam.dev/role".into(), "trait".into()); 16 | labels.insert("app.kubernetes.io/name".to_string(), name); 17 | labels.insert("oam.dev/instance-name".to_string(), inst_name); 18 | labels 19 | } 20 | 21 | #[cfg(test)] 22 | mod tests { 23 | use crate::schematic::traits::util::*; 24 | 25 | #[test] 26 | fn test_trait_labels() { 27 | let labels = trait_labels("name".to_string(), "inst".to_string()); 28 | assert_eq!( 29 | "trait".to_string(), 30 | *labels.get("oam.dev/role").expect("role must be a string") 31 | ); 32 | assert_eq!( 33 | "name".to_string(), 34 | *labels 35 | .get("app.kubernetes.io/name") 36 | .expect("name must be a string") 37 | ); 38 | assert_eq!( 39 | "inst".to_string(), 40 | *labels 41 | .get("oam.dev/instance-name") 42 | .expect("instance-name must be a string") 43 | ); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/schematic/traits_test.rs: -------------------------------------------------------------------------------- 1 | use crate::lifecycle::Phase; 2 | use crate::schematic::traits::*; 3 | use crate::workload_type::{SERVER_NAME, SINGLETON_SERVER_NAME}; 4 | use kube::{client::APIClient, config::Configuration}; 5 | 6 | #[test] 7 | fn test_ingress_workload_types() { 8 | assert!(Ingress::supports_workload_type(SERVER_NAME)); 9 | assert!(Ingress::supports_workload_type(SINGLETON_SERVER_NAME)); 10 | } 11 | 12 | #[test] 13 | fn test_autoscaler_workload_types() { 14 | assert!(Autoscaler::supports_workload_type(SERVER_NAME)); 15 | assert!(!Autoscaler::supports_workload_type(SINGLETON_SERVER_NAME)); 16 | } 17 | 18 | #[test] 19 
fn test_traits_exec() {
    // The Empty trait must succeed for a phase without contacting a cluster.
    let emptytrait = OAMTrait::Empty(Empty {});
    match emptytrait {
        OAMTrait::Empty(empty) => assert!(empty.exec("test", mock_client(), Phase::Add).is_ok()),
        _ => panic!("Should be empty"),
    }
}

// Builds an APIClient pointed at a bogus local path; requests made with it
// would fail, but these tests never issue any.
fn mock_client() -> APIClient {
    APIClient::new(Configuration {
        base_path: ".".into(),
        client: reqwest::Client::new(),
    })
}
--------------------------------------------------------------------------------
/src/schematic/variable.rs:
--------------------------------------------------------------------------------
use super::parameter::ParameterValue;
use failure::Error;
use regex::Regex;
use std::cmp::Ordering;
use std::collections::BTreeMap;

/// Extract the variable name from a `[fromVariable(NAME)]` expression,
/// returning `None` when the input is not exactly of that form.
fn parse_from_variable(input: String) -> Option<String> {
    lazy_static! {
        // NOTE(review): the named capture group was garbled in extraction;
        // reconstructed as `(?P<var>...)` to match `cap.name("var")` below.
        static ref RE: Regex = Regex::new(r#"^\[fromVariable\((?P<var>[[:word:]]+)\)\]$"#).unwrap();
    }
    RE.captures(&input)
        .and_then(|cap| cap.name("var").map(|var| var.as_str().to_owned()))
}

/// Variables are common values that can be substituted into
/// predefined locations within an application configuration
/// using the [fromVariable(VARNAME)] syntax.
#[derive(Serialize, Deserialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Variable {
    /// The variable's name (must be unique per configuration).
    pub name: String,
    /// The variable's scalar value.
24 | pub value: serde_json::Value, 25 | } 26 | 27 | impl From for ParameterValue { 28 | fn from(var: Variable) -> Self { 29 | ParameterValue { 30 | name: var.name.clone(), 31 | value: Some(var.value.clone()), 32 | from_param: None, 33 | } 34 | } 35 | } 36 | 37 | impl PartialOrd for Variable { 38 | fn partial_cmp(&self, other: &Self) -> Option { 39 | Some(self.name.cmp(&other.name)) 40 | } 41 | } 42 | 43 | impl PartialEq for Variable { 44 | fn eq(&self, other: &Self) -> bool { 45 | self.name == other.name 46 | } 47 | } 48 | 49 | impl Ord for Variable { 50 | fn cmp(&self, other: &Self) -> Ordering { 51 | self.name.cmp(&other.name) 52 | } 53 | } 54 | 55 | impl Eq for Variable {} 56 | 57 | /// Expand any variables in current values using the variables defined in the configuration. 58 | /// 59 | /// If a variable is referenced but undefined, the parameter value will be set to None. 60 | pub fn expand_variables( 61 | values: &mut Vec, 62 | vars: BTreeMap, 63 | ) -> Result<(), Error> { 64 | for param in values.iter_mut() { 65 | if let Some(value) = ¶m.value { 66 | if let serde_json::Value::String(s) = value { 67 | if let Some(ref var) = parse_from_variable(s.clone()) { 68 | param.value = vars.get(var).cloned(); 69 | ensure!( 70 | param.value.is_some(), 71 | format!( 72 | "parameter `{:?}` references undefined variable `{:?}`", 73 | ¶m.name, &var, 74 | ) 75 | ); 76 | } 77 | } 78 | } 79 | } 80 | Ok(()) 81 | } 82 | 83 | /// Resolve parameter values containing variables. 84 | pub fn resolve_variables( 85 | values: Vec, 86 | vars: Vec, 87 | ) -> Result, Error> { 88 | expand_variables( 89 | &mut values.clone(), 90 | vars.into_iter() 91 | .map(|var| (var.name.clone(), var.value.clone())) 92 | .collect::>(), 93 | )?; 94 | Ok(values.to_vec()) 95 | } 96 | 97 | /// Transform a vector of variables into parameter values. 
98 | pub fn get_variable_values(vars: Option>) -> Vec { 99 | let mut vars = vars.unwrap_or_else(|| vec![]); 100 | dedup(&mut vars); 101 | vars.into_iter().map(|var| var.into()).collect() 102 | } 103 | 104 | // TODO: variables are unique per config should redefinition should be an error. 105 | pub fn dedup(values: &mut Vec) { 106 | values.sort_unstable(); 107 | values.dedup(); 108 | } 109 | 110 | #[cfg(test)] 111 | mod tests { 112 | use super::*; 113 | use serde_json::json; 114 | 115 | #[test] 116 | fn test_resolve_variables() { 117 | resolve_variables( 118 | vec![ 119 | ParameterValue { 120 | name: "dinner1".into(), 121 | value: Some(json!("[fromVariable(pet1)]")), 122 | from_param: None, 123 | }, 124 | ParameterValue { 125 | name: "dinner2".into(), 126 | value: Some(json!("[fromVariable(pet2)]")), 127 | from_param: None, 128 | }, 129 | ], 130 | vec![ 131 | Variable { 132 | name: "pet1".into(), 133 | value: json!("cat"), 134 | }, 135 | Variable { 136 | name: "pet2".into(), 137 | value: json!("dog"), 138 | }, 139 | ], 140 | ) 141 | .expect("resolve variables"); 142 | 143 | // test parameter value referencing undefine variable should error. 
        resolve_variables(
            vec![ParameterValue {
                name: "dinner".into(),
                value: Some(json!("[fromVariable(cereal)]")),
                from_param: None,
            }],
            vec![],
        )
        .expect_err(r#"undefined variable `"cereal"`"#);
    }

    // Exercises the exact-match grammar of `[fromVariable(...)]`:
    // word characters only, no spaces, non-empty name.
    #[test]
    fn test_parse_from_variable() {
        assert_eq!(
            Some("VAR_42".to_owned()),
            parse_from_variable("[fromVariable(VAR_42)]".into())
        );
        assert_eq!(
            Some("_".to_owned()),
            parse_from_variable("[fromVariable(_)]".into())
        );
        assert_eq!(
            Some("42".to_owned()),
            parse_from_variable("[fromVariable(42)]".into())
        );
        assert_eq!(
            Some("VAR".to_owned()),
            parse_from_variable("[fromVariable(VAR)]".into())
        );
        assert_eq!(None, parse_from_variable("[fromVariable (VAR)]".into())); // illegal: space
        assert_eq!(None, parse_from_variable("[fromVariable()]".into())); // illegal: empty name
    }
}
--------------------------------------------------------------------------------
/src/trait_manager.rs:
--------------------------------------------------------------------------------
use failure::Error;
use k8s_openapi::apimachinery::pkg::apis::meta::v1 as meta;
use kube::client::APIClient;
use log::{debug, error};
use serde_json::json;
use serde_json::map::Map;
use std::collections::BTreeMap;

use crate::{
    lifecycle::Phase,
    schematic::{
        component::Component,
        configuration::ComponentConfiguration,
        parameter::ParameterValue,
        traits::{
            self, Autoscaler, Empty, Ingress, ManualScaler, OAMTrait, TraitBinding, VolumeMounter,
        },
    },
};

// TraitManager maps a component to its traits, and handles trait lifecycle.
//
// Each component configuration is assigned a trait manager. That trait manager
// can load all of the associated traits, and then execute phases for each of
// the traits.
pub struct TraitManager {
    pub config_name: String,
    pub instance_name: String,
    pub component: ComponentConfiguration,
    // NOTE(review): several generic parameters in this struct were garbled in
    // extraction; reconstructed from how the fields are used below — confirm.
    pub parent_params: Vec<ParameterValue>,
    pub owner_ref: Option<Vec<meta::OwnerReference>>,
    pub workload_type: String,
    // Component schematic loaded from cluster.
    pub component_schematic: Component,
    pub traits: Vec<OAMTrait>,
}

impl TraitManager {
    /// Resolve every trait binding on the component into a concrete trait
    /// implementation, replacing any previously loaded set.
    pub fn load_traits(&mut self) -> Result<(), failure::Error> {
        let mut traits: Vec<OAMTrait> = vec![];
        for t in self.component.traits.as_ref().unwrap_or(&vec![]).iter() {
            // Load all of the traits into the manager.
            let imp = self.load_trait(&t)?;
            traits.push(imp);
        }
        self.traits = traits;
        Ok(())
    }
    /// Map a single trait binding (by name) to its implementation, handing the
    /// binding's `properties` object to the implementation's parser.
    fn load_trait(&self, binding: &TraitBinding) -> Result<OAMTrait, failure::Error> {
        debug!("Trait binding params: {:?}", &binding.parameter_values);
        // Missing properties fall back to an empty (non-object) value, which
        // yields `None` from `as_object()` below.
        let empty_value_ref: &serde_json::Value = &json!("");
        let prop_map: Option<&Map<String, serde_json::Value>> = binding
            .properties
            .as_ref()
            .unwrap_or_else(|| empty_value_ref)
            .as_object();
        match binding.name.as_str() {
            traits::INGRESS_V1ALPHA1 => {
                let ing = Ingress::from_properties(
                    self.config_name.clone(),
                    self.instance_name.clone(),
                    self.component.component_name.clone(),
                    prop_map,
                    self.owner_ref.clone(),
                );
                debug!("INGRESS_V1ALPHA1: {:?}", ing);
                Ok(OAMTrait::Ingress(ing))
            }
            traits::VOLUME_MOUNTER_V1ALPHA1 => {
                let volmount = VolumeMounter::from_properties(
                    self.config_name.clone(),
                    self.instance_name.clone(),
                    self.component.component_name.clone(),
                    prop_map,
                    self.owner_ref.clone(),
                    self.component_schematic.clone(),
                );
                debug!("VOLUME_MOUNTER: {:?}", volmount);
                // Boxed — presumably to keep the OAMTrait enum small; confirm.
                Ok(OAMTrait::VolumeMounter(Box::new(volmount)))
            }
            traits::AUTOSCALER_V1ALPHA1 => {
                let auto_scaler = Autoscaler::from_properties(
                    self.config_name.clone(),
                    self.instance_name.clone(),
                    self.component.component_name.clone(),
                    prop_map,
                    self.owner_ref.clone(),
                );
                debug!("Auto_scaler: {:?}", auto_scaler);
                Ok(OAMTrait::Autoscaler(auto_scaler))
            }
            traits::MANUAL_SCALER_V1ALPHA1 => {
                let scaler = ManualScaler::from_properties(
                    self.config_name.clone(),
                    self.instance_name.clone(),
                    self.component.component_name.clone(),
                    prop_map,
                    self.owner_ref.clone(),
                    self.workload_type.clone(),
                );
                debug!("Manual_scaler: {:?}", scaler);
                Ok(OAMTrait::ManualScaler(scaler))
            }
            // Empty is a debugging tool for checking whether the traits system is
            // functioning independently of its environment.
            traits::EMPTY => {
                let empty = Empty {};
                Ok(OAMTrait::Empty(empty))
            }
            _ => Err(format_err!("unknown trait {}", binding.name)),
        }
    }
    /// Run one lifecycle phase across every loaded trait.
    ///
    /// Trait failures are logged but deliberately not propagated, so one
    /// failing trait does not abort the phase for the others.
    pub fn exec(&self, ns: &str, client: APIClient, phase: Phase) -> Result<(), Error> {
        for imp in &self.traits {
            // At the moment, we don't return an error if a trait fails.
            let res = imp.exec(ns, client.clone(), phase.clone());
            if let Err(err) = res {
                error!(
                    "Trait phase {:?} failed for {}: {:?}",
                    phase,
                    self.config_name.as_str(),
                    err
                );
            }
        }
        Ok(())
    }
    /// Collect the status of every loaded trait into one map, or `None` when
    /// no trait reported any status.
    pub fn status(&self, ns: &str, client: APIClient) -> Option<BTreeMap<String, String>> {
        let mut all_status = BTreeMap::new();
        for imp in &self.traits {
            if let Some(status) = imp.status(ns, client.clone()) {
                for (name, state) in status {
                    // We don't need to worry about a name conflict, as K8s
                    // wouldn't allow this to happen in the same namespace.
                    all_status.insert(name, state);
                }
            };
        }
        if all_status.is_empty() {
            return None;
        }
        Some(all_status)
    }
}
--------------------------------------------------------------------------------
/src/workload_type/extended_workload.rs:
--------------------------------------------------------------------------------
pub mod openfaas;
pub mod others;
--------------------------------------------------------------------------------
/src/workload_type/extended_workload/others.rs:
--------------------------------------------------------------------------------
use crate::schematic::GroupVersionKind;
use crate::workload_type::{
    InstigatorResult, StatusResult, ValidationResult, WorkloadMetadata, WorkloadType,
};
use failure::{format_err, Error};
use kube::api::{PatchParams, PostParams, RawApi};
use serde_json::json;
use std::collections::BTreeMap;

/// `Others` handles workload types that are not built in, by treating the type
/// string as a group/version/kind and copying the component's `spec` workload
/// setting verbatim into a custom resource.
pub struct Others {
    pub meta: WorkloadMetadata,
    pub gvk: GroupVersionKind,
}

impl Others {
    /// Parse `type_` (e.g. `extend.oam.dev/v1alpha1.Test`) into a GVK and
    /// require a `spec` entry in the component's workloadSettings.
    pub fn new(meta: WorkloadMetadata, type_: &str) -> Result<Self, Error> {
        let gvk: GroupVersionKind = std::str::FromStr::from_str(type_)?;
        if meta
            .definition
            .workload_settings
            .iter()
            .find(|&item| item.name == "spec")
            .is_none()
        {
            return Err(format_err!(
                "unknown workload type must have spec in workloadSettings"
            ));
        };
        Ok(Others { meta, gvk })
    }
    /// Build the custom-resource manifest for this workload.
    pub fn get_object(&self) -> serde_json::Value {
        let api_version = self.gvk.group.clone() + "/" + self.gvk.version.as_str();
        // `new` verified that a `spec` setting exists; constructing `Others`
        // directly (fields are public) without one would panic here.
        let item = self
            .meta
            .definition
            .workload_settings
            .iter()
            .find(|&item| item.name == "spec")
            .unwrap();

        json!({
            "apiVersion": api_version,
            "kind": self.gvk.kind.clone(),
            "metadata": {
                "name": self.meta.instance_name.clone(),
                "ownerReferences": self.meta.owner_ref.clone(),
            },
            "spec": item.value,
        })
        // TODO: now we only copy spec here; we could use a JSON patch or
        // something else to enable parameter override.
    }
}

/// Naive English pluralizer used to derive the CRD plural resource name from
/// the kind (e.g. `function` -> `functions`, `prometheus` -> `prometheuses`).
/// NOTE(review): this is a heuristic; it will not match CRDs that declare an
/// irregular plural — confirm against the target CRD's spec.
fn form_plural(word: &str) -> String {
    if word.is_empty() {
        return word.to_string();
    }
    let mut newword = word.to_string();
    if newword.ends_with('y') {
        newword.pop();
        newword = newword.to_string() + "ies";
        return newword;
    }
    if newword.ends_with('s')
        || newword.ends_with('c')
        || newword.ends_with("ch")
        || newword.ends_with("sh")
    {
        return newword + "es";
    }
    newword + "s"
}

impl WorkloadType for Others {
    /// Create the custom resource in the workload's namespace.
    fn add(&self) -> InstigatorResult {
        let crd_resource = RawApi::customResource(
            form_plural(self.gvk.kind.clone().to_lowercase().as_str()).as_str(),
        )
        .version(self.gvk.version.as_str())
        .group(self.gvk.group.as_str())
        .within(self.meta.namespace.as_str());
        let object = self.get_object();
        let crd_req = crd_resource.create(&PostParams::default(), serde_json::to_vec(&object)?)?;
        let _: serde_json::Value = self.meta.client.request(crd_req)?;
        Ok(())
    }
    /// Patch the existing custom resource with a freshly built manifest.
    fn modify(&self) -> InstigatorResult {
        let crd_resource = RawApi::customResource(
            form_plural(self.gvk.kind.clone().to_lowercase().as_str()).as_str(),
        )
        .version(self.gvk.version.as_str())
        .group(self.gvk.group.as_str())
        .within(self.meta.namespace.as_str());
        let object = self.get_object();
        let crd_req = crd_resource.patch(
            self.meta.instance_name.clone().as_str(),
            &PatchParams::default(),
            serde_json::to_vec(&object)?,
        )?;
        let _: serde_json::Value = self.meta.client.request(crd_req)?;
        Ok(())
    }
    // Deletion is a no-op here — presumably cleanup relies on owner
    // references and garbage collection; confirm before depending on it.
    fn delete(&self) -> InstigatorResult {
        Ok(())
    }
    fn status(&self) -> StatusResult {
        // TODO: how to implement status while we don't know the spec?
        Ok(BTreeMap::new())
    }
    // No schema is known for extended workloads, so nothing is validated.
    fn validate(&self) -> ValidationResult {
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use crate::schematic::component::{Component, WorkloadSetting};
    use crate::schematic::parameter::ParameterType;
    use crate::workload_type::extended_workload::others::{form_plural, Others};
    use crate::workload_type::WorkloadMetadata;
    use kube::client::APIClient;
    use kube::config::Configuration;
    use serde_json::json;
    use std::collections::BTreeMap;

    // The generated manifest should copy the `spec` workload setting verbatim
    // and carry the instance name and owner references in its metadata.
    #[test]
    fn test_get_object() {
        let workload = Others::new(
            WorkloadMetadata {
                name: "test".to_string(),
                component_name: "test".to_string(),
                instance_name: "test".to_string(),
                namespace: "default".to_string(),
                definition: Component {
                    workload_settings: vec![WorkloadSetting {
                        name: "spec".to_string(),
                        parameter_type: ParameterType::Object,
                        value: Some(
                            serde_json::to_value(json!({"image":"testrepo/test","name":"test"}))
                                .unwrap(),
                        ),
                        from_param: None,
                        required: true,
                        description: None,
                    }],
                    ..Default::default()
                },
                client: APIClient::new(Configuration {
                    base_path: ".".into(),
                    client: reqwest::Client::new(),
                }),
                params: BTreeMap::new(),
                owner_ref: None,
                annotations: None,
            },
            "extend.oam.dev/v1alpha1.Test",
        )
        .unwrap();

        assert_eq!(
            json!({"apiVersion":"extend.oam.dev/v1alpha1","kind":"Test","metadata":{"name":"test","ownerReferences":null},"spec":{"image":"testrepo/test","name":"test"}}),
            workload.get_object()
        )
    }

    #[test]
    fn test_form_plural() {
        assert_eq!("functions", form_plural("function").as_str());
        assert_eq!("prometheuses", form_plural("prometheus").as_str());
    }
}
--------------------------------------------------------------------------------
/src/workload_type/statefulset_builder.rs:
--------------------------------------------------------------------------------
use crate::schematic::component::Component;
use crate::workload_type::workload_builder;
use crate::workload_type::{InstigatorResult, ParamMap};
use k8s_openapi::api::apps::v1 as apps;
use k8s_openapi::api::core::v1 as api;
use k8s_openapi::apimachinery::pkg::apis::meta::v1 as meta;
use kube::api::Object;
use kube::client::APIClient;
use std::collections::BTreeMap;

/// StatefulsetBuilder builds Singleton Server and Singleton Worker workloads
/// using a Kubernetes StatefulSet.
///
/// This hides many of the details of building a StatefulSet, exposing only
/// parameters common to Rudr workload types.
pub(crate) struct StatefulsetBuilder {
    component: Component,
    labels: workload_builder::Labels,
    // NOTE(review): generic parameters on these fields were garbled in
    // extraction; reconstructed from their use in to_statefulset() — confirm.
    annotations: Option<BTreeMap<String, String>>,
    name: String,
    restart_policy: String,
    owner_ref: Option<Vec<meta::OwnerReference>>,
    param_vals: ParamMap,
}

impl StatefulsetBuilder {
    /// Create a StatefulsetBuilder with defaults: restart policy "Always",
    /// no labels, annotations, owner references, or parameters.
    pub fn new(instance_name: String, component: Component) -> Self {
        StatefulsetBuilder {
            component,
            name: instance_name,
            labels: workload_builder::Labels::new(),
            annotations: None,
            restart_policy: "Always".to_string(),
            owner_ref: None,
            param_vals: BTreeMap::new(),
        }
    }
    /// Add labels
    pub fn labels(mut self, labels: workload_builder::Labels) -> Self {
        self.labels = labels;
        self
    }

    /// Add annotations.
    ///
    /// In Kubernetes, these will be added to the pod specification.
47 | pub fn annotations(mut self, annotations: Option) -> Self { 48 | self.annotations = annotations; 49 | self 50 | } 51 | 52 | pub fn parameter_map(mut self, param_vals: ParamMap) -> Self { 53 | self.param_vals = param_vals; 54 | self 55 | } 56 | /// Set the owner refence for the job and the pod 57 | pub fn owner_ref(mut self, owner: Option>) -> Self { 58 | self.owner_ref = owner; 59 | self 60 | } 61 | 62 | pub fn to_statefulset(&self) -> apps::StatefulSet { 63 | apps::StatefulSet { 64 | metadata: workload_builder::form_metadata( 65 | self.name.clone(), 66 | self.labels.clone(), 67 | self.owner_ref.clone(), 68 | ), 69 | spec: Some(apps::StatefulSetSpec { 70 | selector: meta::LabelSelector { 71 | match_labels: Some(self.labels.clone()), 72 | ..Default::default() 73 | }, 74 | template: api::PodTemplateSpec { 75 | metadata: Some(meta::ObjectMeta { 76 | name: Some(self.name.clone()), 77 | labels: Some(self.labels.clone()), 78 | annotations: self.annotations.clone(), 79 | owner_references: self.owner_ref.clone(), 80 | ..Default::default() 81 | }), 82 | spec: Some(self.component.to_pod_spec_with_policy( 83 | self.param_vals.clone(), 84 | self.restart_policy.clone(), 85 | )), 86 | }, 87 | ..Default::default() 88 | }), 89 | ..Default::default() 90 | } 91 | } 92 | 93 | pub fn status(self, client: APIClient, namespace: String) -> Result { 94 | let sts: Object<_, apps::StatefulSetStatus> = 95 | match kube::api::Api::v1StatefulSet(client.clone()) 96 | .within(namespace.as_str()) 97 | .get_status(self.name.as_str()) 98 | { 99 | Ok(sts) => sts, 100 | Err(e) => return Err(e) 101 | }; 102 | let status: apps::StatefulSetStatus = sts.status.unwrap(); 103 | let replica = status.replicas; 104 | let available_replicas = status.ready_replicas.unwrap_or(0); 105 | let mut state = "updating".to_string(); 106 | if available_replicas == replica { 107 | state = "running".to_string() 108 | } 109 | Ok(state) 110 | } 111 | 112 | pub fn do_request(self, client: APIClient, namespace: String, 
                      phase: &str) -> InstigatorResult {
        let statefulset = self.to_statefulset();
        // Dispatch on the lifecycle phase; anything other than "modify" or
        // "delete" is treated as a create.
        match phase {
            "modify" => {
                let pp = kube::api::PatchParams::default();
                kube::api::Api::v1StatefulSet(client)
                    .within(namespace.as_str())
                    .patch(self.name.as_str(), &pp, serde_json::to_vec(&statefulset)?)?;
                Ok(())
            }
            "delete" => {
                let pp = kube::api::DeleteParams::default();
                kube::api::Api::v1StatefulSet(client)
                    .within(namespace.as_str())
                    .delete(self.name.as_str(), &pp)?;
                Ok(())
            }
            _ => {
                let pp = kube::api::PostParams::default();
                kube::api::Api::v1StatefulSet(client)
                    .within(namespace.as_str())
                    .create(&pp, serde_json::to_vec(&statefulset)?)?;
                Ok(())
            }
        }
    }
}
--------------------------------------------------------------------------------
/src/workload_type_test.rs:
--------------------------------------------------------------------------------
use crate::workload_type::*;
use failure::Error;

// Minimal implementation providing only the required `add` method, so the
// trait's default method bodies are exercised below.
struct MockWorkloadType {}

impl WorkloadType for MockWorkloadType {
    fn add(&self) -> Result<(), Error> {
        Ok(())
    }
}

/// This is a canary test to make sure that modify and delete have default implementations.
#[test]
fn test_workload_type() {
    let mwlt = MockWorkloadType {};

    // Defaults: modify errors (unimplemented), delete is a no-op success.
    assert!(mwlt.modify().is_err());
    assert!(mwlt.delete().is_ok());
}
--------------------------------------------------------------------------------