├── .clippy.toml ├── .dockerignore ├── .gitignore ├── .space.kts ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── api_server ├── Cargo.toml └── src │ ├── etcd.rs │ ├── handler │ ├── binding.rs │ ├── function.rs │ ├── gpu_job.rs │ ├── hpa.rs │ ├── ingress.rs │ ├── metrics.rs │ ├── mod.rs │ ├── node.rs │ ├── pod.rs │ ├── replica_set.rs │ ├── response.rs │ ├── service.rs │ ├── utils.rs │ └── workflow.rs │ └── main.rs ├── assets ├── Control_Plane.png ├── Doc.pages ├── Doc.pdf ├── Pre.key ├── Pre.pdf └── Worker.png ├── controllers ├── Cargo.toml ├── examples │ └── dummy-controller │ │ └── main.rs └── src │ ├── endpoints │ ├── main.rs │ └── utils.rs │ ├── function │ ├── controller.rs │ ├── main.rs │ └── utils.rs │ ├── gpu_job │ ├── controller.rs │ ├── main.rs │ └── utils.rs │ ├── ingress │ ├── main.rs │ ├── nginx_ingress_config.rs │ └── utils.rs │ ├── podautoscaler │ ├── horizontal.rs │ ├── main.rs │ ├── metrics.rs │ ├── replica_calculator.rs │ └── utils.rs │ └── replica_set │ ├── controller.rs │ ├── main.rs │ └── utils.rs ├── examples ├── api-server │ └── config.yaml ├── config │ └── node │ │ └── node.env ├── function │ ├── .gitignore │ ├── README.md │ ├── handler.py │ ├── requirements.txt │ └── simple.yaml ├── gpu-code │ ├── Makefile │ ├── cuda.slurm │ ├── cuda_example.cu │ └── gpu.zip ├── gpu-job │ ├── gpu-job.yaml │ └── gpuserver-config.yaml ├── hpa │ ├── Dockerfile │ ├── README.md │ ├── index.php │ ├── prometheus.yml │ ├── replicaset.yaml │ ├── service.yaml │ └── simple.yaml ├── ingresses │ ├── nginx-httpd │ │ ├── httpd-pod.yaml │ │ ├── httpd-service.yaml │ │ ├── mix.yaml │ │ ├── multiple-host-ingress.yaml │ │ ├── multiple-path-ingress.yaml │ │ ├── nginx-pod.yaml │ │ ├── nginx-service.yaml │ │ ├── tomcat-pod.yaml │ │ └── tomcat-service.yaml │ └── simple-ingress.yaml ├── nodes │ ├── labels.yaml │ └── pod.yaml ├── pods │ ├── curl.yaml │ ├── demo.yaml │ ├── simple-httpd.yaml │ └── simple-nginx.yaml ├── replicasets │ └── simple.yaml ├── services │ └── 
simple-service.yaml └── workflow │ ├── .gitignore │ ├── guess │ ├── README.md │ ├── add │ │ ├── handler.py │ │ ├── requirements.txt │ │ └── simple.yaml │ ├── guess.yaml │ ├── minus3 │ │ ├── handler.py │ │ ├── requirements.txt │ │ └── simple.yaml │ ├── right │ │ ├── handler.py │ │ ├── requirements.txt │ │ └── simple.yaml │ └── wrong │ │ ├── handler.py │ │ ├── requirements.txt │ │ └── simple.yaml │ └── hello-workflow.yaml ├── resources ├── Cargo.toml └── src │ ├── config │ ├── kubelet.rs │ └── mod.rs │ ├── informer │ ├── mod.rs │ └── reflector.rs │ ├── lib.rs │ ├── models │ ├── etcd.rs │ └── mod.rs │ ├── objects │ ├── binding.rs │ ├── function.rs │ ├── gpu_job.rs │ ├── hpa.rs │ ├── ingress.rs │ ├── metrics.rs │ ├── mod.rs │ ├── node.rs │ ├── object_reference.rs │ ├── pod.rs │ ├── replica_set.rs │ ├── service.rs │ └── workflow.rs │ └── utils │ └── mod.rs ├── rkube_proxy ├── Cargo.toml └── src │ ├── k8s_iptables.rs │ ├── main.rs │ └── utils.rs ├── rkubectl ├── Cargo.toml └── src │ ├── completion.rs │ ├── create.rs │ ├── delete.rs │ ├── describe.rs │ ├── exec.rs │ ├── get.rs │ ├── logs.rs │ ├── main.rs │ ├── patch.rs │ └── utils.rs ├── rkubelet ├── Cargo.toml └── src │ ├── api.rs │ ├── config.rs │ ├── docker.rs │ ├── main.rs │ ├── models.rs │ ├── node_status_manager.rs │ ├── pod.rs │ ├── pod_worker.rs │ ├── status_manager.rs │ └── volume.rs ├── rustfmt.toml ├── scheduler ├── Cargo.toml └── src │ ├── algorithm │ ├── dummy.rs │ ├── mod.rs │ └── simple.rs │ ├── cache.rs │ ├── informer.rs │ ├── main.rs │ └── scheduler.rs ├── scripts ├── arm │ ├── build.sh │ ├── build_images.sh │ ├── docker │ │ ├── api_server │ │ │ └── Dockerfile │ │ ├── endpoints-controller │ │ │ └── Dockerfile │ │ ├── function-controller │ │ │ ├── Dockerfile │ │ │ └── function_wrapper │ │ │ │ ├── Dockerfile │ │ │ │ └── server.py │ │ ├── gpujob-controller │ │ │ └── Dockerfile │ │ ├── ingress-controller │ │ │ └── Dockerfile │ │ ├── podautoscaler │ │ │ └── Dockerfile │ │ ├── replicaset-controller │ │ │ └── 
Dockerfile │ │ ├── scheduler │ │ │ └── Dockerfile │ │ └── serverless-router │ │ │ └── Dockerfile │ ├── local-dev │ │ ├── .gitignore │ │ ├── coredns.service │ │ ├── daemon.example.json │ │ ├── dns │ │ │ ├── Corefile │ │ │ ├── ingress.db.template │ │ │ └── serverless_router.db.template │ │ ├── down.sh │ │ ├── flanneld.service │ │ ├── prometheus.yml │ │ └── up.sh │ ├── master │ │ ├── .gitignore │ │ ├── coredns.service │ │ ├── dns │ │ │ ├── Corefile │ │ │ ├── ingress.db.template │ │ │ └── serverless_router.db.template │ │ ├── docker-compose.yaml │ │ ├── down.sh │ │ ├── flanneld.service │ │ ├── prometheus.template.yml │ │ ├── rkubeproxy.service │ │ └── up.sh │ ├── node │ │ ├── .gitignore │ │ ├── daemon.example.json │ │ ├── down.sh │ │ ├── down_master.sh │ │ ├── flanneld.service │ │ ├── rkubelet.service │ │ ├── rkubelet.yaml │ │ ├── rkubeproxy.service │ │ ├── up.sh │ │ └── up_master.sh │ └── pull_images.sh ├── multipass │ ├── cloud-init-arm64.yaml │ └── init.sh └── x86 │ ├── docker │ ├── api_server │ │ └── Dockerfile │ ├── endpoints-controller │ │ └── Dockerfile │ ├── function-controller │ │ ├── Dockerfile │ │ └── function_wrapper │ │ │ ├── Dockerfile │ │ │ └── server.py │ ├── gpujob-controller │ │ └── Dockerfile │ ├── ingress-controller │ │ └── Dockerfile │ ├── podautoscaler │ │ └── Dockerfile │ ├── replicaset-controller │ │ └── Dockerfile │ ├── rust │ │ ├── Dockerfile │ │ ├── config.toml │ │ └── dummy │ │ │ ├── .gitignore │ │ │ ├── Cargo.lock │ │ │ ├── Cargo.toml │ │ │ └── src │ │ │ └── main.rs │ ├── scheduler │ │ └── Dockerfile │ └── serverless-router │ │ └── Dockerfile │ ├── local-dev │ ├── .gitignore │ ├── coredns.service │ ├── daemon.example.json │ ├── dns │ │ ├── Corefile │ │ ├── ingress.db.template │ │ └── serverless_router.db.template │ ├── down.sh │ ├── flanneld.service │ ├── prometheus.yml │ └── up.sh │ ├── master │ ├── .gitignore │ ├── dns │ │ ├── Corefile │ │ ├── ingress.db.template │ │ └── serverless_router.db.template │ ├── docker-compose.yaml │ ├── 
down.sh │ ├── flanneld.service │ ├── prometheus.yml │ ├── rkubeproxy.service │ └── up.sh │ └── node │ ├── deploy.sh │ └── rkubelet.service └── serverless ├── Cargo.toml └── src └── router ├── main.rs ├── route.rs ├── route └── workflow.rs ├── utils.rs └── workflow.rs /.clippy.toml: -------------------------------------------------------------------------------- 1 | enum-variant-size-threshold = 1000 2 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target 2 | .space.kts 3 | scripts 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | target 3 | .vscode 4 | .DS_Store -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "rkubectl", 4 | "api_server", 5 | "rkubelet", 6 | "resources", 7 | "controllers", 8 | "scheduler", 9 | "rkube_proxy", 10 | "serverless", 11 | ] 12 | 13 | [patch.crates-io] 14 | bollard = {git = 'https://hub.fastgit.xyz/y-young/bollard'} 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rMiniK8s 2 | 3 | A simple dockerized application management system like [Kubernetes](https://kubernetes.io/), written in Rust, plus a simple FaaS implementation. 4 | 5 | Course Project for SJTU-SE3356, 2022. 
6 | 7 | ## Features 8 | 9 | - Nodes 10 | - Registration and status update 11 | - Label modification 12 | - Pods 13 | - Multiple containers inside single pod 14 | - Shared volumes 15 | - Resource limits 16 | - Round-robin scheduling over multiple nodes with node selector support 17 | - Query logs and attach shell 18 | - Services 19 | - Round-robin load balancing 20 | - Overlay network over multiple nodes 21 | - ReplicaSets 22 | - Reconciliation to desired replicas count 23 | - Horizontal Pod Autoscalers 24 | - Horizontal autoscaling based on CPU/Memory metrics 25 | - Scale policy and behavior 26 | - Ingresses 27 | - Connect to services via domain name 28 | - Routing based on URL path 29 | - GPU Jobs 30 | - Submit CUDA jobs to HPC server and return results 31 | - Fault Tolerance 32 | - Pod containers auto recovery 33 | - Re-synchronization after API server restart 34 | - Serverless (FaaS) 35 | - Scale-to-zero and horizontal autoscaling 36 | - Function workflow with conditional branch 37 | - rKubectl 38 | - Create, get, describe, patch and delete 39 | - Shell auto-completion 40 | 41 | ## Architecture Overview 42 | 43 | ![image](assets/Control_Plane.png) 44 | ![image](assets/Worker.png) 45 | 46 | Refer to [Document](assets/Doc.pdf) and [Presentation](assets/Pre.pdf) for further information. 47 | 48 | ## Getting Started 49 | 50 | ### Using Automation Scripts 51 | 52 | Use `./scripts/ARCH/master/up.sh` to deploy the control plane and `./scripts/ARCH/node/up.sh` to deploy a node. Only ARM architecture is supported and tested currently. 53 | 54 | P.S. You may want to build the dependencies(docker images, binaries) and set up a local registry first. Refer to the script for further information. 55 | 56 | ### Manually 57 | 58 | Manual deployment works on both x86 and Arm architecture, and allows you to test only part of the components and features. 
59 | 60 | First of all, build all components using `cargo build`, and deploy an [etcd](https://etcd.io/) server, then start [API Server](api_server) using `cargo run -p api_server`. You can find help on [rKubectl](rkubectl) using `rkubectl help`. Configuration file templates and example resource definitions are available under the [examples](examples) directory. 61 | 62 | Now, deploy the components needed based on the following rules: 63 | 64 | - For actions related to pods, [Scheduler](scheduler) and [rKubelet](rkubelet) are needed 65 | - For actions related to services, you'll need [Endpoints Controller](controllers/src/endpoints), [rKube-proxy](rkube_proxy) 66 | - For ReplicaSets to work, [ReplicaSet Controller](controllers/src/replica_set) is needed 67 | - To operate a cluster with multiple nodes, [Flannel](scripts/arm/master/up.sh#L13) is needed 68 | - To support Pod autoscaling (including function autoscaling), you need to run [cAdvisor](https://github.com/google/cadvisor) on each worker node and [Prometheus](https://prometheus.io/) on the control plane node, then start [Pod Autoscaler](controllers/src/podautoscaler) 69 | - [Ingress Controller](controllers/src/ingress) is needed for Ingresses to work 70 | - [GPU Job Controller](controllers/src/gpu_job) is needed to run GPU jobs 71 | - To run functions and function workflows, you need to deploy [Function Controller](controllers/src/function) and [Serverless Router](serverless/src/router) 72 | - You'll need extra configuration on local DNS for domain-based requests to work (Ingress and Serverless); refer to [configuration templates](scripts/arm/master/dns) and automation scripts for details 73 | 74 | 75 | ## License 76 | 77 | This project is licensed under the [GPL license].
78 | 79 | [GPL license]: https://github.com/markcty/rMiniK8s/blob/main/LICENSE 80 | -------------------------------------------------------------------------------- /api_server/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "api_server" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | anyhow = "1.0.56" 8 | async-trait = "0.1.53" 9 | axum = {version = "0.5.1", features = ["ws", "multipart"]} 10 | axum-macros = "0.2.0" 11 | chrono = "0.4.19" 12 | config = {version = "0.13.0", features = ["yaml"]} 13 | dashmap = "5.3.3" 14 | deadpool = {version = "0.9.3", features = ["rt_tokio_1"]} 15 | etcd-client = "0.9.0" 16 | futures = "0.3.21" 17 | hyper = "0.14.18" 18 | lazy_static = "1.4.0" 19 | prometheus-http-api = "0.2.0" 20 | rand = "0.8.5" 21 | resources = {path = "../resources"} 22 | reqwest = {version = "0.11"} 23 | serde = {version = "1.0.136", features = ["derive"]} 24 | serde_json = "1.0.79" 25 | serde_yaml = "0.8.23" 26 | tokio = {version = "1.17.0", features = ["full"]} 27 | tokio-tungstenite = "0.17.1" 28 | tokio-util = "0.7.2" 29 | tower-http = {version = "0.3.3", features = ["fs"]} 30 | tracing = "0.1.33" 31 | tracing-subscriber = "0.3.10" 32 | uuid = {version = "0.8", features = ["v4"]} 33 | -------------------------------------------------------------------------------- /api_server/src/handler/binding.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use axum::{Extension, Json}; 4 | use axum_macros::debug_handler; 5 | use resources::{ 6 | models::{ErrResponse, Response}, 7 | objects::{ 8 | node::NodeAddressType, 9 | pod::{PodCondition, PodConditionType}, 10 | KubeObject, Object, 11 | }, 12 | }; 13 | 14 | use crate::{ 15 | handler::{ 16 | response::HandlerResult, 17 | utils::{etcd_get_object, etcd_put}, 18 | }, 19 | AppState, 20 | }; 21 | 22 | #[debug_handler] 23 | #[allow(dead_code)] 24 | pub async fn 
bind( 25 | Extension(app_state): Extension>, 26 | Json(payload): Json, 27 | ) -> HandlerResult<()> { 28 | // check payload 29 | if let KubeObject::Binding(binding) = &payload { 30 | // get node 31 | let object = etcd_get_object( 32 | &app_state, 33 | format!("/api/v1/nodes/{}", binding.target.name), 34 | Some("node"), 35 | ) 36 | .await?; 37 | let node = match object { 38 | KubeObject::Node(node) => node, 39 | _ => { 40 | return Err(ErrResponse::new( 41 | String::from("bind error"), 42 | Some(format!( 43 | "Expecting bind target to be node kind, got {}", 44 | object.kind() 45 | )), 46 | )); 47 | }, 48 | }; 49 | 50 | // get pod object 51 | let mut object: KubeObject = etcd_get_object( 52 | &app_state, 53 | format!("/api/v1/pods/{}", binding.metadata.name), 54 | Some("pod"), 55 | ) 56 | .await?; 57 | // update pod 58 | if let KubeObject::Pod(ref mut pod) = object { 59 | let mut status = &mut pod.status.as_mut().expect("Pod should have a status"); 60 | status.conditions.insert( 61 | PodConditionType::PodScheduled, 62 | PodCondition { 63 | status: true, 64 | }, 65 | ); 66 | status.host_ip = node 67 | .status 68 | .addresses 69 | .get(&NodeAddressType::InternalIP) 70 | .map(|addr| addr.to_owned()); 71 | pod.spec.node_name = Some(binding.target.name.clone()); 72 | } else { 73 | tracing::error!("object kind error"); 74 | return Err(ErrResponse::new( 75 | String::from("bind error"), 76 | Some(format!( 77 | "the kind of object: {}, which is not pod", 78 | object.kind() 79 | )), 80 | )); 81 | } 82 | // put it back 83 | etcd_put(&app_state, &object).await?; 84 | etcd_put(&app_state, &payload).await?; 85 | let res = Response::new(Some("bind successfully".to_string()), None); 86 | Ok(Json(res)) 87 | } else { 88 | let res = ErrResponse::new( 89 | "bind error".to_string(), 90 | Some(format!( 91 | "the kind of payload: {}, which is not binding", 92 | payload.kind() 93 | )), 94 | ); 95 | Err(res) 96 | } 97 | } 98 | 
-------------------------------------------------------------------------------- /api_server/src/handler/ingress.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use axum::{ 4 | extract::{Path, WebSocketUpgrade}, 5 | response::IntoResponse, 6 | Extension, Json, 7 | }; 8 | use axum_macros::debug_handler; 9 | use resources::{ 10 | models::{ErrResponse, Response}, 11 | objects::{KubeObject, Object}, 12 | }; 13 | use uuid::Uuid; 14 | 15 | use super::{response::HandlerResult, utils::*}; 16 | use crate::{etcd::forward_watch_to_ws, AppState}; 17 | 18 | #[debug_handler] 19 | pub async fn create( 20 | Extension(app_state): Extension>, 21 | Json(mut payload): Json, 22 | ) -> HandlerResult<()> { 23 | // TODO: validate payload 24 | if let KubeObject::Ingress(ref mut ingress) = payload { 25 | ingress.metadata.uid = Some(Uuid::new_v4()); 26 | 27 | for rule in ingress.spec.rules.iter_mut() { 28 | if let Some(ref host) = rule.host { 29 | if !host.ends_with(".minik8s.com") { 30 | return Err(ErrResponse::new( 31 | format!("Error host {}. 
Host should ends with .minik8s.com", host), 32 | None, 33 | )); 34 | } 35 | } else { 36 | rule.host = Some(gen_rand_host()); 37 | } 38 | } 39 | 40 | etcd_put(&app_state, &payload).await?; 41 | let res = Response::new(Some(format!("ingress/{} created", payload.name())), None); 42 | Ok(Json(res)) 43 | } else { 44 | // TODO: fill business logic and error handling 45 | return Err(ErrResponse::new( 46 | String::from("Error creating ingress"), 47 | Some(format!("Expecting ingress, got {}", payload.kind())), 48 | )); 49 | } 50 | } 51 | 52 | #[debug_handler] 53 | pub async fn update( 54 | Extension(app_state): Extension>, 55 | Json(payload): Json, 56 | ) -> HandlerResult<()> { 57 | // TODO: validate payload 58 | if let KubeObject::Ingress(_) = payload { 59 | etcd_put(&app_state, &payload).await?; 60 | let res = Response::new(Some(format!("ingress/{} updated", payload.name())), None); 61 | Ok(Json(res)) 62 | } else { 63 | // TODO: fill business logic and error handling 64 | return Err(ErrResponse::new( 65 | String::from("Error creating ingress"), 66 | Some(format!("Expecting ingress, got {}", payload.kind())), 67 | )); 68 | } 69 | } 70 | 71 | #[debug_handler] 72 | pub async fn list( 73 | Extension(app_state): Extension>, 74 | ) -> HandlerResult> { 75 | let ingresses = 76 | etcd_get_objects_by_prefix(&app_state, "/api/v1/ingresses".to_string(), Some("ingress")) 77 | .await?; 78 | 79 | let res = Response::new(None, Some(ingresses)); 80 | Ok(Json(res)) 81 | } 82 | 83 | #[debug_handler] 84 | pub async fn get( 85 | Extension(app_state): Extension>, 86 | Path(name): Path, 87 | ) -> HandlerResult { 88 | let ingress = etcd_get_object( 89 | &app_state, 90 | format!("/api/v1/ingresses/{}", name), 91 | Some("ingress"), 92 | ) 93 | .await?; 94 | let res = Response::new(None, Some(ingress)); 95 | Ok(Json(res)) 96 | } 97 | 98 | #[debug_handler] 99 | pub async fn delete( 100 | Extension(app_state): Extension>, 101 | Path(name): Path, 102 | ) -> HandlerResult<()> { 103 | 
etcd_delete(&app_state, format!("/api/v1/ingresses/{}", name)).await?;
    let res = Response::new(Some(format!("ingresses/{} deleted", name)), None);
    Ok(Json(res))
}

/// Upgrades to a websocket and forwards etcd watch events for all ingresses.
#[debug_handler]
pub async fn watch_all(
    Extension(app_state): Extension<Arc<AppState>>,
    ws: WebSocketUpgrade,
) -> Result<impl IntoResponse, ErrResponse> {
    // open etcd watch connection
    let (watcher, stream) = etcd_watch_uri(&app_state, "/api/v1/ingresses").await?;

    Ok(ws.on_upgrade(|socket| async move {
        forward_watch_to_ws::<KubeObject>(socket, watcher, stream).await
    }))
}

// api_server/src/handler/mod.rs
use resources::models::ErrResponse;

use crate::{etcd::EtcdClient, AppState};

pub mod binding;
pub mod function;
pub mod gpu_job;
pub mod hpa;
pub mod ingress;
pub mod metrics;
pub mod node;
pub mod pod;
pub mod replica_set;
mod response;
pub mod service;
mod utils;
pub mod workflow;

impl AppState {
    /// Checks out an etcd client from the connection pool, translating pool
    /// failures into an `ErrResponse` the handlers can return directly.
    pub async fn get_client(&self) -> Result<EtcdClient, ErrResponse> {
        let client = self.etcd_pool.get().await.map_err(|_| {
            tracing::error!("Failed to get etcd client");
            ErrResponse::new("Failed to get etcd Client".to_string(), None)
        })?;
        Ok(client)
    }
}

// api_server/src/handler/response.rs
use axum::{http::StatusCode, Json};
use resources::models::{ErrResponse, Response};

use crate::etcd::EtcdError;

/// Common result alias for all API handlers.
pub type HandlerResult<T> = Result<Json<Response<T>>, ErrResponse>;

impl From<EtcdError> for ErrResponse {
    fn from(err: EtcdError) -> Self {
        if let Some(cause) = err.cause {
            tracing::debug!("Etcd Error: {}, caused by: {}", err.msg,
cause);
        } else {
            tracing::debug!("Etcd Error: {}", err.msg);
        }
        Self {
            msg: "Etcd Error".to_string(),
            // the error of database should not be forwarded to client
            cause: None,
            status: StatusCode::INTERNAL_SERVER_ERROR,
        }
    }
}

// api_server/src/handler/service.rs
use std::sync::Arc;

use axum::{
    extract::{Path, WebSocketUpgrade},
    response::IntoResponse,
    Extension, Json,
};
use axum_macros::debug_handler;
use resources::{
    models::{ErrResponse, Response},
    objects::{KubeObject, Object},
};
use uuid::Uuid;

use super::{response::HandlerResult, utils::*};
use crate::{etcd::forward_watch_to_ws, AppState};

/// Creates a Service: assigns a fresh UID, rejects duplicated cluster IPs and
/// allocates one from the pool when the spec leaves it empty.
#[debug_handler]
pub async fn create(
    Extension(app_state): Extension<Arc<AppState>>,
    Json(mut payload): Json<KubeObject>,
) -> HandlerResult<()> {
    // TODO: validate payload
    if let KubeObject::Service(ref mut service) = payload {
        service.metadata.uid = Some(Uuid::new_v4());

        if let Some(ip) = service.spec.cluster_ip {
            // Each service needs a unique cluster IP.
            if app_state.service_ip_pool.contains(&ip) {
                return Err(ErrResponse::new(
                    "ClusterIp exists, try assign a new ClusterIP".to_string(),
                    None,
                ));
            }
        } else {
            service.spec.cluster_ip = Some(gen_service_ip(&app_state));
        }

        etcd_put(&app_state, &payload).await?;
        let res = Response::new(Some(format!("service/{} created", payload.name())), None);
        Ok(Json(res))
    } else {
        Err(ErrResponse::new(
            String::from("Error creating service"),
            Some(format!("Expecting service, got {}", payload.kind())),
        ))
    }
}

/// Replaces an existing Service definition.
#[debug_handler]
pub async fn update(
    Extension(app_state): Extension<Arc<AppState>>,
    Json(payload): Json<KubeObject>,
) -> HandlerResult<()> {
    // TODO: validate payload
    if let KubeObject::Service(_) = payload {
        etcd_put(&app_state, &payload).await?;
        let res = Response::new(Some(format!("service/{} updated", payload.name())), None);
        Ok(Json(res))
    } else {
        // Fixed copy-pasted message: this is the update handler, not create.
        Err(ErrResponse::new(
            String::from("Error updating service"),
            Some(format!("Expecting service, got {}", payload.kind())),
        ))
    }
}

/// Lists all Services.
#[debug_handler]
pub async fn list(
    Extension(app_state): Extension<Arc<AppState>>,
) -> HandlerResult<Vec<KubeObject>> {
    let services =
        etcd_get_objects_by_prefix(&app_state, "/api/v1/services".to_string(), Some("service"))
            .await?;

    let res = Response::new(None, Some(services));
    Ok(Json(res))
}

/// Gets a single Service by name.
#[debug_handler]
pub async fn get(
    Extension(app_state): Extension<Arc<AppState>>,
    Path(name): Path<String>,
) -> HandlerResult<KubeObject> {
    let service = etcd_get_object(
        &app_state,
        format!("/api/v1/services/{}", name),
        Some("service"),
    )
    .await?;
    let res = Response::new(None, Some(service));
    Ok(Json(res))
}

/// Deletes the Service with the given name.
#[debug_handler]
pub async fn delete(
    Extension(app_state): Extension<Arc<AppState>>,
    Path(name): Path<String>,
) -> HandlerResult<()> {
    etcd_delete(&app_state, format!("/api/v1/services/{}", name)).await?;
    let res = Response::new(Some(format!("services/{} deleted", name)), None);
    Ok(Json(res))
}

/// Upgrades to a websocket and forwards etcd watch events for all services.
#[debug_handler]
pub async fn watch_all(
    Extension(app_state): Extension<Arc<AppState>>,
    ws: WebSocketUpgrade,
) -> Result<impl IntoResponse, ErrResponse> {
    // open etcd watch connection
    let (watcher, stream) = etcd_watch_uri(&app_state, "/api/v1/services").await?;

    Ok(ws.on_upgrade(|socket| async move {
        forward_watch_to_ws::<KubeObject>(socket, watcher, stream).await
    }))
}
-------------------------------------------------------------------------------- /assets/Control_Plane.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/assets/Control_Plane.png -------------------------------------------------------------------------------- /assets/Doc.pages: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/assets/Doc.pages -------------------------------------------------------------------------------- /assets/Doc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/assets/Doc.pdf -------------------------------------------------------------------------------- /assets/Pre.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/assets/Pre.key -------------------------------------------------------------------------------- /assets/Pre.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/assets/Pre.pdf -------------------------------------------------------------------------------- /assets/Worker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/assets/Worker.png -------------------------------------------------------------------------------- /controllers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "controllers" 4 | 
version = "0.1.0" 5 | 6 | [[example]] 7 | name = "dummy-controller" 8 | 9 | [[bin]] 10 | name = "endpoints-controller" 11 | path = "src/endpoints/main.rs" 12 | 13 | [[bin]] 14 | name = "replicaset-controller" 15 | path = "src/replica_set/main.rs" 16 | 17 | [[bin]] 18 | name = "ingress-controller" 19 | path = "src/ingress/main.rs" 20 | 21 | [[bin]] 22 | name = "podautoscaler" 23 | path = "src/podautoscaler/main.rs" 24 | 25 | [[bin]] 26 | name = "gpujob-controller" 27 | path = "src/gpu_job/main.rs" 28 | 29 | [[bin]] 30 | name = "function-controller" 31 | path = "src/function/main.rs" 32 | 33 | [dependencies] 34 | anyhow = {version = "1.0.56", features = ["backtrace"]} 35 | chrono = "0.4.19" 36 | config = {version = "0.13.1", features = ["yaml"]} 37 | dotenv = "0.15.0" 38 | fs_extra = "1.2.0" 39 | futures-delay-queue = "0.5.2" 40 | futures-intrusive = "0.4" 41 | lazy_static = "1.4.0" 42 | nginx-config = "0.13.2" 43 | parking_lot = "0.12.0" 44 | reqwest = {version = "0.11", features = ["blocking", "json"]} 45 | resources = {path = "../resources"} 46 | serde = { version = "1.0.136", features = ["derive"] } 47 | tokio = {version = "1.17.0", features = ["full"]} 48 | tokio-tungstenite = "0.17.1" 49 | tracing = "0.1.32" 50 | tracing-subscriber = "0.3.10" 51 | -------------------------------------------------------------------------------- /controllers/examples/dummy-controller/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Error, Result}; 2 | use reqwest::Url; 3 | use resources::{ 4 | informer::{EventHandler, Informer, ListerWatcher, ResyncHandler, WsStream}, 5 | models, 6 | objects::pod::Pod, 7 | }; 8 | use tokio::sync::mpsc; 9 | use tokio_tungstenite::connect_async; 10 | 11 | // noinspection DuplicatedCode 12 | #[tokio::main] 13 | async fn main() -> Result<()> { 14 | tracing_subscriber::fmt::init(); 15 | 16 | // create list watcher closures 17 | // TODO: maybe some crate or macros can simplify the 
tedious boxed closure creation in heap 18 | let lw = ListerWatcher { 19 | lister: Box::new(|_| { 20 | Box::pin(async { 21 | let res = reqwest::get("http://localhost:8080/api/v1/pods") 22 | .await? 23 | .json::>>() 24 | .await?; 25 | let res = res.data.ok_or_else(|| anyhow!("Lister failed"))?; 26 | Ok::, Error>(res) 27 | }) 28 | }), 29 | watcher: Box::new(|_| { 30 | Box::pin(async { 31 | let url = Url::parse("ws://localhost:8080/api/v1/watch/pods")?; 32 | let (stream, _) = connect_async(url).await?; 33 | Ok::(stream) 34 | }) 35 | }), 36 | }; 37 | 38 | // create event handler closures 39 | let (tx_add, mut rx) = mpsc::channel::(16); 40 | let tx_update = tx_add.clone(); 41 | let tx_delete = tx_add.clone(); 42 | let eh = EventHandler:: { 43 | add_cls: Box::new(move |new| { 44 | // TODO: this is not good: tx is copied every time add_cls is called, but I can't find a better way 45 | let tx_add = tx_add.clone(); 46 | Box::pin(async move { 47 | let message = format!("add\n{:#?}", new); 48 | tx_add.send(message).await?; 49 | Ok(()) 50 | }) 51 | }), 52 | update_cls: Box::new(move |(old, new)| { 53 | let tx_update = tx_update.clone(); 54 | Box::pin(async move { 55 | let message = format!("update\n{:#?}\n{:#?}", old, new); 56 | tx_update.send(message).await?; 57 | Ok(()) 58 | }) 59 | }), 60 | delete_cls: Box::new(move |old| { 61 | let tx_delete = tx_delete.clone(); 62 | Box::pin(async move { 63 | let message = format!("delete\n{:#?}", old); 64 | tx_delete.send(message).await?; 65 | Ok(()) 66 | }) 67 | }), 68 | }; 69 | 70 | // TODO: fill real logic 71 | let rh = ResyncHandler(Box::new(move |_| { 72 | Box::pin(async move { 73 | println!("Resync\n"); 74 | Ok(()) 75 | }) 76 | })); 77 | // start the informer 78 | let informer = Informer::new(lw, eh, rh); 79 | let store = informer.get_store(); 80 | let informer_handler = tokio::spawn(async move { informer.run().await }); 81 | 82 | while let Some(s) = rx.recv().await { 83 | // you can do deserializing and controller related stuff 
here 84 | println!("{}", s); 85 | println!("store size: {}", store.read().await.len()); 86 | } 87 | 88 | informer_handler.await? 89 | } 90 | -------------------------------------------------------------------------------- /controllers/src/function/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | use anyhow::{Context, Result}; 5 | use config::{Config, File}; 6 | use controller::FunctionController; 7 | use resources::config::ClusterConfig; 8 | 9 | mod controller; 10 | mod utils; 11 | 12 | const TMP_DIR: &str = "/tmp/minik8s/function"; 13 | const TEMPLATES_DIR: &str = "/templates/function_wrapper"; 14 | const DOCKER_REGISTRY: &str = "minik8s.xyz"; 15 | 16 | lazy_static! { 17 | pub static ref CONFIG: ClusterConfig = Config::builder() 18 | .add_source(File::with_name("/etc/rminik8s/controller-manager.yaml").required(false)) 19 | .set_override_option("apiServerUrl", std::env::var("API_SERVER_URL").ok()) 20 | .unwrap() 21 | .set_override_option( 22 | "apiServerWatchUrl", 23 | std::env::var("API_SERVER_WATCH_URL").ok(), 24 | ) 25 | .unwrap() 26 | .build() 27 | .unwrap_or_default() 28 | .try_deserialize::() 29 | .with_context(|| "Failed to parse config".to_string()) 30 | .unwrap_or_default(); 31 | } 32 | 33 | #[tokio::main] 34 | async fn main() -> Result<()> { 35 | tracing_subscriber::fmt::init(); 36 | println!("api_server_url: {}", CONFIG.api_server_url); 37 | 38 | let mut controller = FunctionController::new(); 39 | controller.run().await?; 40 | Ok(()) 41 | } 42 | -------------------------------------------------------------------------------- /controllers/src/function/utils.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io::{BufRead, BufReader}, 3 | process::ChildStdout, 4 | }; 5 | 6 | use anyhow::{anyhow, Error}; 7 | use reqwest::Url; 8 | use resources::{ 9 | informer::{EventHandler, Informer, ListerWatcher, 
ResyncHandler, WsStream}, 10 | models::Response, 11 | objects::Object, 12 | }; 13 | use tokio::sync::mpsc::Sender; 14 | use tokio_tungstenite::connect_async; 15 | 16 | use crate::CONFIG; 17 | 18 | #[derive(Debug)] 19 | pub enum Event { 20 | Add(T), 21 | Update(T, T), 22 | Delete(T), 23 | } 24 | 25 | #[derive(Debug)] 26 | pub struct ResyncNotification; 27 | 28 | pub fn create_lister_watcher(path: String) -> ListerWatcher { 29 | let list_url = format!("{}/api/v1/{}", CONFIG.api_server_url, path); 30 | let watch_url = format!("{}/api/v1/watch/{}", CONFIG.api_server_watch_url, path); 31 | ListerWatcher { 32 | lister: Box::new(move |_| { 33 | let list_url = list_url.clone(); 34 | Box::pin(async { 35 | let res = reqwest::get(list_url) 36 | .await? 37 | .json::>>() 38 | .await?; 39 | let res = res.data.ok_or_else(|| anyhow!("Lister failed"))?; 40 | Ok::, Error>(res) 41 | }) 42 | }), 43 | watcher: Box::new(move |_| { 44 | let watch_url = watch_url.clone(); 45 | Box::pin(async move { 46 | let url = Url::parse(watch_url.as_str())?; 47 | let (stream, _) = connect_async(url).await?; 48 | Ok::(stream) 49 | }) 50 | }), 51 | } 52 | } 53 | 54 | pub fn create_informer( 55 | path: String, 56 | tx: Sender>, 57 | resync_tx: Sender, 58 | ) -> Informer { 59 | let lw = create_lister_watcher(path); 60 | 61 | let tx_add = tx; 62 | let tx_update = tx_add.clone(); 63 | let tx_delete = tx_add.clone(); 64 | let eh = EventHandler:: { 65 | add_cls: Box::new(move |new| { 66 | // TODO: this is not good: tx is copied every time add_cls is called, but I can't find a better way 67 | let tx_add = tx_add.clone(); 68 | Box::pin(async move { 69 | tx_add.send(Event::::Add(new)).await?; 70 | Ok(()) 71 | }) 72 | }), 73 | update_cls: Box::new(move |(old, new)| { 74 | let tx_update = tx_update.clone(); 75 | Box::pin(async move { 76 | tx_update.send(Event::::Update(old, new)).await?; 77 | Ok(()) 78 | }) 79 | }), 80 | delete_cls: Box::new(move |old| { 81 | let tx_delete = tx_delete.clone(); 82 | Box::pin(async 
move { 83 | tx_delete.send(Event::::Delete(old)).await?; 84 | Ok(()) 85 | }) 86 | }), 87 | }; 88 | let rh = ResyncHandler(Box::new(move |()| { 89 | let resync_tx = resync_tx.clone(); 90 | Box::pin(async move { 91 | resync_tx.send(ResyncNotification).await?; 92 | Ok(()) 93 | }) 94 | })); 95 | 96 | Informer::new(lw, eh, rh) 97 | } 98 | 99 | pub fn log_command(stdout: ChildStdout) { 100 | let reader = BufReader::new(stdout); 101 | 102 | reader 103 | .lines() 104 | .filter_map(|line| line.ok()) 105 | .for_each(|line| tracing::info!("{}", line)); 106 | } 107 | -------------------------------------------------------------------------------- /controllers/src/gpu_job/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | use anyhow::{Context, Result}; 5 | use config::{Config, Environment, File}; 6 | use controller::GpuJobController; 7 | use resources::config::ClusterConfig; 8 | use serde::{Deserialize, Serialize}; 9 | 10 | mod controller; 11 | mod utils; 12 | 13 | const TMP_DIR: &str = "/tmp/minik8s/job"; 14 | const DOCKER_REGISTRY: &str = "minik8s.xyz"; 15 | const BASE_IMG: &str = "minik8s.xyz/gpu_server:latest"; 16 | 17 | #[derive(Debug, Serialize, Deserialize, Clone)] 18 | pub struct ServerConfig { 19 | pub username: String, 20 | pub password: String, 21 | } 22 | 23 | lazy_static! 
{ 24 | pub static ref CONFIG: ClusterConfig = Config::builder() 25 | .add_source(File::with_name("/etc/rminik8s/controller-manager.yaml").required(false)) 26 | .set_override_option("apiServerUrl", std::env::var("API_SERVER_URL").ok()) 27 | .unwrap() 28 | .set_override_option( 29 | "apiServerWatchUrl", 30 | std::env::var("API_SERVER_WATCH_URL").ok(), 31 | ) 32 | .unwrap() 33 | .build() 34 | .unwrap_or_default() 35 | .try_deserialize::() 36 | .with_context(|| "Failed to parse config".to_string()) 37 | .unwrap_or_default(); 38 | pub static ref GPU_SERVER_CONFIG: ServerConfig = Config::builder() 39 | .add_source(File::with_name("/etc/rminik8s/gpuserver-config.yaml").required(false)) 40 | .add_source(Environment::default()) 41 | .build() 42 | .unwrap_or_default() 43 | .try_deserialize::() 44 | .with_context(|| "Failed to parse config".to_string()) 45 | .unwrap(); 46 | } 47 | 48 | #[tokio::main] 49 | async fn main() -> Result<()> { 50 | tracing_subscriber::fmt::init(); 51 | 52 | let mut controller = GpuJobController::new(); 53 | controller.run().await?; 54 | Ok(()) 55 | } 56 | -------------------------------------------------------------------------------- /controllers/src/gpu_job/utils.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Context, Error, Result}; 2 | use reqwest::Url; 3 | use resources::{ 4 | informer::{EventHandler, Informer, ListerWatcher, ResyncHandler, WsStream}, 5 | models::Response, 6 | objects::{ 7 | gpu_job::{GpuJob, GpuJobStatus}, 8 | Object, 9 | }, 10 | }; 11 | use tokio::sync::mpsc::Sender; 12 | use tokio_tungstenite::connect_async; 13 | 14 | use crate::CONFIG; 15 | 16 | #[derive(Debug)] 17 | pub enum Event { 18 | Add(T), 19 | Update(T, T), 20 | Delete(T), 21 | } 22 | 23 | #[derive(Debug)] 24 | pub struct ResyncNotification; 25 | 26 | pub fn create_lister_watcher(path: String) -> ListerWatcher { 27 | let list_url = format!("{}/api/v1/{}", CONFIG.api_server_url, path); 28 | let watch_url 
= format!("{}/api/v1/watch/{}", CONFIG.api_server_watch_url, path); 29 | ListerWatcher { 30 | lister: Box::new(move |_| { 31 | let list_url = list_url.clone(); 32 | Box::pin(async { 33 | let res = reqwest::get(list_url) 34 | .await? 35 | .json::>>() 36 | .await?; 37 | let res = res.data.ok_or_else(|| anyhow!("Lister failed"))?; 38 | Ok::, Error>(res) 39 | }) 40 | }), 41 | watcher: Box::new(move |_| { 42 | let watch_url = watch_url.clone(); 43 | Box::pin(async move { 44 | let url = Url::parse(watch_url.as_str())?; 45 | let (stream, _) = connect_async(url).await?; 46 | Ok::(stream) 47 | }) 48 | }), 49 | } 50 | } 51 | 52 | pub fn create_informer( 53 | path: String, 54 | tx: Sender>, 55 | resync_tx: Sender, 56 | ) -> Informer { 57 | let lw = create_lister_watcher(path); 58 | 59 | let tx_add = tx; 60 | let tx_update = tx_add.clone(); 61 | let tx_delete = tx_add.clone(); 62 | let eh = EventHandler:: { 63 | add_cls: Box::new(move |new| { 64 | // TODO: this is not good: tx is copied every time add_cls is called, but I can't find a better way 65 | let tx_add = tx_add.clone(); 66 | Box::pin(async move { 67 | tx_add.send(Event::::Add(new)).await?; 68 | Ok(()) 69 | }) 70 | }), 71 | update_cls: Box::new(move |(old, new)| { 72 | let tx_update = tx_update.clone(); 73 | Box::pin(async move { 74 | tx_update.send(Event::::Update(old, new)).await?; 75 | Ok(()) 76 | }) 77 | }), 78 | delete_cls: Box::new(move |old| { 79 | let tx_delete = tx_delete.clone(); 80 | Box::pin(async move { 81 | tx_delete.send(Event::::Delete(old)).await?; 82 | Ok(()) 83 | }) 84 | }), 85 | }; 86 | let rh = ResyncHandler(Box::new(move |()| { 87 | let resync_tx = resync_tx.clone(); 88 | Box::pin(async move { 89 | resync_tx.send(ResyncNotification).await?; 90 | Ok(()) 91 | }) 92 | })); 93 | 94 | Informer::new(lw, eh, rh) 95 | } 96 | 97 | pub fn get_job_status(job: &GpuJob) -> Result { 98 | let job_status = job 99 | .status 100 | .as_ref() 101 | .with_context(|| "GpuJob has no status")?; 102 | 
Ok(job_status.to_owned())
}

/// Extract the code filename recorded in a [`GpuJob`]'s status.
///
/// # Errors
/// Returns an error if the job has no status, or the status carries no
/// filename (e.g. the job's code archive has not been uploaded yet).
pub fn get_job_filename(job: &GpuJob) -> Result<String> {
    let job_status = job
        .status
        .as_ref()
        .with_context(|| "GpuJob has no status")?;
    let filename = job_status
        .filename
        .as_ref()
        // Fixed typo in the error message: "filname" -> "filename".
        .with_context(|| "GpuJob has no code filename")?;
    Ok(filename.to_owned())
}

// -------------------------------------------------------------------------------- /controllers/src/ingress/main.rs: --------------------------------------------------------------------------------

#[macro_use]
extern crate lazy_static;

use std::env;

use nginx_ingress_config::{IngressHost, NginxIngressConfig};

mod nginx_ingress_config;
mod utils;

use anyhow::Result;
use reqwest::Url;
use resources::{
    informer::Store,
    models::NodeConfig,
    objects::{ingress::Ingress, service::Service},
};
use tokio::sync::mpsc;

use crate::utils::{create_ingress_informer, create_svc_informer};

lazy_static!
{
    /// Node-level endpoints (etcd, API server), read from `/etc/rminik8s/node.env`
    /// with localhost defaults when the env vars are absent.
    static ref CONFIG: NodeConfig = {
        dotenv::from_path("/etc/rminik8s/node.env").ok();
        NodeConfig {
            etcd_endpoint: match env::var("ETCD_ENDPOINT") {
                Ok(url) => Url::parse(url.as_str()).unwrap(),
                Err(_) => Url::parse("http://127.0.0.1:2379/").unwrap(),
            },
            api_server_endpoint: match env::var("API_SERVER_ENDPOINT") {
                Ok(url) => Url::parse(url.as_str()).unwrap(),
                Err(_) => Url::parse("http://127.0.0.1:8080/").unwrap(),
            },
        }
    };
}

/// Marker message: "something changed, re-render the nginx config".
#[derive(Debug)]
pub struct Notification;

#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();
    // Fixed copy-paste bug: this binary is the *Ingress* controller,
    // not the Endpoints controller.
    tracing::info!("Ingress controller started");

    let (tx, mut rx) = mpsc::channel::<Notification>(16);

    // Watch Services and Ingresses; any change triggers a full nginx reconfigure.
    let svc_informer = create_svc_informer(tx.clone());
    let svc_store = svc_informer.get_store();
    let ingress_informer = create_ingress_informer(tx.clone());
    let ingress_store = ingress_informer.get_store();

    // Handles renamed to match the informer they actually drive
    // (they were previously labeled pod/svc, which was misleading).
    let svc_informer_handler = tokio::spawn(async move { svc_informer.run().await });
    let ingress_informer_handler = tokio::spawn(async move { ingress_informer.run().await });

    while rx.recv().await.is_some() {
        if let Err(e) = reconfigure_nginx(ingress_store.to_owned(), svc_store.to_owned()).await {
            tracing::warn!("Error handling notification, caused by: {}", e);
        }
    }

    svc_informer_handler.await??;
    ingress_informer_handler.await??;

    Ok(())
}

/// Rebuild the whole nginx config from the current Ingress and Service stores
/// and reload nginx. Paths whose backing Service is unknown are skipped with a
/// warning rather than failing the whole reconfigure.
async fn reconfigure_nginx(ingress_store: Store<Ingress>, svc_store: Store<Service>) -> Result<()> {
    let mut config = NginxIngressConfig::new();
    let ingress_store = ingress_store.read().await;
    let svc_store = svc_store.read().await;

    for (_, ingress) in ingress_store.iter() {
        let ingress_rules = &ingress.spec.rules;

        for rule in ingress_rules.iter() {
            let mut host = IngressHost::new(rule.host.as_ref().unwrap());

            for path in rule.paths.iter() {
                let
svc_uri = format!("/api/v1/services/{}", path.service.name); 81 | if let Some(svc) = svc_store.get(svc_uri.as_str()) { 82 | let cluster_ip = svc.spec.cluster_ip.as_ref().unwrap(); 83 | host.add_path(&path.path, cluster_ip, &path.service.port); 84 | tracing::info!( 85 | "add path {}{} for service {}:{}:{}", 86 | rule.host.as_ref().unwrap(), 87 | path.path, 88 | path.service.name, 89 | cluster_ip, 90 | path.service.port 91 | ); 92 | } else { 93 | tracing::warn!( 94 | "Failed to add service {} to path {}, no such service exists", 95 | path.service.name, 96 | path.path 97 | ); 98 | } 99 | } 100 | 101 | config.add_host(host); 102 | } 103 | } 104 | 105 | config.flush(); 106 | Ok(()) 107 | } 108 | -------------------------------------------------------------------------------- /controllers/src/ingress/nginx_ingress_config.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::OpenOptions, io::Write, net::Ipv4Addr, process::Command}; 2 | 3 | const CONFIG_HEADER: &str = r#" 4 | user www-data; 5 | worker_processes auto; 6 | pid /run/nginx.pid; 7 | include /etc/nginx/modules-enabled/*.conf; 8 | 9 | events { 10 | worker_connections 768; 11 | } 12 | 13 | "#; 14 | 15 | const CONFIG_BASE: &str = r#" 16 | http { 17 | SERVERS 18 | 19 | server { 20 | listen 80 default_server; 21 | server_name _; 22 | 23 | # Everything is a 404 24 | location / { 25 | return 404; 26 | } 27 | } 28 | } 29 | "#; 30 | 31 | const SERVER_BASE: &str = r#" 32 | server { 33 | server_name SERVER_NAME; 34 | listen 80; 35 | 36 | LOCATIONS 37 | } 38 | "#; 39 | 40 | const LOCATION_BASE: &str = r#" 41 | location PATH { 42 | proxy_pass http://IP:PORT/; 43 | } 44 | "#; 45 | 46 | pub struct NginxIngressConfig { 47 | servers: Vec, 48 | } 49 | 50 | pub struct IngressHost { 51 | host: String, 52 | paths: Vec, 53 | } 54 | 55 | pub struct IngressPath { 56 | path: String, 57 | ip: Ipv4Addr, 58 | port: u16, 59 | } 60 | 61 | impl NginxIngressConfig { 62 | pub fn new() -> Self { 
63 | Self { 64 | servers: vec![], 65 | } 66 | } 67 | 68 | pub fn add_host(&mut self, host: IngressHost) { 69 | self.servers.push(host); 70 | } 71 | 72 | pub fn flush(&self) { 73 | if self.servers.is_empty() { 74 | return; 75 | } 76 | let config = self.to_string(); 77 | let mut file = OpenOptions::new() 78 | .write(true) 79 | .truncate(true) 80 | .create(true) 81 | .open("/etc/nginx/nginx.conf") 82 | .unwrap(); 83 | file.write_all(config.as_bytes()).unwrap(); 84 | Command::new("nginx") 85 | .args(["-s", "reload"]) 86 | .output() 87 | .expect( 88 | "Failed to reload nginx configuration, please ensure nginx is installed and running", 89 | ); 90 | } 91 | } 92 | 93 | impl IngressHost { 94 | pub fn new(host: &String) -> Self { 95 | Self { 96 | host: host.to_owned(), 97 | paths: vec![], 98 | } 99 | } 100 | 101 | pub fn add_path(&mut self, path: &String, svc_ip: &Ipv4Addr, svc_port: &u16) { 102 | self.paths.push(IngressPath { 103 | path: path.to_owned(), 104 | ip: svc_ip.to_owned(), 105 | port: svc_port.to_owned(), 106 | }); 107 | } 108 | } 109 | 110 | impl ToString for NginxIngressConfig { 111 | fn to_string(&self) -> String { 112 | let servers = self.servers.iter().fold(String::new(), |acc, server| { 113 | acc + server.to_string().as_str() 114 | }); 115 | let out = CONFIG_BASE.to_string().replace("SERVERS", servers.as_str()); 116 | CONFIG_HEADER.to_string() 117 | + nginx_config::parse_main(out.as_str()) 118 | .unwrap() 119 | .to_string() 120 | .as_str() // format 121 | } 122 | } 123 | 124 | impl ToString for IngressHost { 125 | fn to_string(&self) -> String { 126 | let out = SERVER_BASE.to_string(); 127 | let out = out.replace("SERVER_NAME", &self.host); 128 | let locations = self 129 | .paths 130 | .iter() 131 | .fold(String::new(), |acc, path| acc + path.to_string().as_str()); 132 | out.replace("LOCATIONS", &locations) 133 | } 134 | } 135 | 136 | impl ToString for IngressPath { 137 | fn to_string(&self) -> String { 138 | let out = LOCATION_BASE.to_string(); 139 | let 
out = out.replace("PATH", self.path.as_str()); 140 | let out = out.replace("IP", self.ip.to_string().as_str()); 141 | out.replace("PORT", self.port.to_string().as_str()) 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /controllers/src/podautoscaler/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | use anyhow::{Context, Result}; 5 | use config::{Config, File}; 6 | use resources::config::ClusterConfig; 7 | 8 | mod horizontal; 9 | mod metrics; 10 | mod replica_calculator; 11 | mod utils; 12 | 13 | lazy_static! { 14 | pub static ref CONFIG: ClusterConfig = Config::builder() 15 | .add_source(File::with_name("/etc/rminik8s/controller-manager.yaml").required(false)) 16 | .set_override_option("apiServerUrl", std::env::var("API_SERVER_URL").ok()) 17 | .unwrap() 18 | .set_override_option( 19 | "apiServerWatchUrl", 20 | std::env::var("API_SERVER_WATCH_URL").ok(), 21 | ) 22 | .unwrap() 23 | .build() 24 | .unwrap_or_default() 25 | .try_deserialize::() 26 | .with_context(|| "Failed to parse config".to_string()) 27 | .unwrap_or_default(); 28 | } 29 | 30 | /// Default sync period in seconds 31 | pub static SYNC_PERIOD: u32 = 15; 32 | 33 | #[tokio::main] 34 | async fn main() -> Result<()> { 35 | tracing_subscriber::fmt::init(); 36 | 37 | let mut controller = horizontal::PodAutoscaler::new(); 38 | controller.run().await?; 39 | Ok(()) 40 | } 41 | -------------------------------------------------------------------------------- /controllers/src/podautoscaler/metrics.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use reqwest::Client; 3 | use resources::{ 4 | models::Response, 5 | objects::{ 6 | metrics::{FunctionMetric, PodMetric, PodMetrics, PodMetricsInfo, Resource}, 7 | Labels, 8 | }, 9 | }; 10 | 11 | use crate::CONFIG; 12 | 13 | pub struct MetricsClient { 14 | client: 
Client, 15 | } 16 | 17 | impl MetricsClient { 18 | pub fn new() -> Self { 19 | Self { 20 | client: reqwest::Client::new(), 21 | } 22 | } 23 | 24 | /// Get pod metrics in raw value 25 | pub async fn get_resource_metric_value( 26 | &self, 27 | resource: &Resource, 28 | selector: &Labels, 29 | ) -> Result { 30 | let metrics = self.get_pod_metrics(selector).await?; 31 | if metrics.is_empty() { 32 | Err(anyhow::anyhow!("No metrics found")) 33 | } else { 34 | let mut metric_info = PodMetricsInfo::new(); 35 | for pod in metrics { 36 | let mut sum = 0; 37 | if pod.containers.is_empty() { 38 | continue; 39 | } 40 | for container in pod.containers { 41 | let usage = container.usage.get(resource); 42 | match usage { 43 | Some(usage) => sum += *usage, 44 | None => { 45 | tracing::debug!( 46 | "Missing resource metric {} for container {} in pod {}", 47 | resource, 48 | container.name, 49 | pod.name 50 | ); 51 | break; 52 | }, 53 | } 54 | } 55 | metric_info.insert( 56 | pod.name, 57 | PodMetric { 58 | timestamp: pod.timestamp, 59 | window: pod.window, 60 | value: sum, 61 | }, 62 | ); 63 | } 64 | Ok(metric_info) 65 | } 66 | } 67 | 68 | async fn get_pod_metrics(&self, selector: &Labels) -> Result> { 69 | let response = self 70 | .client 71 | .get(format!("{}/api/v1/metrics/pods", CONFIG.api_server_url,)) 72 | .query::>(&vec![("selector", selector.to_string())]) 73 | .send() 74 | .await? 75 | .json::>>() 76 | .await?; 77 | match response.data { 78 | Some(data) => Ok(data), 79 | None => Err(anyhow::anyhow!("Failed to get pod metrics")), 80 | } 81 | } 82 | 83 | pub async fn get_function_metric(&self, func_name: &str) -> Result { 84 | let response = self 85 | .client 86 | .get(format!( 87 | "{}/api/v1/metrics/functions/{}", 88 | CONFIG.api_server_url, func_name 89 | )) 90 | .send() 91 | .await? 
92 | .json::>() 93 | .await?; 94 | match response.data { 95 | Some(data) => Ok(data), 96 | None => Err(anyhow::anyhow!("Failed to get function metrics")), 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /controllers/src/podautoscaler/utils.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Error, Result}; 2 | use reqwest::Url; 3 | use resources::{ 4 | informer::{ListerWatcher, WsStream}, 5 | models::Response, 6 | objects::{object_reference::ObjectReference, KubeObject, Object}, 7 | }; 8 | use tokio_tungstenite::connect_async; 9 | 10 | use crate::CONFIG; 11 | 12 | pub fn create_lister_watcher(path: String) -> ListerWatcher { 13 | let list_url = format!("{}/api/v1/{}", CONFIG.api_server_url, path); 14 | let watch_url = format!("{}/api/v1/watch/{}", CONFIG.api_server_watch_url, path); 15 | ListerWatcher { 16 | lister: Box::new(move |_| { 17 | let list_url = list_url.clone(); 18 | Box::pin(async { 19 | let res = reqwest::get(list_url) 20 | .await? 21 | .json::>>() 22 | .await?; 23 | let res = res.data.ok_or_else(|| anyhow!("Lister failed"))?; 24 | Ok::, Error>(res) 25 | }) 26 | }), 27 | watcher: Box::new(move |_| { 28 | let watch_url = watch_url.clone(); 29 | Box::pin(async move { 30 | let url = Url::parse(watch_url.as_str())?; 31 | let (stream, _) = connect_async(url).await?; 32 | Ok::(stream) 33 | }) 34 | }), 35 | } 36 | } 37 | 38 | pub async fn get_scale_target(target: &ObjectReference) -> Result { 39 | let client = reqwest::Client::new(); 40 | let response = client 41 | .get(format!( 42 | "{}/api/v1/{}s/{}", 43 | CONFIG.api_server_url, 44 | target.kind.to_lowercase(), 45 | target.name, 46 | )) 47 | .send() 48 | .await? 
49 | .json::>() 50 | .await?; 51 | match response.data { 52 | Some(data) => Ok(data), 53 | None => Err(anyhow!("Failed to get scale target")), 54 | } 55 | } 56 | 57 | pub async fn post_update(object: &KubeObject) -> Result<()> { 58 | let client = reqwest::Client::new(); 59 | let response = client 60 | .put(format!("{}{}", CONFIG.api_server_url, object.uri())) 61 | .json(object) 62 | .send() 63 | .await? 64 | .json::>() 65 | .await?; 66 | if let Some(msg) = response.msg { 67 | tracing::info!("{}", msg); 68 | } 69 | Ok(()) 70 | } 71 | -------------------------------------------------------------------------------- /controllers/src/replica_set/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | use anyhow::{Context, Result}; 5 | use config::{Config, File}; 6 | use controller::ReplicaSetController; 7 | use resources::config::ClusterConfig; 8 | 9 | mod controller; 10 | mod utils; 11 | 12 | lazy_static! 
{ 13 | pub static ref CONFIG: ClusterConfig = Config::builder() 14 | .add_source(File::with_name("/etc/rminik8s/controller-manager.yaml").required(false)) 15 | .set_override_option("apiServerUrl", std::env::var("API_SERVER_URL").ok()) 16 | .unwrap() 17 | .set_override_option( 18 | "apiServerWatchUrl", 19 | std::env::var("API_SERVER_WATCH_URL").ok(), 20 | ) 21 | .unwrap() 22 | .build() 23 | .unwrap_or_default() 24 | .try_deserialize::() 25 | .with_context(|| "Failed to parse config".to_string()) 26 | .unwrap_or_default(); 27 | } 28 | 29 | #[tokio::main] 30 | async fn main() -> Result<()> { 31 | tracing_subscriber::fmt::init(); 32 | 33 | let mut controller = ReplicaSetController::new(); 34 | controller.run().await?; 35 | Ok(()) 36 | } 37 | -------------------------------------------------------------------------------- /controllers/src/replica_set/utils.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Error}; 2 | use reqwest::Url; 3 | use resources::{ 4 | informer::{EventHandler, Informer, ListerWatcher, ResyncHandler, WsStream}, 5 | models::Response, 6 | objects::Object, 7 | }; 8 | use tokio::sync::mpsc::Sender; 9 | use tokio_tungstenite::connect_async; 10 | 11 | use crate::CONFIG; 12 | 13 | #[derive(Debug)] 14 | pub enum Event { 15 | Add(T), 16 | Update(T, T), 17 | Delete(T), 18 | } 19 | 20 | #[derive(Debug)] 21 | pub struct ResyncNotification; 22 | 23 | pub fn create_lister_watcher(path: String) -> ListerWatcher { 24 | let list_url = format!("{}/api/v1/{}", CONFIG.api_server_url, path); 25 | let watch_url = format!("{}/api/v1/watch/{}", CONFIG.api_server_watch_url, path); 26 | ListerWatcher { 27 | lister: Box::new(move |_| { 28 | let list_url = list_url.clone(); 29 | Box::pin(async { 30 | let res = reqwest::get(list_url) 31 | .await? 
32 | .json::>>() 33 | .await?; 34 | let res = res.data.ok_or_else(|| anyhow!("Lister failed"))?; 35 | Ok::, Error>(res) 36 | }) 37 | }), 38 | watcher: Box::new(move |_| { 39 | let watch_url = watch_url.clone(); 40 | Box::pin(async move { 41 | let url = Url::parse(watch_url.as_str())?; 42 | let (stream, _) = connect_async(url).await?; 43 | Ok::(stream) 44 | }) 45 | }), 46 | } 47 | } 48 | 49 | pub fn create_informer( 50 | path: String, 51 | tx: Sender>, 52 | resync_tx: Sender, 53 | ) -> Informer { 54 | let lw = create_lister_watcher(path); 55 | 56 | let tx_add = tx; 57 | let tx_update = tx_add.clone(); 58 | let tx_delete = tx_add.clone(); 59 | let eh = EventHandler:: { 60 | add_cls: Box::new(move |new| { 61 | // TODO: this is not good: tx is copied every time add_cls is called, but I can't find a better way 62 | let tx_add = tx_add.clone(); 63 | Box::pin(async move { 64 | tx_add.send(Event::::Add(new)).await?; 65 | Ok(()) 66 | }) 67 | }), 68 | update_cls: Box::new(move |(old, new)| { 69 | let tx_update = tx_update.clone(); 70 | Box::pin(async move { 71 | tx_update.send(Event::::Update(old, new)).await?; 72 | Ok(()) 73 | }) 74 | }), 75 | delete_cls: Box::new(move |old| { 76 | let tx_delete = tx_delete.clone(); 77 | Box::pin(async move { 78 | tx_delete.send(Event::::Delete(old)).await?; 79 | Ok(()) 80 | }) 81 | }), 82 | }; 83 | let rh = ResyncHandler(Box::new(move |()| { 84 | let resync_tx = resync_tx.clone(); 85 | Box::pin(async move { 86 | resync_tx.send(ResyncNotification).await?; 87 | Ok(()) 88 | }) 89 | })); 90 | 91 | Informer::new(lw, eh, rh) 92 | } 93 | -------------------------------------------------------------------------------- /examples/api-server/config.yaml: -------------------------------------------------------------------------------- 1 | log_level: "Debug" 2 | etcd_endpoint: "127.0.0.1:2379" 3 | metrics_server: "127.0.0.1:9090" 4 | -------------------------------------------------------------------------------- /examples/config/node/node.env: 
-------------------------------------------------------------------------------- 1 | ETCD_ENDPOINT="http://127.0.0.1:2379" 2 | API_SERVER_ENDPOINT="http://127.0.0.1:8080" 3 | -------------------------------------------------------------------------------- /examples/function/.gitignore: -------------------------------------------------------------------------------- 1 | function.zip 2 | -------------------------------------------------------------------------------- /examples/function/README.md: -------------------------------------------------------------------------------- 1 | # Serverless Function 2 | 3 | 1. Start all related components: 4 | - API Server 5 | - rKubelet 6 | - ReplicaSet Controller 7 | - Endpoints Controller 8 | - Function Controller 9 | - Pod Autoscaler 10 | - Serverless Router 11 | - rKube-proxy 12 | - cAdvisor 13 | - Prometheus 14 | 2. Prepare code file 15 | 3. Create Function: `rkubectl create -f simple.yaml -c function.zip` 16 | 4. Activate: `wget -q -O- http://test.func.minik8s.com` 17 | 5. Generate load: `while sleep 0.01; do wget -q -O- http://test.func.minik8s.com; done` 18 | 6. 
Check Function: `rkubectl get functions test` 19 | -------------------------------------------------------------------------------- /examples/function/handler.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | 4 | def handler(args): 5 | startTime = time.time() 6 | time.sleep(0.1) 7 | endTime = time.time() 8 | return str(round((endTime - startTime) * 1000)) + "ms" 9 | -------------------------------------------------------------------------------- /examples/function/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | -------------------------------------------------------------------------------- /examples/function/simple.yaml: -------------------------------------------------------------------------------- 1 | kind: Function 2 | metadata: 3 | name: test 4 | spec: 5 | maxReplicas: 10 6 | metrics: 7 | type: Function 8 | name: test 9 | target: 30 -------------------------------------------------------------------------------- /examples/gpu-code/Makefile: -------------------------------------------------------------------------------- 1 | cuda_example: 2 | module load cuda/10.1.243-gcc-9.2.0 && nvcc cuda_example.cu -o cuda_example -std=c++11 3 | 4 | clean: 5 | del *.exe *.exp *.lib *.o cuda_example 6 | -------------------------------------------------------------------------------- /examples/gpu-code/cuda.slurm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #SBATCH --job-name=cuda_example 4 | #SBATCH --partition=dgx2 5 | #SBATCH --output=%j.out 6 | #SBATCH --error=%j.err 7 | #SBATCH -N 1 8 | #SBATCH --ntasks-per-node=1 9 | #SBATCH --cpus-per-task=6 10 | #SBATCH --gres=gpu:1 11 | 12 | ulimit -s unlimited 13 | ulimit -l unlimited 14 | 15 | module load cuda/10.1.243-gcc-9.2.0 16 | 17 | ./cuda_example -------------------------------------------------------------------------------- 
/examples/gpu-code/gpu.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/examples/gpu-code/gpu.zip -------------------------------------------------------------------------------- /examples/gpu-job/gpu-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: GpuJob 3 | metadata: 4 | name: cuda 5 | spec: 6 | gpuConfig: 7 | slurmConfig: 8 | partition: dgx2 9 | totalCoreNumber: 1 10 | ntasksPerNode: 1 11 | cpusPerTask: 6 12 | gres: gpu:1 13 | scripts: 14 | [ 15 | "ulimit -s unlimited", 16 | "ulimit -l unlimited", 17 | "module load cuda/10.1.243-gcc-9.2.0", 18 | "./cuda_example", 19 | ] 20 | compileScripts: make 21 | -------------------------------------------------------------------------------- /examples/gpu-job/gpuserver-config.yaml: -------------------------------------------------------------------------------- 1 | username: '******' 2 | password: '******' 3 | -------------------------------------------------------------------------------- /examples/hpa/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:5-apache 2 | COPY index.php /var/www/html/index.php 3 | RUN chmod a+rx index.php 4 | -------------------------------------------------------------------------------- /examples/hpa/README.md: -------------------------------------------------------------------------------- 1 | # Horizontal Pod Autoscaler 2 | 3 | 1. 
Start all related components: 4 | - API Server 5 | - rKubelet 6 | - ReplicaSet Controller 7 | - Endpoints Controller 8 | - Pod Autoscaler 9 | - rKube-proxy 10 | - cAdvisor: 11 | 12 | ```shell 13 | docker run \ 14 | --volume=/:/rootfs:ro \ 15 | --volume=/var/run:/var/run:rw \ 16 | --volume=/sys:/sys:ro \ 17 | --volume=/var/lib/docker/:/var/lib/docker:ro \ 18 | --publish=8090:8080 \ 19 | --detach=true \ 20 | --name=cadvisor \ 21 | zcube/cadvisor:latest 22 | ``` 23 | 24 | - Prometheus: `./prometheus --config.file=prometheus.yml` 25 | 2. Apply ReplicaSet: `rkubectl create -f replicaset.yaml` 26 | 3. Create Service: `rkubectl create -f service.yaml` 27 | 4. Create HPA: `rkubectl create -f simple.yaml` 28 | 5. Check service IP: `rkubectl get services php-apache` 29 | 6. Generate load: `while sleep 0.01; do wget -q -O- http://; done` 30 | 7. Check HPA: `rkubectl get horizontal-pod-autoscalers php-apache` 31 | -------------------------------------------------------------------------------- /examples/hpa/index.php: -------------------------------------------------------------------------------- 1 | 8 | -------------------------------------------------------------------------------- /examples/hpa/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 6 | external_labels: 7 | monitor: 'codelab-monitor' 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: 'prometheus' 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds. 
16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ['localhost:9090'] 20 | 21 | - job_name: 'cadvisor' 22 | scrape_interval: 15s 23 | static_configs: 24 | - targets: ['localhost:8090'] 25 | -------------------------------------------------------------------------------- /examples/hpa/replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: php-apache 5 | labels: 6 | run: php-apache 7 | spec: 8 | replicas: 1 9 | selector: 10 | run: php-apache 11 | template: 12 | metadata: 13 | name: php-apache 14 | labels: 15 | run: php-apache 16 | spec: 17 | containers: 18 | - name: php-apache 19 | image: hpa-example:latest 20 | imagePullPolicy: Never 21 | ports: 22 | - containerPort: 80 23 | resources: 24 | limits: 25 | cpu: 500 26 | requests: 27 | cpu: 200 -------------------------------------------------------------------------------- /examples/hpa/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: php-apache 5 | labels: 6 | run: php-apache 7 | spec: 8 | ports: 9 | - port: 80 10 | targetPort: 80 11 | selector: 12 | run: php-apache -------------------------------------------------------------------------------- /examples/hpa/simple.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: php-apache 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: ReplicaSet 9 | name: php-apache 10 | minReplicas: 1 11 | maxReplicas: 10 12 | metrics: 13 | type: Resource 14 | name: CPU 15 | target: 16 | averageUtilization: 50 -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/httpd-pod.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: httpd 5 | labels: 6 | app: httpd 7 | spec: 8 | containers: 9 | - name: httpd 10 | image: httpd:latest 11 | imagePullPolicy: IfNotPresent 12 | ports: 13 | - containerPort: 80 14 | -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/httpd-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | metadata: 3 | name: httpd-service 4 | spec: 5 | selector: 6 | app: httpd 7 | ports: 8 | - port: 80 9 | targetPort: 80 10 | -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/mix.yaml: -------------------------------------------------------------------------------- 1 | kind: Ingress 2 | metadata: 3 | name: mix 4 | spec: 5 | rules: 6 | - host: server.minik8s.com 7 | paths: 8 | - path: /nginx 9 | service: 10 | name: nginx-service 11 | port: 80 12 | - path: /httpd 13 | service: 14 | name: httpd-service 15 | port: 80 16 | - host: tomcat.minik8s.com 17 | paths: 18 | - path: / 19 | service: 20 | name: tomcat-service 21 | port: 80 22 | -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/multiple-host-ingress.yaml: -------------------------------------------------------------------------------- 1 | kind: Ingress 2 | metadata: 3 | name: nh-host-ingress 4 | spec: 5 | rules: 6 | - host: nginx.minik8s.com 7 | paths: 8 | - path: / 9 | service: 10 | name: nginx-service 11 | port: 80 12 | - host: httpd.minik8s.com 13 | paths: 14 | - path: / 15 | service: 16 | name: httpd-service 17 | port: 80 18 | - host: tomcat.minik8s.com 19 | paths: 20 | - path: / 21 | service: 22 | name: tomcat-service 23 | port: 80 24 | -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/multiple-path-ingress.yaml: 
-------------------------------------------------------------------------------- 1 | kind: Ingress 2 | metadata: 3 | name: nh-path-ingress 4 | spec: 5 | rules: 6 | - host: server.minik8s.com 7 | paths: 8 | - path: /nginx 9 | service: 10 | name: nginx-service 11 | port: 80 12 | - path: /httpd 13 | service: 14 | name: httpd-service 15 | port: 80 16 | - path: /tomcat 17 | service: 18 | name: tomcat-service 19 | port: 80 20 | -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/nginx-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | imagePullPolicy: IfNotPresent 12 | ports: 13 | - containerPort: 80 14 | -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/nginx-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | metadata: 3 | name: nginx-service 4 | spec: 5 | selector: 6 | app: nginx 7 | ports: 8 | - port: 80 9 | targetPort: 80 10 | -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/tomcat-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: tomcat 5 | labels: 6 | app: tomcat 7 | spec: 8 | containers: 9 | - name: tomcat 10 | image: tomcat:latest 11 | imagePullPolicy: IfNotPresent 12 | ports: 13 | - containerPort: 8080 14 | -------------------------------------------------------------------------------- /examples/ingresses/nginx-httpd/tomcat-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | metadata: 3 | name: tomcat-service 4 | spec: 5 | selector: 6 | app: tomcat 7 | 
ports: 8 | - port: 80 9 | targetPort: 8080 10 | -------------------------------------------------------------------------------- /examples/ingresses/simple-ingress.yaml: -------------------------------------------------------------------------------- 1 | kind: Ingress 2 | metadata: 3 | name: minimal-ingress 4 | spec: 5 | rules: 6 | - host: server.minik8s.com 7 | paths: 8 | - path: /testpath 9 | service: 10 | name: server-service 11 | port: 80 12 | -------------------------------------------------------------------------------- /examples/nodes/labels.yaml: -------------------------------------------------------------------------------- 1 | kind: Node 2 | metadata: 3 | name: k8s1 4 | labels: 5 | gpu: nvidia 6 | -------------------------------------------------------------------------------- /examples/nodes/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: server 7 | spec: 8 | nodeSelector: 9 | gpu: nvidia 10 | containers: 11 | - name: nginx 12 | image: nginx:latest 13 | imagePullPolicy: IfNotPresent 14 | ports: 15 | - containerPort: 80 16 | -------------------------------------------------------------------------------- /examples/pods/curl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: curl 5 | labels: 6 | app: curl 7 | spec: 8 | containers: 9 | - name: httpd-curl 10 | image: httpd:2.4.53 11 | ports: 12 | - containerPort: 80 -------------------------------------------------------------------------------- /examples/pods/demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: demo 5 | labels: 6 | app: demo 7 | spec: 8 | volumes: 9 | - name: nginx-files 10 | emptyDir: 11 | - name: download-files 12 | emptyDir: 13 | 14 | containers: 15 | - name: busybox 16 | 
image: busybox:latest 17 | command: 18 | - ifconfig 19 | imagePullPolicy: IfNotPresent 20 | 21 | - name: viewer 22 | image: dplsming/nginx-fileserver:1.0 23 | ports: 24 | - containerPort: 80 25 | volumeMounts: 26 | - name: nginx-files 27 | mountPath: /usr/share/nginx/html/files 28 | - name: download-files 29 | mountPath: /data 30 | 31 | - name: downloader 32 | image: dplsming/aria2ng-downloader:1.0 33 | ports: 34 | - containerPort: 6800 35 | - containerPort: 6880 36 | volumeMounts: 37 | - name: nginx-files 38 | mountPath: /usr/share/nginx/html/files 39 | - name: download-files 40 | mountPath: /data 41 | resources: 42 | limits: 43 | # 100 milli-CPU 44 | cpu: 100 45 | # 128MB 46 | memory: 134217728 -------------------------------------------------------------------------------- /examples/pods/simple-httpd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: httpd 5 | labels: 6 | app: server 7 | spec: 8 | containers: 9 | - name: httpd 10 | image: httpd:2.4.53 11 | ports: 12 | - containerPort: 80 13 | -------------------------------------------------------------------------------- /examples/pods/simple-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: server 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | imagePullPolicy: IfNotPresent 12 | ports: 13 | - containerPort: 80 14 | -------------------------------------------------------------------------------- /examples/replicasets/simple.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: frontend 5 | labels: 6 | app: server 7 | spec: 8 | replicas: 3 9 | selector: 10 | app: server 11 | template: 12 | metadata: 13 | name: nginx 14 | labels: 15 | app: server 16 | spec: 17 | 
containers: 18 | - name: nginx 19 | image: nginx:latest 20 | imagePullPolicy: IfNotPresent 21 | ports: 22 | - containerPort: 80 -------------------------------------------------------------------------------- /examples/services/simple-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | metadata: 3 | name: server-service 4 | spec: 5 | selector: 6 | app: server 7 | ports: 8 | - port: 80 9 | targetPort: 80 10 | -------------------------------------------------------------------------------- /examples/workflow/.gitignore: -------------------------------------------------------------------------------- 1 | function.zip 2 | -------------------------------------------------------------------------------- /examples/workflow/guess/README.md: -------------------------------------------------------------------------------- 1 | # Guess Workflow 2 | 3 | 1. create function: guess, minus3, right, wrong 4 | 2. create workflow: guess 5 | 6 | Try it out: 7 | 8 | * `http -v GET guess.workflow.func.minik8s.com a:=9 b:=4`: You should be able to get "Congratulations! You are right!" in the end. 9 | * `http -v GET guess.workflow.func.minik8s.com a:=1 b:=2`: You should be able to get "Sorry... Guess again..." in the end. 
10 | -------------------------------------------------------------------------------- /examples/workflow/guess/add/handler.py: -------------------------------------------------------------------------------- 1 | def handler(args): 2 | a = args.get("a") 3 | b = args.get("b") 4 | return {"ans": a+b} 5 | 6 | 7 | args = {"a": 3, "b": 4} 8 | print(handler(args)) 9 | -------------------------------------------------------------------------------- /examples/workflow/guess/add/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/examples/workflow/guess/add/requirements.txt -------------------------------------------------------------------------------- /examples/workflow/guess/add/simple.yaml: -------------------------------------------------------------------------------- 1 | kind: Function 2 | metadata: 3 | name: add 4 | spec: 5 | maxReplicas: 10 6 | metrics: 7 | type: Function 8 | name: add 9 | target: 30 10 | -------------------------------------------------------------------------------- /examples/workflow/guess/guess.yaml: -------------------------------------------------------------------------------- 1 | kind: Workflow 2 | metadata: 3 | name: guess 4 | spec: 5 | startAt: add 6 | states: 7 | add: 8 | type: Task 9 | resource: add 10 | next: minus3 11 | minus3: 12 | type: Task 13 | resource: minus3 14 | next: guess 15 | guess: 16 | type: Choice 17 | rules: 18 | - type: FieldNumEquals 19 | field: ans 20 | content: 10 21 | next: right 22 | default: wrong 23 | right: 24 | type: Task 25 | resource: right 26 | wrong: 27 | type: Task 28 | resource: wrong 29 | -------------------------------------------------------------------------------- /examples/workflow/guess/minus3/handler.py: -------------------------------------------------------------------------------- 1 | def handler(args): 2 | a = args.get("ans") 3 | return {"ans": a-3} 4 | 5 | 
6 | args = {"ans": 13} 7 | print(handler(args)) 8 | -------------------------------------------------------------------------------- /examples/workflow/guess/minus3/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/examples/workflow/guess/minus3/requirements.txt -------------------------------------------------------------------------------- /examples/workflow/guess/minus3/simple.yaml: -------------------------------------------------------------------------------- 1 | kind: Function 2 | metadata: 3 | name: minus3 4 | spec: 5 | maxReplicas: 10 6 | metrics: 7 | type: Function 8 | name: minus3 9 | target: 30 10 | -------------------------------------------------------------------------------- /examples/workflow/guess/right/handler.py: -------------------------------------------------------------------------------- 1 | def handler(args): 2 | return "Congratulations! You are right!" 
3 | 4 | 5 | args = {"a": 3, "b": 4} 6 | print(handler(args)) 7 | -------------------------------------------------------------------------------- /examples/workflow/guess/right/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/examples/workflow/guess/right/requirements.txt -------------------------------------------------------------------------------- /examples/workflow/guess/right/simple.yaml: -------------------------------------------------------------------------------- 1 | kind: Function 2 | metadata: 3 | name: right 4 | spec: 5 | maxReplicas: 10 6 | metrics: 7 | type: Function 8 | name: right 9 | target: 30 10 | -------------------------------------------------------------------------------- /examples/workflow/guess/wrong/handler.py: -------------------------------------------------------------------------------- 1 | def handler(args): 2 | return "Sorry... Guess again..." 
3 | 4 | 5 | args = {"a": 3, "b": 4} 6 | print(handler(args)) 7 | -------------------------------------------------------------------------------- /examples/workflow/guess/wrong/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/examples/workflow/guess/wrong/requirements.txt -------------------------------------------------------------------------------- /examples/workflow/guess/wrong/simple.yaml: -------------------------------------------------------------------------------- 1 | kind: Function 2 | metadata: 3 | name: wrong 4 | spec: 5 | maxReplicas: 10 6 | metrics: 7 | type: Function 8 | name: wrong 9 | target: 30 10 | -------------------------------------------------------------------------------- /examples/workflow/hello-workflow.yaml: -------------------------------------------------------------------------------- 1 | kind: Workflow 2 | metadata: 3 | name: hello_workflow 4 | spec: 5 | startAt: firstState 6 | states: 7 | firstState: 8 | type: Task 9 | resource: echo_string 10 | next: choiceState 11 | choiceState: 12 | type: Choice 13 | choices: 14 | - type: FieldEquals 15 | field: ans 16 | content: "8" 17 | next: firstMatchState 18 | - type: FieldNumEquals 19 | field: ans 20 | content: 8 21 | next: secondMatchState 22 | default: defaultState 23 | firstMatchState: 24 | type: Task 25 | resource: append_world 26 | next: nextState 27 | secondMatchState: 28 | type: Task 29 | resource: append_rust 30 | next: nextState 31 | defaultState: 32 | type: Fail 33 | error: DefaultStateError 34 | cause: "No Matches!"
35 | nextState: 36 | type: Task 37 | resource: append_from_minik8s 38 | -------------------------------------------------------------------------------- /resources/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "resources" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | anyhow = {version = "1.0.56", features = ["backtrace"]} 8 | axum = "0.5.1" 9 | bollard = "0.12.0" 10 | chrono = "0.4.19" 11 | dashmap = "5.3.3" 12 | derivative = "2.2.0" 13 | enum_dispatch = "0.3.8" 14 | etcd-client = "0.9.0" 15 | futures = "0.3.21" 16 | futures-util = "0.3.21" 17 | indenter = "0.3.3" 18 | rand = "0.8.5" 19 | reqwest = {version = "0.11", features = ["blocking", "json"]} 20 | serde = {version = "1.0.136", features = ["derive"]} 21 | serde_json = "1.0.79" 22 | serde_yaml = "0.8.23" 23 | strum = {version = "0.24", features = ["derive"]} 24 | tokio = {version = "1.17.0", features = ["full"]} 25 | tokio-tungstenite = "0.17.1" 26 | tracing = "0.1.32" 27 | uuid = {version = "0.8", features = ["serde", "v4"]} 28 | -------------------------------------------------------------------------------- /resources/src/config/kubelet.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use super::ClusterConfig; 4 | 5 | #[derive(Debug, Serialize, Deserialize, Clone)] 6 | #[serde(rename_all = "camelCase", default)] 7 | pub struct KubeletConfig { 8 | /// Path to the directory containing local (static) pods to run, 9 | /// or the path to a single static pod file. 10 | /// Defaults to "/etc/rminik8s/manifests". 11 | pub static_pod_path: String, 12 | /// Frequency that kubelet computes node status. 13 | /// In seconds. Default: 10 sec 14 | pub node_status_update_frequency: u64, 15 | /// Frequency that kubelet posts node status to master 16 | /// if node status does not change. 
17 | /// Kubelet will ignore this frequency and 18 | /// post node status immediately if any change is detected. 19 | /// In seconds. Default: 30 sec 20 | pub node_status_report_frequency: u64, 21 | /// Frequency that kubelet computes pod status. 22 | /// In seconds. Default: 10 sec 23 | pub pod_status_update_frequency: u64, 24 | pub cluster: ClusterConfig, 25 | pub port: u16, 26 | } 27 | 28 | impl Default for KubeletConfig { 29 | fn default() -> Self { 30 | KubeletConfig { 31 | static_pod_path: "/etc/rminik8s/manifests".to_string(), 32 | node_status_update_frequency: 10, 33 | node_status_report_frequency: 30, 34 | pod_status_update_frequency: 10, 35 | cluster: ClusterConfig::default(), 36 | port: 10250, 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /resources/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod kubelet; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | #[derive(Debug, Serialize, Deserialize, Clone)] 6 | #[serde(rename_all = "camelCase", default)] 7 | pub struct ClusterConfig { 8 | /// API server URL 9 | pub api_server_url: String, 10 | /// API server watch URL 11 | pub api_server_watch_url: String, 12 | } 13 | 14 | impl Default for ClusterConfig { 15 | fn default() -> Self { 16 | ClusterConfig { 17 | api_server_url: "http://localhost:8080".to_string(), 18 | api_server_watch_url: "ws://localhost:8080".to_string(), 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /resources/src/informer/mod.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, sync::Arc}; 2 | 3 | use anyhow::Result; 4 | use futures_util::future::BoxFuture; 5 | use reflector::{Reflector, ReflectorNotification}; 6 | use tokio::{ 7 | net::TcpStream, 8 | select, 9 | sync::{mpsc, RwLock}, 10 | }; 11 | use tokio_tungstenite::{MaybeTlsStream,
WebSocketStream}; 12 | 13 | use crate::{informer::reflector::ResyncNotification, objects::Object}; 14 | 15 | mod reflector; 16 | 17 | pub type WsStream = WebSocketStream>; 18 | 19 | pub type Store = Arc>>; 20 | 21 | pub type CLS = Box BoxFuture<'static, Result> + Send + Sync>; 22 | 23 | pub struct ListerWatcher { 24 | pub lister: CLS<(), Vec>, 25 | pub watcher: CLS<(), WsStream>, 26 | } 27 | 28 | pub struct EventHandler { 29 | pub add_cls: CLS, 30 | pub update_cls: CLS<(T, T), ()>, 31 | pub delete_cls: CLS, 32 | } 33 | 34 | pub struct ResyncHandler(pub CLS<(), ()>); 35 | 36 | pub struct Informer { 37 | reflector: Arc>, 38 | eh: EventHandler, 39 | rh: ResyncHandler, 40 | } 41 | 42 | impl Informer { 43 | pub fn new(lw: ListerWatcher, eh: EventHandler, rh: ResyncHandler) -> Self { 44 | let store = Arc::new(RwLock::new(HashMap::new())); 45 | let reflector = Reflector { 46 | lw, 47 | store, 48 | }; 49 | Self { 50 | reflector: Arc::new(reflector), 51 | eh, 52 | rh, 53 | } 54 | } 55 | 56 | pub fn get_store(&self) -> Arc>> { 57 | self.reflector.store.clone() 58 | } 59 | 60 | pub async fn run(&self) -> Result<()> { 61 | // start reflector 62 | let (tx, mut rx) = mpsc::channel::>(16); 63 | let (resync_tx, mut resync_rx) = mpsc::channel::(4); 64 | let r = self.reflector.clone(); 65 | let mut reflector_handle = tokio::spawn(async move { r.run(tx, resync_tx).await }); 66 | 67 | tracing::info!("Informer started"); 68 | 69 | loop { 70 | select! 
{ 71 | Some(n) = rx.recv() => { 72 | match n { 73 | ReflectorNotification::Add(new) => { 74 | (self.eh.add_cls)(new).await?; 75 | }, 76 | ReflectorNotification::Update(old, new) => { 77 | (self.eh.update_cls)((old, new)).await?; 78 | }, 79 | ReflectorNotification::Delete(old) => { 80 | (self.eh.delete_cls)(old).await?; 81 | }, 82 | } 83 | }, 84 | Some(_) = resync_rx.recv() => { 85 | (self.rh.0)(()).await?; 86 | }, 87 | reflector_result = &mut reflector_handle => { 88 | if let Err(e) = reflector_result { 89 | tracing::error!("Reflector ended unexpectedly, caused by: {}", e); 90 | } 91 | } 92 | }; 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /resources/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod config; 2 | pub mod informer; 3 | pub mod models; 4 | pub mod objects; 5 | pub mod utils; 6 | -------------------------------------------------------------------------------- /resources/src/models/etcd.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use crate::objects::Object; 4 | 5 | #[derive(Debug, Serialize, Deserialize, Clone)] 6 | #[serde(tag = "type")] 7 | pub enum WatchEvent { 8 | Put(PutEvent), 9 | Delete(DeleteEvent), 10 | } 11 | 12 | #[derive(Debug, Serialize, Deserialize, Clone)] 13 | pub struct PutEvent { 14 | pub key: String, 15 | pub object: T, 16 | } 17 | 18 | #[derive(Debug, Serialize, Deserialize, Clone)] 19 | pub struct DeleteEvent { 20 | pub key: String, 21 | } 22 | 23 | impl WatchEvent { 24 | pub fn new_put(key: String, object: T) -> Self { 25 | WatchEvent::Put(PutEvent { 26 | key, 27 | object, 28 | }) 29 | } 30 | 31 | pub fn new_delete(key: String) -> Self { 32 | WatchEvent::Delete(DeleteEvent { 33 | key, 34 | }) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /resources/src/models/mod.rs: 
-------------------------------------------------------------------------------- 1 | use axum::{http::StatusCode, response::IntoResponse, Json}; 2 | use reqwest::Url; 3 | use serde::{Deserialize, Serialize}; 4 | 5 | pub mod etcd; 6 | 7 | #[derive(Debug, Serialize, Deserialize)] 8 | pub struct Response { 9 | pub msg: Option, 10 | pub data: Option, 11 | } 12 | 13 | #[derive(Debug, Serialize, Deserialize)] 14 | pub struct ErrResponse { 15 | pub msg: String, 16 | pub cause: Option, 17 | #[serde(skip)] 18 | pub status: StatusCode, 19 | } 20 | 21 | pub struct NodeConfig { 22 | pub etcd_endpoint: Url, 23 | pub api_server_endpoint: Url, 24 | } 25 | 26 | impl Response 27 | where 28 | T: Serialize, 29 | { 30 | pub fn new(msg: Option, data: Option) -> Self { 31 | Self { 32 | msg, 33 | data, 34 | } 35 | } 36 | } 37 | 38 | impl ErrResponse { 39 | pub fn new(msg: String, cause: Option) -> Self { 40 | Self { 41 | msg, 42 | cause, 43 | status: StatusCode::INTERNAL_SERVER_ERROR, 44 | } 45 | } 46 | pub fn not_found(msg: String, cause: Option) -> Self { 47 | Self { 48 | msg, 49 | cause, 50 | status: StatusCode::NOT_FOUND, 51 | } 52 | } 53 | pub fn bad_request(msg: String, cause: Option) -> Self { 54 | Self { 55 | msg, 56 | cause, 57 | status: StatusCode::BAD_REQUEST, 58 | } 59 | } 60 | pub fn json(&self) -> String { 61 | serde_json::to_string::(self).unwrap() 62 | } 63 | } 64 | 65 | impl IntoResponse for ErrResponse { 66 | fn into_response(self) -> axum::response::Response { 67 | (self.status, Json(self)).into_response() 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /resources/src/objects/binding.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use super::{object_reference::ObjectReference, Metadata, Object}; 4 | 5 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 6 | pub struct Binding { 7 | pub metadata: Metadata, 8 | pub target: 
ObjectReference, 9 | } 10 | 11 | impl Object for Binding { 12 | fn kind(&self) -> &'static str { 13 | "Binding" 14 | } 15 | 16 | fn name(&self) -> &String { 17 | &self.metadata.name 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /resources/src/objects/function.rs: -------------------------------------------------------------------------------- 1 | use std::default::Default; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use super::{ 6 | hpa::{HorizontalPodAutoscalerBehavior, MetricSource}, 7 | Labels, Metadata, Object, 8 | }; 9 | 10 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 11 | pub struct Function { 12 | pub metadata: Metadata, 13 | pub spec: FunctionSpec, 14 | pub status: Option, 15 | } 16 | 17 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 18 | #[serde(rename_all = "camelCase")] 19 | pub struct FunctionSpec { 20 | /// The upper limit for the number of replicas 21 | /// to which the autoscaler can scale up. 22 | #[serde(default = "default_max_replicas")] 23 | pub max_replicas: u32, 24 | /// Configures the scaling behavior of the target 25 | /// in both Up and Down directions 26 | /// (scaleUp and scaleDown fields respectively). 27 | /// If not set, the default HPAScalingRules 28 | /// for scale up and scale down are used. 29 | #[serde(default)] 30 | pub behavior: HorizontalPodAutoscalerBehavior, 31 | /// Contains the specifications for which to use 32 | /// to calculate the desired replica count 33 | /// (the maximum replica count across all metrics will be used). 34 | /// The desired replica count is calculated multiplying the ratio 35 | /// between the target value and the current value 36 | /// by the current number of pods. 37 | /// Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
38 | pub metrics: MetricSource, 39 | } 40 | 41 | fn default_max_replicas() -> u32 { 42 | 10 43 | } 44 | 45 | #[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)] 46 | #[serde(rename_all = "camelCase")] 47 | pub struct FunctionStatus { 48 | pub service_ref: String, 49 | pub filename: String, 50 | pub host: String, 51 | /// the name of image which wraps this function 52 | pub image: Option, 53 | } 54 | 55 | impl Object for Function { 56 | fn kind(&self) -> &'static str { 57 | "Function" 58 | } 59 | 60 | fn name(&self) -> &String { 61 | &self.metadata.name 62 | } 63 | } 64 | 65 | impl Function { 66 | pub fn init(&mut self, svc_name: String, filename: String) { 67 | let name = &self.metadata.name; 68 | let host = format!("{}.func.minik8s.com", name); 69 | let mut labels = Labels::new(); 70 | labels.insert("function", name); 71 | self.metadata.uid = Some(uuid::Uuid::new_v4()); 72 | self.metadata.labels = labels; 73 | self.status = Some(FunctionStatus { 74 | service_ref: svc_name, 75 | filename, 76 | host, 77 | image: None, 78 | }); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /resources/src/objects/gpu_job.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use super::{pod::PodTemplateSpec, Metadata, Object}; 4 | 5 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 6 | pub struct GpuJob { 7 | /// Standard object's metadata. 8 | pub metadata: Metadata, 9 | /// Specification of the desired behavior of a job. 10 | pub spec: GpuJobSpec, 11 | /// Current status of a job. 
12 | pub status: Option, 13 | } 14 | 15 | impl Object for GpuJob { 16 | fn kind(&self) -> &'static str { 17 | "GpuJob" 18 | } 19 | 20 | fn name(&self) -> &String { 21 | &self.metadata.name 22 | } 23 | } 24 | 25 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 26 | #[serde(rename_all = "camelCase")] 27 | pub struct GpuJobSpec { 28 | /// GPU config of the GpuJob. 29 | pub gpu_config: GpuConfig, 30 | /// Specifies the desired number of successfully finished pods the job should be run with. 31 | /// Setting to nil means that the success of any pod signals the success of all pods, 32 | /// and allows parallelism to have any positive value. 33 | /// Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. 34 | #[serde(default = "completions_default")] 35 | pub completions: u32, 36 | /// Specifies the maximum desired number of pods the job should run at any given time. 37 | /// The actual number of pods running in steady state will be less than this number 38 | /// when ((.spec.completions - .status.successful) < .spec.parallelism) 39 | #[serde(default = "parallelism_default")] 40 | pub parallelism: u32, 41 | /// Specifies the number of retries before marking this job failed. 
Defaults to 6 42 | #[serde(default = "back_off_limit_default")] 43 | pub back_off_limit: u32, 44 | } 45 | 46 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 47 | #[serde(rename_all = "camelCase")] 48 | pub struct GpuConfig { 49 | pub slurm_config: SlurmConfig, 50 | pub compile_scripts: String, 51 | } 52 | 53 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 54 | #[serde(rename_all = "camelCase")] 55 | pub struct SlurmConfig { 56 | pub partition: String, 57 | pub total_core_number: u32, 58 | pub ntasks_per_node: u32, 59 | pub cpus_per_task: u32, 60 | pub gres: String, 61 | pub scripts: Option>, 62 | } 63 | 64 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)] 65 | pub struct GpuJobStatus { 66 | /// The number of pending and running pods. 67 | pub active: u32, 68 | /// The number of pods which reached phase Failed. 69 | pub failed: u32, 70 | /// The number of pods which reached phase Succeeded. 71 | pub succeeded: u32, 72 | /// filename of code zip file 73 | pub filename: Option, 74 | /// Describes the pod that will be created when executing a job. 
75 | pub template: Option, 76 | } 77 | 78 | fn completions_default() -> u32 { 79 | 1 80 | } 81 | 82 | fn parallelism_default() -> u32 { 83 | 1 84 | } 85 | 86 | fn back_off_limit_default() -> u32 { 87 | 6 88 | } 89 | -------------------------------------------------------------------------------- /resources/src/objects/ingress.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use super::{Metadata, Object}; 4 | 5 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 6 | pub struct Ingress { 7 | pub metadata: Metadata, 8 | pub spec: IngressSpec, 9 | } 10 | 11 | impl Object for Ingress { 12 | fn kind(&self) -> &'static str { 13 | "Ingress" 14 | } 15 | 16 | fn kind_plural(&self) -> String { 17 | "Ingresses".to_string() 18 | } 19 | 20 | fn name(&self) -> &String { 21 | &self.metadata.name 22 | } 23 | } 24 | 25 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 26 | pub struct IngressSpec { 27 | /// A list of host rules used to configure the Ingress. 28 | pub rules: Vec, 29 | } 30 | 31 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 32 | pub struct IngressRule { 33 | /// Host is the fully qualified domain name of a network host. 34 | /// It should always end with .minik8s.com 35 | /// Host is automatically generated if not specified 36 | pub host: Option, 37 | /// A collection of paths that map requests to services. 38 | pub paths: Vec, 39 | } 40 | 41 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 42 | pub struct IngressPath { 43 | /// Path is matched against the path of an incoming request. 44 | pub path: String, 45 | /// Service references a Service as a Backend. 46 | pub service: IngressService, 47 | } 48 | 49 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 50 | pub struct IngressService { 51 | /// Name is the referenced service. The service must exist in the same namespace as the Ingress object. 
52 | pub name: String, 53 | /// Port of the referenced service. A port number is required for a IngressServiceBackend. 54 | pub port: u16, 55 | } 56 | -------------------------------------------------------------------------------- /resources/src/objects/metrics.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use chrono::NaiveDateTime; 4 | use serde::{Deserialize, Serialize}; 5 | use strum::Display; 6 | 7 | #[derive(Debug, Serialize, Deserialize, Hash, Clone, Eq, PartialEq, Display)] 8 | pub enum Resource { 9 | CPU, 10 | Memory, 11 | } 12 | 13 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 14 | /// Metrics of containers in a pod. 15 | pub struct PodMetrics { 16 | /// Pod name 17 | pub name: String, 18 | pub timestamp: NaiveDateTime, 19 | /// Duration in seconds over which the metrics were gathered. 20 | pub window: u32, 21 | /// Metrics for all containers collected within the same time window. 22 | pub containers: Vec, 23 | } 24 | 25 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 26 | pub struct ContainerMetrics { 27 | pub name: String, 28 | pub usage: HashMap, 29 | } 30 | 31 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 32 | /// An overall summary of PodMetrics. 33 | pub struct PodMetric { 34 | pub timestamp: NaiveDateTime, 35 | /// Duration in seconds over which the metrics were gathered. 36 | pub window: u32, 37 | pub value: i64, 38 | } 39 | 40 | /// A mapping from pod names to metrics. 
41 | pub type PodMetricsInfo = HashMap; 42 | 43 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 44 | pub struct FunctionMetric { 45 | /// Function name 46 | pub name: String, 47 | pub timestamp: NaiveDateTime, 48 | pub value: i64, 49 | } 50 | -------------------------------------------------------------------------------- /resources/src/objects/object_reference.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 4 | pub struct ObjectReference { 5 | /// Kind of the referent. 6 | pub kind: String, 7 | /// Name of the referent. 8 | pub name: String, 9 | } 10 | 11 | impl ObjectReference { 12 | pub fn new(kind: String, name: String) -> ObjectReference { 13 | ObjectReference { 14 | kind, 15 | name, 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /resources/src/objects/replica_set.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | use super::{ 4 | function::Function, object_reference::ObjectReference, pod::PodTemplateSpec, Labels, Metadata, 5 | Object, 6 | }; 7 | 8 | /// ReplicaSet ensures that a specified number of pod replicas are running 9 | /// at any given time. 10 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 11 | pub struct ReplicaSet { 12 | pub metadata: Metadata, 13 | /// Defines the specification of the desired behavior of the ReplicaSet. 14 | pub spec: ReplicaSetSpec, 15 | /// The most recently observed status of the ReplicaSet. 16 | /// This data may be out of date by some window of time. 17 | /// Populated by the system. Read-only. 
18 | pub status: Option, 19 | } 20 | 21 | impl Object for ReplicaSet { 22 | fn kind(&self) -> &'static str { 23 | "ReplicaSet" 24 | } 25 | 26 | fn name(&self) -> &String { 27 | &self.metadata.name 28 | } 29 | } 30 | 31 | impl std::fmt::Display for ReplicaSet { 32 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 33 | writeln!(f, "{:<16} {}", "Name:", self.metadata.name)?; 34 | writeln!(f, "{:<16} {}", "Selector:", self.spec.selector.to_string())?; 35 | writeln!(f, "{:<16} {}", "Labels:", self.metadata.labels.to_string())?; 36 | if self.status.is_none() { 37 | return Ok(()); 38 | } 39 | let status = self.status.as_ref().unwrap(); 40 | writeln!( 41 | f, 42 | "{:<16} {} ready / {} current / {} desired", 43 | "Replicas:", status.ready_replicas, status.replicas, self.spec.replicas 44 | ) 45 | } 46 | } 47 | 48 | impl ReplicaSet { 49 | pub fn from_function(func: &Function) -> Self { 50 | let func_name = func.metadata.name.to_owned(); 51 | let metadata = Metadata { 52 | name: func_name.to_owned(), 53 | uid: None, 54 | labels: func.metadata.labels.clone(), 55 | owner_references: vec![ObjectReference { 56 | kind: "function".to_string(), 57 | name: func_name, 58 | }], 59 | }; 60 | let spec = ReplicaSetSpec { 61 | selector: func.metadata.labels.clone(), 62 | template: PodTemplateSpec::from_function(func), 63 | replicas: 0, 64 | }; 65 | Self { 66 | metadata, 67 | spec, 68 | status: None, 69 | } 70 | } 71 | } 72 | 73 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 74 | pub struct ReplicaSetSpec { 75 | /// A label query over pods that should match the replica count. 76 | /// Label keys and values that must match 77 | /// in order to be controlled by this replica set. 78 | /// It must match the pod template's labels. 79 | /// Required. 80 | pub selector: Labels, 81 | /// The object that describes the pod 82 | /// that will be created if insufficient replicas are detected. 
83 | pub template: PodTemplateSpec, 84 | /// The number of desired replicas. 85 | /// This is a pointer to distinguish between explicit zero and unspecified. 86 | /// Defaults to 1. 87 | #[serde(default = "default_replicas")] 88 | pub replicas: u32, 89 | } 90 | 91 | fn default_replicas() -> u32 { 92 | 1 93 | } 94 | 95 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)] 96 | #[serde(rename_all = "camelCase")] 97 | pub struct ReplicaSetStatus { 98 | /// The most recently oberved number of replicas. 99 | pub replicas: u32, 100 | /// The number of pods targeted by this ReplicaSet with a Ready Condition. 101 | pub ready_replicas: u32, 102 | } 103 | -------------------------------------------------------------------------------- /resources/src/objects/service.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::{HashMap, HashSet}, 3 | fmt::Debug, 4 | net::Ipv4Addr, 5 | }; 6 | 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use super::{object_reference::ObjectReference, Metadata, Object}; 10 | use crate::objects::Labels; 11 | 12 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 13 | pub struct Service { 14 | pub metadata: Metadata, 15 | pub spec: ServiceSpec, 16 | } 17 | 18 | impl Object for Service { 19 | fn kind(&self) -> &'static str { 20 | "Service" 21 | } 22 | 23 | fn name(&self) -> &String { 24 | &self.metadata.name 25 | } 26 | } 27 | 28 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 29 | #[serde(rename_all = "camelCase")] 30 | pub struct ServiceSpec { 31 | /// Route service traffic to pods with label keys and values matching this selector. 32 | pub selector: Labels, 33 | /// The list of ports that are exposed by this service. 
34 | pub ports: Vec, 35 | /// a collection of endpoints that implement the actual service 36 | #[serde(default)] 37 | pub endpoints: HashSet, 38 | /// clusterIP is the IP address of the service and is usually assigned randomly 39 | pub cluster_ip: Option, 40 | } 41 | 42 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 43 | #[serde(rename_all = "camelCase")] 44 | pub struct ServicePort { 45 | /// The port that will be exposed by this service. 46 | pub port: u16, 47 | /// Number of the port to access on the pods targeted by the service. 48 | pub target_port: u16, 49 | } 50 | 51 | impl Service { 52 | pub fn from_function(name: &str, func_name: &str, cluster_ip: Ipv4Addr) -> Self { 53 | let metadata = Metadata { 54 | name: name.to_owned(), 55 | uid: Some(uuid::Uuid::new_v4()), 56 | labels: Labels::default(), 57 | owner_references: vec![ObjectReference { 58 | kind: "function".to_string(), 59 | name: func_name.to_string(), 60 | }], 61 | }; 62 | let spec = ServiceSpec { 63 | selector: Labels(HashMap::from([( 64 | "function".to_string(), 65 | func_name.to_string(), 66 | )])), 67 | ports: vec![ServicePort { 68 | port: 80, 69 | target_port: 80, 70 | }], 71 | endpoints: HashSet::new(), 72 | cluster_ip: Some(cluster_ip), 73 | }; 74 | Self { 75 | metadata, 76 | spec, 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /resources/src/objects/workflow.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use serde_json::Value; 5 | 6 | use super::{Metadata, Object}; 7 | 8 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 9 | pub struct Workflow { 10 | pub metadata: Metadata, 11 | pub spec: WorkflowSpec, 12 | } 13 | 14 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 15 | #[serde(rename_all = "camelCase")] 16 | pub struct WorkflowSpec { 17 | /// A string that must 
exactly match (is case sensitive) the name of one of the state objects. 18 | pub start_at: String, 19 | /// An object containing a comma-delimited set of states. 20 | pub states: HashMap, 21 | } 22 | 23 | impl Object for Workflow { 24 | fn kind(&self) -> &'static str { 25 | "Workflow" 26 | } 27 | 28 | fn name(&self) -> &String { 29 | &self.metadata.name 30 | } 31 | } 32 | 33 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 34 | #[serde(tag = "type")] 35 | pub enum State { 36 | Task(Task), 37 | Choice(Choice), 38 | } 39 | 40 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 41 | pub struct Task { 42 | /// Identifies the specific function to excute. 43 | pub resource: String, 44 | /// The next field that is run when the task state is complete. 45 | /// If it's None, the state will end the execution. 46 | #[serde(default)] 47 | pub next: Option, 48 | } 49 | 50 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 51 | pub struct Choice { 52 | pub rules: Vec, 53 | pub default: String, 54 | } 55 | 56 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 57 | pub struct ChoiceRule { 58 | #[serde(flatten)] 59 | pub comparison: Comparison, 60 | pub next: String, 61 | } 62 | 63 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 64 | #[serde(tag = "type")] 65 | pub enum Comparison { 66 | FieldEquals { field: String, content: String }, 67 | FieldNumEquals { field: String, content: i32 }, 68 | } 69 | 70 | impl ChoiceRule { 71 | pub fn match_with(&self, text: &str) -> bool { 72 | let args: serde_json::Map = if let Ok(args) = serde_json::from_str(text) { 73 | args 74 | } else { 75 | return false; 76 | }; 77 | match self.comparison { 78 | Comparison::FieldEquals { 79 | ref field, 80 | ref content, 81 | } => { 82 | if let Some(Value::String(v)) = args.get(field) { 83 | v == content 84 | } else { 85 | false 86 | } 87 | }, 88 | Comparison::FieldNumEquals { 89 | ref field, 90 | ref content, 91 | } => { 92 | if let 
Some(Value::Number(v)) = args.get(field) { 93 | if let Some(v) = v.as_i64() { 94 | v == *content as i64 95 | } else { 96 | false 97 | } 98 | } else { 99 | false 100 | } 101 | }, 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /resources/src/utils/mod.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | 3 | pub fn first_error_or_ok(results: Vec>) -> Result<()> { 4 | results 5 | .into_iter() 6 | .find_map(|r| r.err()) 7 | .map_or(Ok(()), Err) 8 | } 9 | -------------------------------------------------------------------------------- /rkube_proxy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "rkube-proxy" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | anyhow = "1.0.56" 8 | dashmap = "5.3.3" 9 | dotenv = "0.15.0" 10 | iptables = "0.5.0" 11 | lazy_static = "1.4.0" 12 | rand = "0.8.5" 13 | reqwest = {version = "0.11", features = ["blocking", "json"]} 14 | resources = {path = "../resources"} 15 | tokio = {version = "1.17.0", features = ["full"]} 16 | tokio-tungstenite = "0.17.1" 17 | tracing = "0.1.33" 18 | tracing-subscriber = "0.3.10" 19 | -------------------------------------------------------------------------------- /rkube_proxy/src/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | use std::env; 5 | 6 | use anyhow::Result; 7 | use k8s_iptables::K8sIpTables; 8 | use reqwest::Url; 9 | use resources::{ 10 | informer::Store, 11 | models::NodeConfig, 12 | objects::{service::Service, Object}, 13 | }; 14 | use tokio::{select, sync::mpsc}; 15 | 16 | use crate::utils::create_services_informer; 17 | 18 | mod k8s_iptables; 19 | mod utils; 20 | 21 | #[derive(Debug)] 22 | pub enum Notification { 23 | Add(Service), 24 | Update(Service, Service), 25 | Delete(Service), 26 | } 
27 | 28 | #[derive(Debug)] 29 | pub struct ResyncNotification; 30 | 31 | lazy_static! { 32 | static ref CONFIG: NodeConfig = { 33 | dotenv::from_path("/etc/rminik8s/node.env").ok(); 34 | NodeConfig { 35 | etcd_endpoint: match env::var("ETCD_ENDPOINT") { 36 | Ok(url) => Url::parse(url.as_str()).unwrap(), 37 | Err(_) => Url::parse("http://127.0.0.1:2379/").unwrap(), 38 | }, 39 | api_server_endpoint: match env::var("API_SERVER_ENDPOINT") { 40 | Ok(url) => Url::parse(url.as_str()).unwrap(), 41 | Err(_) => Url::parse("http://127.0.0.1:8080/").unwrap(), 42 | }, 43 | } 44 | }; 45 | } 46 | 47 | #[tokio::main] 48 | async fn main() -> Result<()> { 49 | tracing_subscriber::fmt::init(); 50 | tracing::info!("rKube-Proxy started"); 51 | 52 | let mut ipt = K8sIpTables::new(); 53 | 54 | let (tx, mut rx) = mpsc::channel::(16); 55 | let (resync_tx, mut resync_rx) = mpsc::channel::(16); 56 | 57 | let (svc_informer, svc_store) = create_services_informer(tx.clone(), resync_tx); 58 | let informer_handler = tokio::spawn(async move { svc_informer.run().await }); 59 | 60 | loop { 61 | select! 
{ 62 | _ = resync_rx.recv() => { 63 | handle_resync(&mut ipt, svc_store.to_owned()).await?; 64 | }, 65 | Some(n) = rx.recv() => { 66 | if let Err(e) = handle_notification(&mut ipt, n).await { 67 | tracing::warn!("Error handling notification, caused by: {}", e); 68 | } 69 | }, 70 | else => break, 71 | } 72 | } 73 | 74 | informer_handler.await??; 75 | 76 | Ok(()) 77 | } 78 | 79 | async fn handle_resync(ipt: &mut K8sIpTables, svc_store: Store) -> Result<()> { 80 | ipt.cleanup().expect("Failed to cleanup ip table"); 81 | let store = svc_store.read().await; 82 | for (_, svc) in store.iter() { 83 | ipt.add_svc(svc); 84 | } 85 | 86 | tracing::info!("Resync succeeded!"); 87 | Ok(()) 88 | } 89 | 90 | async fn handle_notification(ipt: &mut K8sIpTables, n: Notification) -> Result<()> { 91 | match n { 92 | Notification::Add(new) => { 93 | ipt.add_svc(&new); 94 | }, 95 | Notification::Update(old, new) => { 96 | // TODO: all right, I am lazy 97 | ipt.del_svc(old.name()); 98 | ipt.add_svc(&new); 99 | }, 100 | Notification::Delete(old) => { 101 | ipt.del_svc(old.name()); 102 | }, 103 | } 104 | 105 | Ok(()) 106 | } 107 | -------------------------------------------------------------------------------- /rkube_proxy/src/utils.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, Ok}; 2 | use resources::{ 3 | informer::{EventHandler, Informer, ListerWatcher, ResyncHandler, Store}, 4 | models::Response, 5 | objects::service::Service, 6 | }; 7 | use tokio::sync::mpsc::Sender; 8 | use tokio_tungstenite::connect_async; 9 | 10 | use crate::{Notification, ResyncNotification, CONFIG}; 11 | 12 | pub fn create_services_informer( 13 | tx: Sender, 14 | tx_resync: Sender, 15 | ) -> (Informer, Store) { 16 | let lw = ListerWatcher { 17 | lister: Box::new(|_| { 18 | Box::pin(async { 19 | let res = reqwest::get(CONFIG.api_server_endpoint.join("/api/v1/services")?) 20 | .await? 
21 | .json::>>() 22 | .await?; 23 | let res = res.data.ok_or_else(|| anyhow!("Lister failed"))?; 24 | Ok(res) 25 | }) 26 | }), 27 | watcher: Box::new(|_| { 28 | Box::pin(async { 29 | let mut url = CONFIG.api_server_endpoint.join("/api/v1/watch/services")?; 30 | url.set_scheme("ws").ok(); 31 | let (stream, _) = connect_async(url).await?; 32 | Ok(stream) 33 | }) 34 | }), 35 | }; 36 | 37 | // create event handler closures 38 | let tx_add = tx.clone(); 39 | let tx_update = tx.clone(); 40 | let tx_delete = tx; 41 | let eh = EventHandler { 42 | add_cls: Box::new(move |new| { 43 | let tx_add = tx_add.clone(); 44 | Box::pin(async move { 45 | tx_add.send(Notification::Add(new)).await?; 46 | Ok(()) 47 | }) 48 | }), 49 | update_cls: Box::new(move |(old, new)| { 50 | let tx_update = tx_update.clone(); 51 | Box::pin(async move { 52 | tx_update.send(Notification::Update(old, new)).await?; 53 | Ok(()) 54 | }) 55 | }), 56 | delete_cls: Box::new(move |old| { 57 | let tx_delete = tx_delete.clone(); 58 | Box::pin(async move { 59 | tx_delete.send(Notification::Delete(old)).await?; 60 | Ok(()) 61 | }) 62 | }), 63 | }; 64 | let rh = ResyncHandler(Box::new(move |_| { 65 | let resync_tx = tx_resync.clone(); 66 | Box::pin(async move { 67 | resync_tx.send(ResyncNotification).await?; 68 | Ok(()) 69 | }) 70 | })); 71 | 72 | // start the informer 73 | let informer = Informer::new(lw, eh, rh); 74 | let store = informer.get_store(); 75 | (informer, store) 76 | } 77 | -------------------------------------------------------------------------------- /rkubectl/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "rkubectl" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | anyhow = "1.0.56" 8 | chrono = "0.4.19" 9 | chrono-humanize = "0.2.1" 10 | clap = {version = "3.1.8", features = ["derive"]} 11 | clap_complete = "3.1.4" 12 | futures-util = "0.3.21" 13 | lazy_static = "1.4.0" 14 | reqwest = {version = "0.11", 
features = ["blocking", "json", "multipart"]} 15 | resources = {path = "../resources"} 16 | serde = {version = "1.0.136", features = ["derive"]} 17 | serde_yaml = "0.8.23" 18 | strum = "0.24.0" 19 | serde_json = "1.0.81" 20 | termion = "1.5.6" 21 | tokio = { version = "1", features = ["full"] } 22 | tokio-tungstenite = "0.17.1" 23 | -------------------------------------------------------------------------------- /rkubectl/src/completion.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use anyhow::Result; 4 | use clap::{Args, Command}; 5 | use clap_complete::{ 6 | generate, 7 | shells::{Bash, Elvish, Fish, PowerShell, Shell, Zsh}, 8 | }; 9 | 10 | #[derive(Args)] 11 | pub struct Arg { 12 | /// Type of shell 13 | #[clap(arg_enum)] 14 | shell: Shell, 15 | } 16 | 17 | impl Arg { 18 | pub async fn handle(&self, app: &mut Command<'_>) -> Result<()> { 19 | // NOTE: We should be able to use `self.shell.generate` 20 | // if bin_name can be automatically propagated to subcommands 21 | const BIN_NAME: &str = "rkubectl"; 22 | let buf = &mut io::stdout(); 23 | match self.shell { 24 | Shell::Bash => generate(Bash, app, BIN_NAME, buf), 25 | Shell::Elvish => generate(Elvish, app, BIN_NAME, buf), 26 | Shell::Fish => generate(Fish, app, BIN_NAME, buf), 27 | Shell::PowerShell => generate(PowerShell, app, BIN_NAME, buf), 28 | Shell::Zsh => generate(Zsh, app, BIN_NAME, buf), 29 | _ => println!("Unsupported shell"), 30 | } 31 | Ok(()) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /rkubectl/src/create.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::File, path::PathBuf}; 2 | 3 | use anyhow::{anyhow, Context, Result}; 4 | use clap::Args; 5 | use reqwest::{ 6 | multipart::{self, Part}, 7 | Client, 8 | }; 9 | use resources::objects::Object; 10 | use serde::Deserialize; 11 | 12 | use crate::{objects::KubeObject, 
utils::gen_url}; 13 | 14 | #[derive(Args)] 15 | pub struct Arg { 16 | /// The definition YAML file of the resource to create 17 | #[clap(short, long, parse(from_os_str), value_name = "FILE")] 18 | file: PathBuf, 19 | /// ZIP code file to upload, required when creating jobs and functions 20 | #[clap(short, long, parse(from_os_str), value_name = "ZIP")] 21 | code_file: Option, 22 | } 23 | 24 | impl Arg { 25 | pub async fn handle(&self) -> Result<()> { 26 | let path = &self.file.as_path(); 27 | let file = 28 | File::open(path).with_context(|| format!("Failed to open file {}", path.display()))?; 29 | let object: KubeObject = serde_yaml::from_reader(file) 30 | .with_context(|| format!("Failed to parse file {}", path.display()))?; 31 | let msg: String = match object { 32 | KubeObject::GpuJob(..) | KubeObject::Function(..) => { 33 | let code_path = self 34 | .code_file 35 | .to_owned() 36 | .ok_or_else(|| anyhow!("Code file is not provided"))?; 37 | create_with_file(&object, code_path) 38 | .await 39 | .with_context(|| format!("Failed to create using file {}", path.display()))? 40 | }, 41 | _ => create(&object) 42 | .await 43 | .with_context(|| format!("Failed to create using file {}", path.display()))?, 44 | }; 45 | 46 | println!("{}", msg); 47 | Ok(()) 48 | } 49 | } 50 | 51 | async fn create(object: &KubeObject) -> Result { 52 | let client = Client::new(); 53 | let url = gen_url(object.kind_plural(), None)?; 54 | let res = client 55 | .post(url) 56 | .json(&object) 57 | .send() 58 | .await? 
59 | .json::() 60 | .await?; 61 | match res.cause { 62 | Some(cause) => Err(anyhow::anyhow!("{}: {}", res.msg, cause)), 63 | None => Ok(res.msg), 64 | } 65 | } 66 | 67 | async fn create_with_file(object: &KubeObject, path: PathBuf) -> Result { 68 | let client = Client::builder().pool_idle_timeout(None).build()?; 69 | let url = gen_url(object.kind_plural(), None)?; 70 | 71 | // Load file as a part 72 | let bytes = std::fs::read(&path)?; 73 | let mut file = Part::bytes(bytes); 74 | if let Some(file_name) = path 75 | .file_name() 76 | .map(|filename| filename.to_string_lossy().into_owned()) 77 | { 78 | file = file.file_name(file_name); 79 | } 80 | 81 | let form = multipart::Form::new() 82 | .text( 83 | object.kind().to_lowercase(), 84 | serde_json::to_string(&object)?, 85 | ) 86 | .part("code", file); 87 | let res = client.post(url).multipart(form).send().await?; 88 | 89 | match res.error_for_status() { 90 | Ok(res) => Ok(res.json::().await?.msg), 91 | Err(res) => Err(anyhow!("{}", res.to_string())), 92 | } 93 | } 94 | 95 | #[derive(Debug, Deserialize)] 96 | struct CreateRes { 97 | msg: String, 98 | cause: Option, 99 | } 100 | -------------------------------------------------------------------------------- /rkubectl/src/delete.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Args; 3 | use reqwest::Client; 4 | use serde::Deserialize; 5 | 6 | use crate::{utils::gen_url, ResourceKind}; 7 | 8 | #[derive(Args)] 9 | pub struct Arg { 10 | /// Kind of resource 11 | #[clap(arg_enum)] 12 | kind: ResourceKind, 13 | /// Name of resource 14 | name: String, 15 | } 16 | 17 | impl Arg { 18 | pub async fn handle(&self) -> Result<()> { 19 | let client = Client::new(); 20 | let url = gen_url(self.kind.to_string(), Some(&self.name))?; 21 | let res = client.delete(url).send().await?.json::().await?; 22 | println!("{}", res.msg); 23 | Ok(()) 24 | } 25 | } 26 | 27 | #[derive(Debug, Deserialize)] 28 | struct DeleteRes 
{ 29 | msg: String, 30 | } 31 | -------------------------------------------------------------------------------- /rkubectl/src/describe.rs: -------------------------------------------------------------------------------- 1 | use std::vec::Vec; 2 | 3 | use anyhow::Result; 4 | use clap::Args; 5 | use reqwest::Client; 6 | use resources::{models::Response, objects::KubeObject}; 7 | 8 | use crate::{utils::gen_url, ResourceKind}; 9 | 10 | #[derive(Args)] 11 | pub struct Arg { 12 | /// Kind of resource 13 | #[clap(arg_enum)] 14 | kind: ResourceKind, 15 | /// Name of resource 16 | name: Option, 17 | } 18 | 19 | impl Arg { 20 | pub async fn handle(&self) -> Result<()> { 21 | let client = Client::new(); 22 | let url = gen_url(self.kind.to_string(), self.name.as_ref())?; 23 | let data = if self.name.is_none() { 24 | let res = client 25 | .get(url) 26 | .send() 27 | .await? 28 | .json::>>() 29 | .await?; 30 | res.data.unwrap_or_default() 31 | } else { 32 | let res = client 33 | .get(url) 34 | .send() 35 | .await? 
36 | .json::>() 37 | .await?; 38 | res.data.map_or_else(Vec::new, |data| vec![data]) 39 | }; 40 | 41 | for object in data { 42 | match object { 43 | KubeObject::Pod(pod) => { 44 | println!("{}", pod); 45 | }, 46 | KubeObject::ReplicaSet(rs) => { 47 | println!("{}", rs); 48 | }, 49 | KubeObject::Node(node) => { 50 | println!("{}", node); 51 | }, 52 | _ => { 53 | println!("{:#?}", object); 54 | }, 55 | } 56 | } 57 | Ok(()) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /rkubectl/src/exec.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io::{stdout, Read, Write}, 3 | time::Duration, 4 | }; 5 | 6 | use anyhow::Result; 7 | use clap::Args; 8 | use futures_util::{stream::StreamExt, SinkExt}; 9 | use reqwest::Url; 10 | use termion::{async_stdin, raw::IntoRawMode}; 11 | use tokio::{spawn, time::sleep}; 12 | use tokio_tungstenite::{connect_async, tungstenite::Message}; 13 | 14 | use crate::CONFIG; 15 | 16 | #[derive(Args)] 17 | pub struct Arg { 18 | /// Pod name 19 | pod_name: String, 20 | /// Container name 21 | container_name: String, 22 | /// Command to execute 23 | #[clap(short, long)] 24 | command: String, 25 | } 26 | 27 | impl Arg { 28 | pub async fn handle(&self) -> Result<()> { 29 | let mut base_url = CONFIG.base_url.to_owned(); 30 | base_url.set_scheme("ws").unwrap(); 31 | let mut url = Url::parse( 32 | format!( 33 | "{}api/v1/pods/{}/containers/{}/exec", 34 | base_url, self.pod_name, self.container_name 35 | ) 36 | .as_str(), 37 | )?; 38 | url.set_query(Some(&format!("command={}", self.command))); 39 | 40 | let (stream, _) = connect_async(url).await?; 41 | let (mut sender, mut receiver) = stream.split(); 42 | // Pipe stdin into docker exec input 43 | spawn(async move { 44 | let mut stdin = async_stdin().bytes(); 45 | loop { 46 | if let Some(Ok(byte)) = stdin.next() { 47 | sender.send(Message::Binary(vec![byte])).await.ok(); 48 | if byte == 4 { 49 | // Ctrl-D 
50 | sender.close().await.ok(); 51 | return; 52 | } 53 | } else { 54 | sleep(Duration::from_nanos(10)).await; 55 | } 56 | } 57 | }); 58 | 59 | let stdout = stdout(); 60 | let mut stdout = stdout.lock().into_raw_mode()?; 61 | 62 | // Pipe docker exec output into stdout 63 | while let Some(Ok(output)) = receiver.next().await { 64 | if let Message::Binary(output) = output { 65 | stdout.write_all(&output)?; 66 | stdout.flush()?; 67 | } 68 | } 69 | 70 | Ok(()) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /rkubectl/src/logs.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use clap::Args; 3 | use reqwest::Client; 4 | use resources::models::{ErrResponse, Response}; 5 | use serde::Deserialize; 6 | 7 | use crate::utils::gen_url; 8 | 9 | #[derive(Args)] 10 | pub struct Arg { 11 | /// Job or Pod name 12 | name: String, 13 | /// Container name, optional if there's only one container 14 | container_name: Option, 15 | /// Tail option 16 | #[clap(short, long)] 17 | tail: Option, 18 | #[clap(short, parse(from_flag))] 19 | job: bool, 20 | } 21 | 22 | impl Arg { 23 | pub async fn handle(&self) -> Result<()> { 24 | let client = Client::new(); 25 | let url = if self.job { 26 | let base_url = gen_url("gpujobs".to_string(), Some(&self.name))?; 27 | format!("{base_url}/logs") 28 | } else { 29 | let base_url = gen_url("pods".to_string(), Some(&self.name))?; 30 | match self.container_name { 31 | Some(ref container_name) => format!("{base_url}/containers/{container_name}/logs"), 32 | None => format!("{base_url}/logs"), 33 | } 34 | }; 35 | 36 | let res = client 37 | .get(url) 38 | .query(&[("tail", &self.tail.as_ref().unwrap_or(&"all".to_string()))]) 39 | .send() 40 | .await? 
41 | .json::() 42 | .await?; 43 | match res { 44 | LogsResponse::Ok(res) => print!("{}", res.data.unwrap_or_default()), 45 | LogsResponse::Err(res) => println!("{}: {}", res.msg, res.cause.unwrap_or_default()), 46 | } 47 | Ok(()) 48 | } 49 | } 50 | 51 | #[derive(Debug, Deserialize)] 52 | #[serde(untagged)] 53 | enum LogsResponse { 54 | // NOTE: must place error first 55 | // otherwise serde will incorrectly match ErrResponse as success 56 | Err(ErrResponse), 57 | Ok(Response), 58 | } 59 | -------------------------------------------------------------------------------- /rkubectl/src/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | use std::env; 5 | 6 | use anyhow::Result; 7 | use clap::{ArgEnum, IntoApp, Parser, Subcommand}; 8 | use reqwest::Url; 9 | use resources::objects; 10 | use strum::Display; 11 | 12 | mod completion; 13 | mod create; 14 | mod delete; 15 | mod describe; 16 | mod exec; 17 | mod get; 18 | mod logs; 19 | mod patch; 20 | mod utils; 21 | 22 | struct AppConfig { 23 | base_url: Url, 24 | } 25 | 26 | lazy_static! { 27 | static ref CONFIG: AppConfig = AppConfig { 28 | base_url: match env::var("API_SERVER_URL") { 29 | Ok(url) => Url::parse(url.as_str()).unwrap(), 30 | Err(_) => Url::parse("http://127.0.0.1:8080/").unwrap(), 31 | } 32 | }; 33 | } 34 | 35 | #[derive(Parser)] 36 | #[clap(author, version, about, long_about = None)] 37 | #[clap(propagate_version = true)] 38 | struct Cli { 39 | #[clap(subcommand)] 40 | command: Commands, 41 | } 42 | 43 | #[derive(Subcommand)] 44 | enum Commands { 45 | /// Create a resource using configuration file. 46 | Create(create::Arg), 47 | /// Delete a resource by name. 48 | Delete(delete::Arg), 49 | /// Get resources list. 50 | Get(get::Arg), 51 | /// Patch a resource. 52 | Patch(patch::Arg), 53 | /// Describe a resource. 54 | Describe(describe::Arg), 55 | /// Print pod container logs. 
56 | Logs(logs::Arg), 57 | /// Execute commands in a pod container. 58 | Exec(exec::Arg), 59 | /// Generate shell completion. 60 | /// 61 | /// Usage: echo 'source <(rkubectl completion bash)' >> ~/.bashrc 62 | Completion(completion::Arg), 63 | } 64 | 65 | #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ArgEnum, Display)] 66 | #[strum(serialize_all = "lowercase")] 67 | enum ResourceKind { 68 | Pods, 69 | ReplicaSets, 70 | Services, 71 | Ingresses, 72 | HorizontalPodAutoscalers, 73 | GpuJobs, 74 | Nodes, 75 | Functions, 76 | Workflows, 77 | } 78 | 79 | #[tokio::main] 80 | async fn main() -> Result<()> { 81 | let cli = Cli::parse(); 82 | 83 | match &cli.command { 84 | Commands::Create(arg) => arg.handle().await?, 85 | Commands::Delete(arg) => arg.handle().await?, 86 | Commands::Get(arg) => arg.handle().await?, 87 | Commands::Patch(arg) => arg.handle().await?, 88 | Commands::Describe(arg) => arg.handle().await?, 89 | Commands::Logs(arg) => arg.handle().await?, 90 | Commands::Exec(arg) => arg.handle().await?, 91 | Commands::Completion(arg) => arg.handle(&mut Cli::command()).await?, 92 | } 93 | 94 | Ok(()) 95 | } 96 | -------------------------------------------------------------------------------- /rkubectl/src/patch.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::File, path::PathBuf}; 2 | 3 | use anyhow::{anyhow, Context, Result}; 4 | use clap::Args; 5 | use reqwest::{ 6 | multipart::{self, Part}, 7 | Client, 8 | }; 9 | use resources::objects::Object; 10 | use serde::Deserialize; 11 | 12 | use crate::{ 13 | objects::KubeObject, 14 | utils::{gen_url, gen_url_from_object}, 15 | }; 16 | 17 | #[derive(Args)] 18 | pub struct Arg { 19 | #[clap(short, long, parse(from_os_str), value_name = "FILE")] 20 | file: PathBuf, 21 | #[clap(short, long, parse(from_os_str), value_name = "ZIP")] 22 | code_file: Option, 23 | } 24 | 25 | impl Arg { 26 | pub async fn handle(&self) -> Result<()> { 27 | let path = 
&self.file.as_path(); 28 | let file = 29 | File::open(path).with_context(|| format!("Failed to open file {}", path.display()))?; 30 | let object: KubeObject = serde_yaml::from_reader(file) 31 | .with_context(|| format!("Failed to parse file {}", path.display()))?; 32 | 33 | let msg: String = match object { 34 | KubeObject::Function(..) => { 35 | let code_path = self 36 | .code_file 37 | .to_owned() 38 | .ok_or_else(|| anyhow!("Code file is not provided"))?; 39 | patch_with_file(&object, code_path) 40 | .await 41 | .with_context(|| format!("Failed to update using file {}", path.display()))? 42 | }, 43 | _ => patch(&object) 44 | .await 45 | .with_context(|| format!("Failed to patch using file {}", path.display()))?, 46 | }; 47 | 48 | println!("{}", msg); 49 | Ok(()) 50 | } 51 | } 52 | 53 | async fn patch(object: &KubeObject) -> Result { 54 | let client = Client::new(); 55 | let url = gen_url_from_object(object)?; 56 | let res = client 57 | .patch(url) 58 | .json(&object) 59 | .send() 60 | .await? 61 | .json::() 62 | .await?; 63 | Ok(res.msg) 64 | } 65 | 66 | async fn patch_with_file(object: &KubeObject, path: PathBuf) -> Result { 67 | let client = Client::builder().pool_idle_timeout(None).build()?; 68 | let url = gen_url(object.kind_plural(), None)?; 69 | 70 | let res = client 71 | .delete(gen_url(object.kind_plural(), Some(object.name()))?) 72 | .send() 73 | .await? 
74 | .json::() 75 | .await?; 76 | if let Some(cause) = res.cause { 77 | return Err(anyhow::anyhow!("{}: {}", res.msg, cause)); 78 | } 79 | 80 | // Load file as a part 81 | let bytes = std::fs::read(&path)?; 82 | let mut file = Part::bytes(bytes); 83 | if let Some(file_name) = path 84 | .file_name() 85 | .map(|filename| filename.to_string_lossy().into_owned()) 86 | { 87 | file = file.file_name(file_name); 88 | } 89 | 90 | let form = multipart::Form::new() 91 | .text( 92 | object.kind().to_lowercase(), 93 | serde_json::to_string(&object)?, 94 | ) 95 | .part("code", file); 96 | let res = client 97 | .post(url) 98 | .multipart(form) 99 | .send() 100 | .await? 101 | .json::() 102 | .await?; 103 | match res.cause { 104 | Some(cause) => Err(anyhow::anyhow!("{}: {}", res.msg, cause)), 105 | None => Ok(res.msg), 106 | } 107 | } 108 | 109 | #[derive(Debug, Deserialize)] 110 | struct PatchRes { 111 | msg: String, 112 | cause: Option, 113 | } 114 | -------------------------------------------------------------------------------- /rkubectl/src/utils.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Result; 2 | use chrono::{Local, NaiveDateTime}; 3 | use chrono_humanize::{Accuracy, HumanTime, Tense}; 4 | use resources::objects::Object; 5 | 6 | use crate::{objects::KubeObject, Url, CONFIG}; 7 | 8 | pub fn gen_url_from_object(object: &KubeObject) -> Result { 9 | let url = CONFIG.base_url.to_owned(); 10 | let uri = object.uri(); 11 | Ok(url.join(uri.as_str())?) 12 | } 13 | 14 | pub fn gen_url(mut kind_plural: String, name: Option<&String>) -> Result { 15 | let url = CONFIG.base_url.to_owned(); 16 | kind_plural = kind_plural.to_lowercase(); 17 | let path = if let Some(name) = name { 18 | format!("api/v1/{}/{}", kind_plural, name) 19 | } else { 20 | format!("api/v1/{}", kind_plural) 21 | }; 22 | Ok(url.join(path.as_str())?) 
23 | } 24 | 25 | pub fn calc_age(time: NaiveDateTime) -> String { 26 | let d = HumanTime::from(Local::now().naive_utc() - time); 27 | d.to_text_en(Accuracy::Rough, Tense::Present) 28 | } 29 | -------------------------------------------------------------------------------- /rkubelet/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "rkubelet" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | anyhow = "1.0.56" 8 | axum = {version = "0.5.1", features = ["ws"]} 9 | axum-macros = "0.2.0" 10 | bollard = "0.12.0" 11 | chrono = "0.4.19" 12 | clap = { version = "3.1.13", features = ["derive"] } 13 | config = { version = "0.13.1", features = ["yaml"] } 14 | futures = "0.3" 15 | interfaces = "0.0.8" 16 | lazy_static = "1.4.0" 17 | reqwest = {version = "0.11", features = ["blocking", "json"]} 18 | resources = {path = "../resources"} 19 | serde = "1.0.136" 20 | serde_json = "1.0.79" 21 | serde_yaml = "0.8.23" 22 | sysinfo = "0.23.12" 23 | tokio = { version = "1", features = ["full"] } 24 | tokio-tungstenite = "0.17.1" 25 | tracing = "0.1.33" 26 | tracing-subscriber = "0.3.10" 27 | uuid = {version = "0.8", features = ["v4"]} 28 | -------------------------------------------------------------------------------- /rkubelet/src/config.rs: -------------------------------------------------------------------------------- 1 | use anyhow::Context; 2 | use bollard::Docker; 3 | use config::{Config, File}; 4 | use lazy_static::lazy_static; 5 | use resources::config::kubelet::KubeletConfig; 6 | 7 | lazy_static! 
/// Pod change event consumed by the kubelet's workers.
#[derive(Debug)]
pub enum PodUpdate {
    /// A pod was created.
    Add(Pod),
    /// A pod changed; carries two pod snapshots — presumably (old, new),
    /// TODO confirm the order against the informer that produces these.
    Update(Pod, Pod),
    /// A pod was removed.
    Delete(Pod),
}
31 | } 32 | let mut pod = Pod::load(pod.unwrap().to_owned())?; 33 | drop(store); 34 | 35 | let changed = pod.update_status().await.unwrap_or_else(|err| { 36 | tracing::error!("Failed to update status for pod {}: {:#?}", name, err); 37 | false 38 | }); 39 | if changed { 40 | tracing::info!("Pod {} status changed", name); 41 | let res = self.post_status(&pod).await; 42 | match res { 43 | Ok(_) => { 44 | tracing::info!("Posted status for pod {}", name); 45 | }, 46 | Err(err) => { 47 | tracing::error!("Failed to post status for {}: {:#?}", name, err); 48 | }, 49 | } 50 | } 51 | } 52 | drop(store); 53 | sleep(std::time::Duration::from_secs( 54 | CONFIG.pod_status_update_frequency, 55 | )) 56 | .await; 57 | } 58 | } 59 | 60 | async fn post_status(&self, pod: &Pod) -> Result<()> { 61 | let client = reqwest::Client::new(); 62 | let payload = pod.object(); 63 | client 64 | .put(format!( 65 | "{}/api/v1/pods/{}", 66 | CONFIG.cluster.api_server_url, 67 | pod.metadata().name 68 | )) 69 | .json(&payload) 70 | .send() 71 | .await? 
72 | .json::>() 73 | .await?; 74 | Ok(()) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /rkubelet/src/volume.rs: -------------------------------------------------------------------------------- 1 | use std::{fs::create_dir_all, path::PathBuf}; 2 | 3 | use anyhow::{Context, Result}; 4 | use resources::objects::{pod, pod::VolumeConfig}; 5 | 6 | #[derive(Debug)] 7 | pub struct Volume { 8 | pod_dir: PathBuf, 9 | volume: pod::Volume, 10 | } 11 | 12 | impl Volume { 13 | pub fn new(pod_dir: PathBuf, volume: pod::Volume) -> Self { 14 | Self { 15 | pod_dir, 16 | volume, 17 | } 18 | } 19 | 20 | pub fn create(pod_dir: PathBuf, volume: pod::Volume) -> Result { 21 | let volume = Volume::new(pod_dir, volume); 22 | volume.provision()?; 23 | Ok(volume) 24 | } 25 | 26 | fn provision(&self) -> Result<()> { 27 | match &self.volume.config { 28 | VolumeConfig::EmptyDir(_) => self.provision_empty_dir(), 29 | VolumeConfig::HostPath(path) => self.provision_host_path(path), 30 | } 31 | } 32 | 33 | fn provision_host_path(&self, _path: &str) -> Result<()> { 34 | Ok(()) 35 | } 36 | 37 | fn provision_empty_dir(&self) -> Result<()> { 38 | create_dir_all(self.host_src()) 39 | .with_context(|| format!("Failed to create empty dir volume {}", self.volume.name)) 40 | } 41 | 42 | pub fn host_src(&self) -> String { 43 | match &self.volume.config { 44 | VolumeConfig::EmptyDir(_) => { 45 | let mut path = PathBuf::from(&self.pod_dir); 46 | path.push("volumes"); 47 | path.push(&self.volume.name); 48 | path.to_str().unwrap().to_string() 49 | }, 50 | VolumeConfig::HostPath(path) => path.to_owned(), 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | unstable_features = true 2 | imports_granularity = "Crate" 3 | match_block_trailing_comma = true 4 | normalize_comments = true 5 | 
condense_wildcard_suffixes = true 6 | struct_lit_single_line = false 7 | group_imports = "StdExternalCrate" 8 | -------------------------------------------------------------------------------- /scheduler/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "scheduler" 4 | version = "0.1.0" 5 | 6 | [dependencies] 7 | anyhow = {version = "1.0.56", features = ["backtrace"]} 8 | dashmap = "5.3.3" 9 | dotenv = "0.15.0" 10 | lazy_static = "1.4.0" 11 | reqwest = {version = "0.11", features = ["blocking", "json"]} 12 | resources = {path = "../resources"} 13 | serde_json = "1.0.79" 14 | tokio = {version = "1.17.0", features = ["full"]} 15 | tokio-tungstenite = "0.17.1" 16 | tracing = "0.1.32" 17 | tracing-subscriber = "0.3.10" 18 | -------------------------------------------------------------------------------- /scheduler/src/algorithm/dummy.rs: -------------------------------------------------------------------------------- 1 | use resources::objects::{object_reference::ObjectReference, pod::Pod}; 2 | 3 | use crate::cache::Cache; 4 | 5 | #[allow(dead_code)] 6 | pub fn dummy(_: &Pod, _: &Cache) -> Option { 7 | Some(ObjectReference { 8 | kind: "node".to_string(), 9 | name: "localhost".to_string(), 10 | }) 11 | } 12 | -------------------------------------------------------------------------------- /scheduler/src/algorithm/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod dummy; 2 | pub mod simple; 3 | -------------------------------------------------------------------------------- /scheduler/src/algorithm/simple.rs: -------------------------------------------------------------------------------- 1 | use resources::objects::{object_reference::ObjectReference, pod::Pod}; 2 | 3 | use crate::cache::{Cache, NodeState}; 4 | 5 | pub fn simple(pod: &Pod, cache: &Cache) -> Option { 6 | let mut candidate = None; 7 | cache 8 | .node_states 9 | .iter() 
10 | .filter(|(_, state)| state.is_ready) 11 | .filter(|(_, state)| state.labels.matches(&pod.spec.node_selector)) 12 | .for_each(|(_, state)| { 13 | if is_better_than(Some(state), candidate) { 14 | candidate = Some(state); 15 | } 16 | }); 17 | candidate.map(|state| ObjectReference { 18 | kind: "node".to_string(), 19 | name: state.name.to_owned(), 20 | }) 21 | } 22 | 23 | /// Determine if node1 is a better candidate then node2 24 | fn is_better_than(node1: Option<&NodeState>, node2: Option<&NodeState>) -> bool { 25 | if node1.is_none() { 26 | return false; 27 | } 28 | if node2.is_none() { 29 | return true; 30 | } 31 | node1.unwrap().pod_count < node2.unwrap().pod_count 32 | } 33 | -------------------------------------------------------------------------------- /scheduler/src/cache.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use resources::{ 4 | informer::Store, 5 | objects::{node::Node, pod::Pod, Labels}, 6 | }; 7 | 8 | #[derive(Debug, Default)] 9 | pub struct NodeState { 10 | pub name: String, 11 | pub labels: Labels, 12 | pub is_ready: bool, 13 | pub pod_count: u32, 14 | } 15 | 16 | pub struct Cache { 17 | pub pod_cache: Store, 18 | pub node_cache: Store, 19 | pub node_states: HashMap, 20 | } 21 | 22 | impl Cache { 23 | /// Initialize a new cache, 24 | /// node states should be manually refreshed before using. 25 | pub fn new(pod_cache: Store, node_cache: Store) -> Cache { 26 | Cache { 27 | pod_cache, 28 | node_cache, 29 | node_states: HashMap::new(), 30 | } 31 | } 32 | 33 | /// Re-calculate node states. 
34 | pub async fn refresh(&mut self) { 35 | self.node_states.clear(); 36 | let store = self.node_cache.read().await; 37 | for node in store.values() { 38 | let state = self.calculate_node_state(node).await; 39 | self.node_states 40 | .insert(node.metadata.name.to_owned(), state); 41 | } 42 | } 43 | 44 | pub async fn handle_pod_add(&mut self, pod: Pod, node_name: &String) { 45 | match self.node_states.get_mut(node_name) { 46 | Some(node_state) => { 47 | node_state.pod_count += 1; 48 | }, 49 | None => { 50 | tracing::warn!( 51 | "Pod {} is scheduled to node {} which is not in the node cache", 52 | pod.metadata.name, 53 | node_name 54 | ); 55 | }, 56 | } 57 | } 58 | 59 | pub async fn handle_pod_delete(&mut self, pod: Pod) { 60 | if let Some(node_name) = &pod.spec.node_name { 61 | match self.node_states.get_mut(node_name) { 62 | Some(node_state) => { 63 | node_state.pod_count -= 1; 64 | }, 65 | None => { 66 | tracing::warn!( 67 | "Pod {} is scheduled to node {} which is not in the node cache", 68 | pod.metadata.name, 69 | node_name 70 | ); 71 | }, 72 | } 73 | } 74 | } 75 | 76 | pub async fn handle_node_add(&mut self, node: Node) { 77 | let is_ready = node.is_ready(); 78 | self.node_states.insert( 79 | node.metadata.name.to_owned(), 80 | NodeState { 81 | name: node.metadata.name, 82 | labels: node.metadata.labels, 83 | is_ready, 84 | ..Default::default() 85 | }, 86 | ); 87 | } 88 | 89 | pub async fn handle_node_update(&mut self, new_node: Node) { 90 | let new_state = self.calculate_node_state(&new_node).await; 91 | self.node_states.insert(new_node.metadata.name, new_state); 92 | } 93 | 94 | pub async fn handle_node_delete(&mut self, node: Node) { 95 | self.node_states.remove(&node.metadata.name); 96 | } 97 | 98 | pub async fn calculate_node_state(&self, node: &Node) -> NodeState { 99 | let mut node_state = NodeState { 100 | name: node.metadata.name.to_owned(), 101 | labels: node.metadata.labels.to_owned(), 102 | is_ready: node.is_ready(), 103 | ..Default::default() 104 
/// Node change event emitted by the node informer and consumed by the
/// scheduler.
#[derive(Debug)]
pub enum NodeUpdate {
    /// A node joined the cluster.
    Add(Node),
    /// A node's object changed; carries two node snapshots — presumably
    /// (old, new), TODO confirm the order against the informer.
    Update(Node, Node),
    /// A node left the cluster.
    Delete(Node),
}
{ 36 | static ref CONFIG: NodeConfig = { 37 | dotenv::from_path("/etc/rminik8s/node.env").ok(); 38 | NodeConfig { 39 | etcd_endpoint: match env::var("ETCD_ENDPOINT") { 40 | Ok(url) => Url::parse(url.as_str()).unwrap(), 41 | Err(_) => Url::parse("http://127.0.0.1:2379/").unwrap(), 42 | }, 43 | api_server_endpoint: match env::var("API_SERVER_ENDPOINT") { 44 | Ok(url) => Url::parse(url.as_str()).unwrap(), 45 | Err(_) => Url::parse("http://127.0.0.1:8080/").unwrap(), 46 | }, 47 | } 48 | }; 49 | } 50 | 51 | #[tokio::main] 52 | async fn main() -> Result<()> { 53 | tracing_subscriber::fmt::init(); 54 | 55 | let (resync_tx, resync_rx) = mpsc::channel::(16); 56 | 57 | let (pod_tx, pod_rx, pod_store, pod_informer_handler) = run_pod_informer(resync_tx.clone()); 58 | let (_, node_rx, node_store, node_informer_handler) = run_node_informer(resync_tx); 59 | 60 | let cache = Cache::new(pod_store.clone(), node_store.clone()); 61 | let mut sched = Scheduler::new(algorithm::simple::simple, cache, resync_rx, pod_tx); 62 | let scheduler_handle = tokio::spawn(async move { sched.run(pod_rx, node_rx).await }); 63 | 64 | scheduler_handle.await?; 65 | pod_informer_handler.await??; 66 | node_informer_handler.await? 67 | // TODO: Gracefully shutdown 68 | } 69 | -------------------------------------------------------------------------------- /scripts/arm/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ ! -f ./build.sh ]; then 3 | echo "wrong directory" 4 | exit 5 | fi 6 | cd ../.. 
7 | cargo build --release 8 | cd ./target/release 9 | for file in *; do 10 | cp "$file" "${file}-arm" 11 | done 12 | 13 | curl -F "api_server=@api_server-arm" http://minik8s.xyz:8008/api/upload 14 | curl -F "endpoints-controller-arm=@endpoints-controller-arm" http://minik8s.xyz:8008/api/upload 15 | curl -F "ingress-controller-arm=@ingress-controller-arm" http://minik8s.xyz:8008/api/upload 16 | curl -F "podautoscaler-arm=@podautoscaler-arm" http://minik8s.xyz:8008/api/upload 17 | curl -F "replicaset-controller-arm=@replicaset-controller-arm" http://minik8s.xyz:8008/api/upload 18 | curl -F "rkube-proxy-arm=@rkube-proxy-arm" http://minik8s.xyz:8008/api/upload 19 | curl -F "rkubectl-arm=@rkubectl-arm" http://minik8s.xyz:8008/api/upload 20 | curl -F "rkubelet-arm=@rkubelet-arm" http://minik8s.xyz:8008/api/upload 21 | curl -F "scheduler-arm=@scheduler-arm" http://minik8s.xyz:8008/api/upload 22 | curl -F "gpujob-controller-arm=@gpujob-controller-arm" http://minik8s.xyz:8008/api/upload 23 | curl -F "function-controller-arm=@function-controller-arm" http://minik8s.xyz:8008/api/upload 24 | curl -F "serverless-router-arm=@serverless-router-arm" http://minik8s.xyz:8008/api/upload -------------------------------------------------------------------------------- /scripts/arm/build_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker build -t minik8s.xyz/api_server-arm:latest ./docker/api_server 3 | docker build -t minik8s.xyz/endpoints-controller-arm:latest ./docker/endpoints-controller 4 | docker build -t minik8s.xyz/ingress-controller-arm:latest ./docker/ingress-controller 5 | docker build -t minik8s.xyz/podautoscaler-arm:latest ./docker/podautoscaler 6 | docker build -t minik8s.xyz/replicaset-controller-arm:latest ./docker/replicaset-controller 7 | docker build -t minik8s.xyz/scheduler-arm:latest ./docker/scheduler 8 | docker build -t minik8s.xyz/gpujob-controller-arm:latest ./docker/gpujob-controller 9 | docker 
build -t minik8s.xyz/serverless-router-arm:latest ./docker/serverless-router 10 | docker build -t minik8s.xyz/function-controller-arm:latest ./docker/function-controller 11 | 12 | docker push minik8s.xyz/api_server-arm:latest 13 | docker push minik8s.xyz/endpoints-controller-arm:latest 14 | docker push minik8s.xyz/ingress-controller-arm:latest 15 | docker push minik8s.xyz/podautoscaler-arm:latest 16 | docker push minik8s.xyz/replicaset-controller-arm:latest 17 | docker push minik8s.xyz/scheduler-arm:latest 18 | docker push minik8s.xyz/gpujob-controller-arm:latest 19 | docker push minik8s.xyz/serverless-router-arm:latest 20 | docker push minik8s.xyz/function-controller-arm:latest 21 | -------------------------------------------------------------------------------- /scripts/arm/docker/api_server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/api_server-arm ./api_server 4 | RUN chmod +x api_server 5 | CMD ["./api_server"] 6 | -------------------------------------------------------------------------------- /scripts/arm/docker/endpoints-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/endpoints-controller-arm ./endpoints-controller 4 | RUN chmod +x endpoints-controller 5 | CMD ["./endpoints-controller"] 6 | -------------------------------------------------------------------------------- /scripts/arm/docker/function-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | RUN sed -i "s|http://deb.debian.org/debian|http://mirror.sjtu.edu.cn/debian|g" /etc/apt/sources.list && sed -i "s|http://security.debian.org|http://mirror.sjtu.edu.cn|g" /etc/apt/sources.list 3 | RUN apt-get update && apt-get -y install zip docker.io && apt-get clean 4 | ADD 
from flask import Flask, request, jsonify
from handler import handler

app = Flask(__name__)


@app.route("/")
def function():
    """Invoke the user-supplied handler with the JSON request body (if any)."""
    print("received request")
    args = {}
    # Only parse a body when one was actually sent; a request without a
    # body would otherwise fail JSON parsing.
    if request.data:
        args = request.get_json()
    print(args)

    return jsonify(handler(args))


# Guard the server start so importing this module (e.g. from a WSGI
# server or a test) does not immediately bind port 80. The container's
# `python server.py` entry point behaves exactly as before.
if __name__ == "__main__":
    # Bind on all interfaces; the container exposes port 80.
    app.run(host='0.0.0.0', port=80)
"s|http://deb.debian.org/debian|http://mirror.sjtu.edu.cn/debian|g" /etc/apt/sources.list && sed -i "s|http://security.debian.org|http://mirror.sjtu.edu.cn|g" /etc/apt/sources.list 4 | RUN apt-get update && apt-get -y install nginx && apt-get clean 5 | ADD http://minik8s.xyz:8008/ingress-controller-arm ./ingress-controller 6 | RUN chmod +x ingress-controller 7 | CMD ["/bin/bash", "-c", "nginx;./ingress-controller"] 8 | -------------------------------------------------------------------------------- /scripts/arm/docker/podautoscaler/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/podautoscaler-arm ./podautoscaler 4 | RUN chmod +x podautoscaler 5 | CMD ["./podautoscaler"] 6 | -------------------------------------------------------------------------------- /scripts/arm/docker/replicaset-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/replicaset-controller-arm ./replicaset-controller 4 | RUN chmod +x replicaset-controller 5 | CMD ["./replicaset-controller"] 6 | -------------------------------------------------------------------------------- /scripts/arm/docker/scheduler/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/scheduler-arm ./scheduler 4 | RUN chmod +x scheduler 5 | CMD ["./scheduler"] 6 | -------------------------------------------------------------------------------- /scripts/arm/docker/serverless-router/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/serverless-router-arm ./serverless-router 4 | RUN chmod +x serverless-router 5 | CMD ["./serverless-router"] 6 | 
-------------------------------------------------------------------------------- /scripts/arm/local-dev/.gitignore: -------------------------------------------------------------------------------- 1 | dns/serverless_router.db 2 | dns/ingress.db 3 | -------------------------------------------------------------------------------- /scripts/arm/local-dev/coredns.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=DNS for rminik8s 3 | After=network-online.target firewalld.service containerd.service time-set.target 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/conf.env 7 | ExecStart=/usr/local/bin/coredns -conf /config/Corefile 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/arm/local-dev/daemon.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "registry-mirrors": [ 3 | "https://docker.mirrors.sjtug.sjtu.edu.cn" 4 | ], 5 | "bip": "$FLANNEL_SUBNET" 6 | } -------------------------------------------------------------------------------- /scripts/arm/local-dev/dns/Corefile: -------------------------------------------------------------------------------- 1 | .:53 { 2 | forward . 119.29.29.29 114.114.114.114 3 | log 4 | errors 5 | } 6 | 7 | minik8s.com:53 { 8 | file /config/ingress.db 9 | log 10 | errors 11 | } 12 | 13 | func.minik8s.com:53 { 14 | file /config/serverless_router.db 15 | log 16 | errors 17 | } 18 | -------------------------------------------------------------------------------- /scripts/arm/local-dev/dns/ingress.db.template: -------------------------------------------------------------------------------- 1 | minik8s.com. IN SOA dns.minik8s.com. email.minik8s.com. 
2015082541 7200 3600 1209600 3600 2 | * IN A ${INGRESS_IP} 3 | -------------------------------------------------------------------------------- /scripts/arm/local-dev/dns/serverless_router.db.template: -------------------------------------------------------------------------------- 1 | minik8s.com. IN SOA dns.minik8s.com. email.minik8s.com. 2015082541 7200 3600 1209600 3600 2 | * IN A ${SERVERLESS_ROUTER_IP} 3 | -------------------------------------------------------------------------------- /scripts/arm/local-dev/down.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | printf "nameserver 119.29.29.29\n" > /etc/resolv.conf 8 | 9 | docker rm -f etcd 10 | docker rm -f prometheus 11 | docker rm -f cadvisor 12 | systemctl daemon-reload 13 | systemctl stop coredns 14 | -------------------------------------------------------------------------------- /scripts/arm/local-dev/flanneld.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Flannel CNI Daemon for rMiniK8s 3 | After=network-online.target firewalld.service containerd.service time-set.target 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/conf.env 7 | ExecStart=/usr/local/bin/flanneld --etcd-endpoints=${ETCD_ENDPOINT} 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/arm/local-dev/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 
6 | external_labels: 7 | monitor: "codelab-monitor" 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: "prometheus" 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds. 16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ["localhost:9090"] 20 | 21 | - job_name: "cadvisor" 22 | scrape_interval: 15s 23 | static_configs: 24 | - targets: ["localhost:8090"] 25 | 26 | - job_name: "serverless_router" 27 | static_configs: 28 | - targets: ["localhost:80"] 29 | -------------------------------------------------------------------------------- /scripts/arm/local-dev/up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | mkdir -p /etc/rminik8s 8 | ARCH=$(dpkg --print-architecture) 9 | printf "ARCH: $ARCH\n" 10 | export IP=$(ip route get 114.114.114.114 | awk '{ print $7; exit }') 11 | printf "IP: $IP\n\n" 12 | 13 | # install flannel 14 | if [ ! -f /usr/local/bin/flanneld ]; then 15 | printf "Installing flanneld ...\n" 16 | wget -q --show-progress -O /usr/local/bin/flanneld "http://minik8s.xyz:8008/flanneld-$ARCH" 17 | chmod +x /usr/local/bin/flanneld 18 | else 19 | printf "flanneld installed\n\n" 20 | fi 21 | cp ./flanneld.service /etc/systemd/system/flanneld.service 22 | printf "flanneld service installed\n\n" 23 | 24 | # start etcd 25 | printf "Starting etcd... 
\n" 26 | docker run -d \ 27 | --network host \ 28 | --restart=always \ 29 | --name etcd quay.io/coreos/etcd:v3.5.4-arm64 \ 30 | etcd \ 31 | -enable-v2 \ 32 | -advertise-client-urls http://0.0.0.0:2379 \ 33 | -listen-client-urls http://0.0.0.0:2379 34 | docker run -d \ 35 | --network host \ 36 | --rm \ 37 | -e ETCDCTL_API=2 \ 38 | quay.io/coreos/etcd:v3.5.4-arm64 \ 39 | etcdctl \ 40 | set /coreos.com/network/config '{ "Network": "10.66.0.0/16", "Backend": {"Type": "vxlan"}}' 41 | export ETCD_ENDPOINT=http://${IP}:2379 42 | printf "ETCD_ENDPOINT=$ETCD_ENDPOINT\n" > /etc/rminik8s/conf.env 43 | printf "ETCD started, endpoint=$ETCD_ENDPOINT\n\n" 44 | 45 | # start flannel 46 | systemctl daemon-reload 47 | systemctl restart flanneld.service 48 | while [ ! -f /run/flannel/subnet.env ]; do 49 | echo "Waiting for flanneld service" 50 | sleep 0.5 51 | done 52 | set -a; source /run/flannel/subnet.env; set +a 53 | printf "flanneld service ok\n\n" 54 | 55 | # assign ip for each component 56 | SUBNET_BASE=${FLANNEL_SUBNET:0:-5} 57 | export API_SERVER_IP=${SUBNET_BASE}.100 58 | export INGRESS_IP=${SUBNET_BASE}.101 59 | export SERVERLESS_ROUTER_IP=$IP 60 | export PROMETHEUS_IP=${SUBNET_BASE}.103 61 | 62 | # start dns 63 | systemctl stop systemd-resolved 64 | printf "nameserver 119.29.29.29\n" > /etc/resolv.conf 65 | envsubst <./dns/serverless_router.db.template > ./dns/serverless_router.db 66 | envsubst <./dns/ingress.db.template > ./dns/ingress.db 67 | if [ ! 
-f /usr/local/bin/coredns ]; then 68 | printf "Installing coredns ...\n" 69 | wget -q --show-progress -O /usr/local/bin/coredns http://minik8s.xyz:8008/coredns 70 | chmod +x /usr/local/bin/coredns 71 | else 72 | printf "coredns installed\n\n" 73 | fi 74 | mkdir -p /config 75 | cp ./dns/* /config 76 | cp ./coredns.service /etc/systemd/system/coredns.service 77 | systemctl daemon-reload 78 | systemctl restart coredns.service 79 | printf "nameserver $IP\n" > /etc/resolv.conf 80 | printf "DNS server started\n\n" 81 | 82 | # configure docker 83 | envsubst < ./daemon.example.json > ./daemon.json 84 | mv ./daemon.json /etc/docker/daemon.json 85 | systemctl restart docker 86 | 87 | docker run --rm -d --net=host \ 88 | -v $(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml \ 89 | --name prometheus \ 90 | prom/prometheus:latest \ 91 | --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml 92 | 93 | docker run \ 94 | --volume=/:/rootfs:ro \ 95 | --volume=/var/run:/var/run:rw \ 96 | --volume=/sys:/sys:ro \ 97 | --volume=/var/lib/docker/:/var/lib/docker:ro \ 98 | --publish=8090:8080 \ 99 | --detach=true \ 100 | --name=cadvisor \ 101 | zcube/cadvisor:latest -------------------------------------------------------------------------------- /scripts/arm/master/.gitignore: -------------------------------------------------------------------------------- 1 | dns/serverless_router.db 2 | dns/ingress.db 3 | prometheus.yml 4 | -------------------------------------------------------------------------------- /scripts/arm/master/coredns.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=DNS for rminik8s 3 | After=network-online.target firewalld.service containerd.service time-set.target 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/conf.env 7 | ExecStart=/usr/local/bin/coredns -conf /config/Corefile 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target 
-------------------------------------------------------------------------------- /scripts/arm/master/dns/Corefile: -------------------------------------------------------------------------------- 1 | .:53 { 2 | forward . 119.29.29.29 114.114.114.114 3 | log 4 | errors 5 | } 6 | 7 | minik8s.com:53 { 8 | file /config/ingress.db 9 | log 10 | errors 11 | } 12 | 13 | func.minik8s.com:53 { 14 | file /config/serverless_router.db 15 | log 16 | errors 17 | } 18 | -------------------------------------------------------------------------------- /scripts/arm/master/dns/ingress.db.template: -------------------------------------------------------------------------------- 1 | minik8s.com. IN SOA dns.minik8s.com. email.minik8s.com. 2015082541 7200 3600 1209600 3600 2 | * IN A ${INGRESS_IP} 3 | -------------------------------------------------------------------------------- /scripts/arm/master/dns/serverless_router.db.template: -------------------------------------------------------------------------------- 1 | minik8s.com. IN SOA dns.minik8s.com. email.minik8s.com. 
2015082541 7200 3600 1209600 3600 2 | * IN A ${SERVERLESS_ROUTER_IP} 3 | -------------------------------------------------------------------------------- /scripts/arm/master/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | api_server: 5 | image: minik8s.xyz/api_server-arm:latest 6 | container_name: api_server 7 | ports: 8 | - "8080:8080" 9 | environment: 10 | - "ETCD_ENDPOINT=${ETCD_ENDPOINT}" 11 | - "METRICS_SERVER=prometheus:9090" 12 | volumes: 13 | - ./prometheus.yml:/etc/prometheus/prometheus.yml 14 | depends_on: 15 | - prometheus 16 | networks: 17 | default: 18 | ipv4_address: "$API_SERVER_IP" 19 | 20 | prometheus: 21 | image: prom/prometheus:latest 22 | container_name: prometheus 23 | volumes: 24 | - ./prometheus.yml:/etc/prometheus/prometheus.yml 25 | command: --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml 26 | ports: 27 | - "9090:9090" 28 | networks: 29 | default: 30 | ipv4_address: "$PROMETHEUS_IP" 31 | 32 | endpoints-controller: 33 | image: minik8s.xyz/endpoints-controller-arm:latest 34 | container_name: endpoints-controller 35 | environment: 36 | - API_SERVER_ENDPOINT=http://api_server:8080 37 | depends_on: 38 | - api_server 39 | 40 | ingress-controller: 41 | image: minik8s.xyz/ingress-controller-arm:latest 42 | container_name: ingress 43 | environment: 44 | - API_SERVER_ENDPOINT=http://api_server:8080 45 | depends_on: 46 | - api_server 47 | networks: 48 | default: 49 | ipv4_address: "$INGRESS_IP" 50 | 51 | replicaset-controller: 52 | image: minik8s.xyz/replicaset-controller-arm:latest 53 | container_name: replicaset-controller 54 | environment: 55 | - API_SERVER_URL=http://api_server:8080 56 | - API_SERVER_WATCH_URL=ws://api_server:8080 57 | depends_on: 58 | - api_server 59 | 60 | # cadvisor: 61 | # image: zcube/cadvisor:latest 62 | # container_name: cadvisor 63 | # ports: 64 | # - "8090:8080" 65 | # volumes: 66 | # - /:/rootfs:ro 67 | # - 
/var/run:/var/run:rw 68 | # - /sys:/sys:ro 69 | # - /var/lib/docker/:/var/lib/docker:ro 70 | 71 | function-controller: 72 | image: minik8s.xyz/function-controller-arm:latest 73 | container_name: function-controller 74 | environment: 75 | - API_SERVER_URL=http://api_server:8080 76 | - API_SERVER_WATCH_URL=ws://api_server:8080 77 | volumes: 78 | - /var/run/docker.sock:/var/run/docker.sock 79 | depends_on: 80 | - api_server 81 | 82 | serverless-router: 83 | image: minik8s.xyz/serverless-router-arm:latest 84 | container_name: serverless-router 85 | environment: 86 | - API_SERVER_ENDPOINT=http://api_server:8080 87 | depends_on: 88 | - api_server 89 | networks: 90 | default: 91 | ipv4_address: "$SERVERLESS_ROUTER_IP" 92 | 93 | podautoscaler: 94 | image: minik8s.xyz/podautoscaler-arm:latest 95 | container_name: podautoscaler 96 | environment: 97 | - API_SERVER_URL=http://api_server:8080 98 | - API_SERVER_WATCH_URL=ws://api_server:8080 99 | depends_on: 100 | - api_server 101 | 102 | gpujob_controller: 103 | image: minik8s.xyz/gpujob-controller-arm:latest 104 | container_name: gpujob-controller 105 | environment: 106 | - API_SERVER_URL=http://api_server:8080 107 | - API_SERVER_WATCH_URL=ws://api_server:8080 108 | - USERNAME=stu607 109 | - PASSWORD=JP^L75kq 110 | depends_on: 111 | - api_server 112 | 113 | scheduler: 114 | image: minik8s.xyz/scheduler-arm:latest 115 | container_name: scheduler 116 | environment: 117 | - API_SERVER_ENDPOINT=http://api_server:8080 118 | depends_on: 119 | - api_server 120 | 121 | networks: 122 | default: 123 | ipam: 124 | driver: default 125 | config: 126 | - subnet: "${FLANNEL_SUBNET}" 127 | -------------------------------------------------------------------------------- /scripts/arm/master/down.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | export IP=$(ip route get 114.114.114.114 | awk '{ print $7; exit }') 
8 | set -a; source /run/flannel/subnet.env; set +a; SUBNET_BASE=${FLANNEL_SUBNET:0:-5} # load FLANNEL_SUBNET and derive the base BEFORE using it below (was used undefined) 9 | export API_SERVER_IP=${SUBNET_BASE}.100 10 | export INGRESS_IP=${SUBNET_BASE}.101 11 | export SERVERLESS_ROUTER_IP=${SUBNET_BASE}.102 12 | export PROMETHEUS_IP=${SUBNET_BASE}.103 13 | export ETCD_ENDPOINT=http://${IP}:2379 14 | 15 | printf "nameserver 119.29.29.29\n" > /etc/resolv.conf 16 | 17 | docker-compose -p minik8s-control-plane down -t 1 -v --remove-orphans 18 | docker rm -f etcd 19 | systemctl daemon-reload 20 | systemctl stop rkubeproxy 21 | systemctl stop flanneld 22 | systemctl stop coredns 23 | -------------------------------------------------------------------------------- /scripts/arm/master/flanneld.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Flannel CNI Daemon for rMiniK8s 3 | After=network-online.target firewalld.service containerd.service time-set.target 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/conf.env 7 | ExecStart=/usr/local/bin/flanneld --etcd-endpoints=${ETCD_ENDPOINT} 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/arm/master/prometheus.template.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 6 | external_labels: 7 | monitor: "codelab-monitor" 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: "prometheus" 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds.
16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ["localhost:9090"] 20 | 21 | - job_name: "cadvisor" 22 | scrape_interval: 15s 23 | static_configs: 24 | - targets: [] 25 | 26 | - job_name: "serverless_router" 27 | static_configs: 28 | - targets: ["$SERVERLESS_ROUTER_IP:80"] 29 | -------------------------------------------------------------------------------- /scripts/arm/master/rkubeproxy.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=rKubeProxy 3 | After=network-online.target firewalld.service containerd.service time-set.target flanneld.service 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/node.env 7 | ExecStart=/usr/local/bin/rkube-proxy 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/arm/master/up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | mkdir -p /etc/rminik8s 8 | ARCH=$(dpkg --print-architecture) 9 | printf "ARCH: $ARCH\n" 10 | export IP=$(ip route get 114.114.114.114 | awk '{ print $7; exit }') 11 | printf "IP: $IP\n\n" 12 | 13 | # install flannel 14 | if [ ! -f /usr/local/bin/flanneld ]; then 15 | printf "Installing flanneld ...\n" 16 | wget -q --show-progress -O /usr/local/bin/flanneld "http://minik8s.xyz:8008/flanneld-$ARCH" 17 | chmod +x /usr/local/bin/flanneld 18 | else 19 | printf "flanneld installed\n\n" 20 | fi 21 | cp ./flanneld.service /etc/systemd/system/flanneld.service 22 | printf "flanneld service installed\n\n" 23 | 24 | # start etcd 25 | printf "Starting etcd... 
\n" 26 | docker run -d \ 27 | --network host \ 28 | --restart=always \ 29 | --name etcd quay.io/coreos/etcd:v3.5.4-arm64 \ 30 | etcd \ 31 | -enable-v2 \ 32 | -advertise-client-urls http://0.0.0.0:2379 \ 33 | -listen-client-urls http://0.0.0.0:2379 34 | docker run -d \ 35 | --network host \ 36 | --rm \ 37 | -e ETCDCTL_API=2 \ 38 | quay.io/coreos/etcd:v3.5.4-arm64 \ 39 | etcdctl \ 40 | set /coreos.com/network/config '{ "Network": "10.66.0.0/16", "Backend": {"Type": "vxlan"}}' 41 | export ETCD_ENDPOINT=http://${IP}:2379 42 | printf "ETCD_ENDPOINT=$ETCD_ENDPOINT\n" > /etc/rminik8s/conf.env 43 | printf "ETCD started, endpoint=$ETCD_ENDPOINT\n\n" 44 | 45 | # start flannel 46 | systemctl daemon-reload 47 | systemctl restart flanneld.service 48 | while [ ! -f /run/flannel/subnet.env ]; do 49 | echo "Waiting for flanneld service" 50 | sleep 0.5 51 | done 52 | set -a; source /run/flannel/subnet.env; set +a 53 | printf "flanneld service ok\n\n" 54 | 55 | # assign ip for each component 56 | SUBNET_BASE=${FLANNEL_SUBNET:0:-5} 57 | export API_SERVER_IP=${SUBNET_BASE}.100 58 | export INGRESS_IP=${SUBNET_BASE}.101 59 | export SERVERLESS_ROUTER_IP=${SUBNET_BASE}.102 60 | export PROMETHEUS_IP=${SUBNET_BASE}.103 61 | 62 | # start dns 63 | systemctl stop systemd-resolved 64 | printf "nameserver 119.29.29.29\n" > /etc/resolv.conf 65 | envsubst <./dns/serverless_router.db.template > ./dns/serverless_router.db 66 | envsubst <./dns/ingress.db.template > ./dns/ingress.db 67 | if [ ! 
-f /usr/local/bin/coredns ]; then 68 | printf "Installing coredns ...\n" 69 | wget -q --show-progress -O /usr/local/bin/coredns http://minik8s.xyz:8008/coredns 70 | chmod +x /usr/local/bin/coredns 71 | else 72 | printf "coredns installed\n\n" 73 | fi 74 | mkdir -p /config 75 | cp ./dns/* /config 76 | cp ./coredns.service /etc/systemd/system/coredns.service 77 | systemctl daemon-reload 78 | systemctl restart coredns.service 79 | printf "nameserver $IP\n" > /etc/resolv.conf 80 | printf "DNS server started\n\n" 81 | 82 | # start control plane 83 | envsubst <./prometheus.template.yml > ./prometheus.yml 84 | docker-compose -p minik8s-control-plane up -d 85 | printf "control plane started\n\n" 86 | 87 | # start rkube-proxy 88 | if [ ! -f /usr/local/bin/rkube-proxy ]; then 89 | wget -q --show-progress -O /usr/local/bin/rkube-proxy http://minik8s.xyz:8008/rkube-proxy-arm 90 | chmod +x /usr/local/bin/rkube-proxy 91 | fi 92 | cp ./rkubeproxy.service /etc/systemd/system/rkubeproxy.service 93 | printf "ETCD_ENDPOINT=$ETCD_ENDPOINT\nAPI_SERVER_ENDPOINT=http://${API_SERVER_IP}:8080\n" > /etc/rminik8s/node.env 94 | systemctl daemon-reload 95 | systemctl restart rkubeproxy.service 96 | printf "rkube-proxy started\n\n" 97 | 98 | printf "Installing rkubectl ...\n" 99 | if [ ! 
-f /usr/local/bin/rkubectl ]; then 100 | wget -q --show-progress -O /usr/local/bin/rkubectl http://minik8s.xyz:8008/rkubectl-arm 101 | chmod +x /usr/local/bin/rkubectl 102 | fi 103 | printf "rkubectl installed\n\n" 104 | 105 | # display ip 106 | printf "The following endpoints are exposed:\n" 107 | printf " api-server: http://${IP}:8080\n" 108 | printf " ingress: http://${INGRESS_IP}\n" 109 | printf " serverless-router: http://${SERVERLESS_ROUTER_IP}\n" 110 | printf " prometheus: http://${PROMETHEUS_IP}:9090\n" 111 | printf " etcd: $ETCD_ENDPOINT\n" 112 | printf " dns: $IP:53\n\n" 113 | printf "You can connect to the control plane by this IP: $IP\n" 114 | -------------------------------------------------------------------------------- /scripts/arm/node/.gitignore: -------------------------------------------------------------------------------- 1 | daemon.json 2 | kubelet.yaml 3 | -------------------------------------------------------------------------------- /scripts/arm/node/daemon.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "registry-mirrors": [ 3 | "https://docker.mirrors.sjtug.sjtu.edu.cn" 4 | ], 5 | "bip": "$FLANNEL_SUBNET" 6 | } -------------------------------------------------------------------------------- /scripts/arm/node/down.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | printf "nameserver 119.29.29.29\n" > /etc/resolv.conf 8 | sudo systemctl stop rkubelet.service 9 | sudo systemctl stop rkubeproxy.service 10 | docker rm -f cadvisor 11 | -------------------------------------------------------------------------------- /scripts/arm/node/down_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | sudo systemctl stop 
rkubelet.service 8 | docker rm -f cadvisor 9 | -------------------------------------------------------------------------------- /scripts/arm/node/flanneld.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Flannel CNI Daemon for rMiniK8s 3 | After=network-online.target firewalld.service containerd.service time-set.target 4 | Before=docker.service 5 | 6 | [Service] 7 | EnvironmentFile=/etc/rminik8s/conf.env 8 | ExecStart=/usr/local/bin/flanneld --etcd-endpoints=${ETCD_ENDPOINT} 9 | Restart=on-failure 10 | RestartSec=3s 11 | 12 | [Install] 13 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/arm/node/rkubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=rKubelet 3 | After=network-online.target firewalld.service containerd.service time-set.target flanneld.service 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/node.env 7 | ExecStart=/usr/local/bin/rkubelet 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /scripts/arm/node/rkubelet.yaml: -------------------------------------------------------------------------------- 1 | #staticPodPath: "/etc/rminik8s/manifests" 2 | #node_status_update_frequency: 10 3 | #node_status_report_frequency: 30 4 | #pod_status_update_frequency: 10 5 | #port: 10250 6 | cluster: 7 | apiServerUrl: "http://${MASTER_IP}:8080" 8 | apiServerWatchUrl: "ws://${MASTER_IP}:8080" -------------------------------------------------------------------------------- /scripts/arm/node/rkubeproxy.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=rKubeProxy 3 | After=network-online.target firewalld.service containerd.service time-set.target flanneld.service 4 |
5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/node.env 7 | ExecStart=/usr/local/bin/rkube-proxy 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/arm/node/up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | mkdir -p /etc/rminik8s 8 | ARCH=$(dpkg --print-architecture) 9 | printf "ARCH: $ARCH\n" 10 | export IP=$(ip route get 114.114.114.114 | awk '{ print $7; exit }') 11 | printf "IP: $IP\n\n" 12 | 13 | # get config 14 | echo -n "Please input Master IP: " 15 | read -r MASTER_IP 16 | export MASTER_IP=$MASTER_IP 17 | ETCD_ENDPOINT="http://$MASTER_IP:2379" 18 | printf 'ETCD_ENDPOINT="%s"\n' "$ETCD_ENDPOINT" > /etc/rminik8s/conf.env 19 | DNS_IP=$MASTER_IP 20 | 21 | # install flannel (guard must test the binary being installed, not rkubelet) 22 | if [ ! -f /usr/local/bin/flanneld ]; then 23 | printf "Installing flanneld ...\n" 24 | wget -q --show-progress -O /usr/local/bin/flanneld "http://minik8s.xyz:8008/flanneld-$ARCH" 25 | chmod +x /usr/local/bin/flanneld 26 | else 27 | printf "flanneld installed\n\n" 28 | fi 29 | cp ./flanneld.service /etc/systemd/system/flanneld.service 30 | printf "flanneld service installed\n\n" 31 | 32 | # start flannel service 33 | systemctl daemon-reload 34 | systemctl restart flanneld.service 35 | while [ !
-f /run/flannel/subnet.env ]; do 36 | echo "Waiting for flanneld service" 37 | sleep 0.1 38 | done 39 | set -a; source /run/flannel/subnet.env; set +a 40 | printf "flanneld service ok\n\n" 41 | 42 | # configure dns 43 | systemctl stop systemd-resolved 44 | printf "nameserver $DNS_IP\n" > /etc/resolv.conf 45 | 46 | # configure docker 47 | envsubst < ./daemon.example.json > ./daemon.json 48 | mv ./daemon.json /etc/docker/daemon.json 49 | systemctl restart docker 50 | 51 | # run cadvisor 52 | docker run \ 53 | --volume=/:/rootfs:ro \ 54 | --volume=/var/run:/var/run:rw \ 55 | --volume=/sys:/sys:ro \ 56 | --volume=/var/lib/docker/:/var/lib/docker:ro \ 57 | --publish=8090:8080 \ 58 | --detach=true \ 59 | --name=cadvisor \ 60 | zcube/cadvisor:latest 61 | 62 | # run rkubelet 63 | if [ ! -f /usr/local/bin/rkubelet ]; then 64 | wget -q --show-progress -O /usr/local/bin/rkubelet http://minik8s.xyz:8008/rkubelet-arm 65 | chmod +x /usr/local/bin/rkubelet 66 | fi 67 | mkdir -p /var/lib/rkubelet 68 | envsubst < ./rkubelet.yaml > /var/lib/rkubelet/config.yaml 69 | cp ./rkubelet.service /etc/systemd/system/rkubelet.service 70 | systemctl daemon-reload 71 | systemctl restart rkubelet.service 72 | printf "rkubelet service started\n\n" 73 | 74 | # start rkube-proxy 75 | printf "Installing rkube-proxy ...\n" 76 | if [ ! -f /usr/local/bin/rkube-proxy ]; then 77 | wget -q --show-progress -O /usr/local/bin/rkube-proxy http://minik8s.xyz:8008/rkube-proxy-arm 78 | chmod +x /usr/local/bin/rkube-proxy 79 | fi 80 | cp ./rkubeproxy.service /etc/systemd/system/rkubeproxy.service 81 | printf "ETCD_ENDPOINT=$ETCD_ENDPOINT\nAPI_SERVER_ENDPOINT=http://$MASTER_IP:8080\n" > /etc/rminik8s/node.env 82 | systemctl daemon-reload 83 | systemctl restart rkubeproxy.service 84 | printf "rkube-proxy started\n\n" 85 | 86 | printf "Installing rkubectl ...\n" 87 | if [ ! 
-f /usr/local/bin/rkubectl ]; then 88 | wget -q --show-progress -O /usr/local/bin/rkubectl http://minik8s.xyz:8008/rkubectl-arm 89 | chmod +x /usr/local/bin/rkubectl 90 | fi 91 | printf "rkubectl installed\n\n" 92 | 93 | 94 | -------------------------------------------------------------------------------- /scripts/arm/node/up_master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | mkdir -p /etc/rminik8s 8 | ARCH=$(dpkg --print-architecture) 9 | printf "ARCH: $ARCH\n" 10 | export IP=$(ip route get 114.114.114.114 | awk '{ print $7; exit }') 11 | printf "IP: $IP\n\n" 12 | 13 | # get config 14 | export MASTER_IP=$IP 15 | ETCD_ENDPOINT="http://$MASTER_IP:2379" 16 | printf 'ETCD_ENDPOINT="%s"\n' "$ETCD_ENDPOINT" > /etc/rminik8s/conf.env 17 | DNS_IP=$MASTER_IP 18 | 19 | # run cadvisor 20 | docker run \ 21 | --volume=/:/rootfs:ro \ 22 | --volume=/var/run:/var/run:rw \ 23 | --volume=/sys:/sys:ro \ 24 | --volume=/var/lib/docker/:/var/lib/docker:ro \ 25 | --publish=8090:8080 \ 26 | --detach=true \ 27 | --name=cadvisor \ 28 | zcube/cadvisor:latest 29 | 30 | # run rkubelet 31 | if [ ! 
-f /usr/local/bin/rkubelet ]; then 32 | wget -q --show-progress -O /usr/local/bin/rkubelet http://minik8s.xyz:8008/rkubelet-arm 33 | chmod +x /usr/local/bin/rkubelet 34 | fi 35 | mkdir -p /var/lib/rkubelet 36 | envsubst < ./rkubelet.yaml > /var/lib/rkubelet/config.yaml 37 | cp ./rkubelet.service /etc/systemd/system/rkubelet.service 38 | systemctl daemon-reload 39 | systemctl restart rkubelet.service 40 | printf "rkubelet service started\n\n" 41 | -------------------------------------------------------------------------------- /scripts/arm/pull_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker pull minik8s.xyz/api_server-arm:latest 3 | docker pull minik8s.xyz/endpoints-controller-arm:latest 4 | docker pull minik8s.xyz/ingress-controller-arm:latest 5 | docker pull minik8s.xyz/podautoscaler-arm:latest 6 | docker pull minik8s.xyz/replicaset-controller-arm:latest 7 | docker pull minik8s.xyz/scheduler-arm:latest 8 | docker pull minik8s.xyz/gpujob-controller-arm:latest 9 | docker pull minik8s.xyz/serverless-router-arm:latest 10 | docker pull minik8s.xyz/function-controller-arm:latest 11 | -------------------------------------------------------------------------------- /scripts/multipass/cloud-init-arm64.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | package_upgrade: true 3 | 4 | packages: 5 | - apt-transport-https 6 | - ca-certificates 7 | - curl 8 | - gnupg 9 | - lsb-release 10 | - zip 11 | - etcd-client 12 | - tmux 13 | - httpie 14 | - fzf 15 | - fd-find 16 | - bat 17 | - fish 18 | # create the docker group 19 | groups: 20 | - docker 21 | 22 | apt: 23 | primary: 24 | - arches: [default] 25 | uri: http://mirror.sjtu.edu.cn/ubuntu-ports/ 26 | 27 | system_info: 28 | default_user: 29 | groups: [docker] 30 | 31 | runcmd: 32 | - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 33 | - echo "deb [arch=$(dpkg 
--print-architecture)] https://mirror.sjtu.edu.cn/docker-ce/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list 34 | - apt-get update && apt-get install -y docker-ce docker-compose-plugin 35 | - chsh -s /bin/fish 36 | -------------------------------------------------------------------------------- /scripts/multipass/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ -z "$RMINIK8S" ]; then 3 | echo "Please specify rminik8s source dir" 4 | exit 5 | fi 6 | 7 | # master 8 | multipass launch --name k8s --disk 20G --cpus 4 --mem 4G --cloud-init cloud-init-arm64.yaml lts 9 | # 2 nodes: 2v8g, 6v4g 10 | multipass launch --name k8s1 --disk 20G --cpus 2 --mem 8G --cloud-init cloud-init-arm64.yaml lts 11 | multipass launch --name k8s2 --disk 20G --cpus 6 --mem 4G --cloud-init cloud-init-arm64.yaml lts 12 | 13 | multipass mount $RMINIK8S k8s:~/rminik8s 14 | multipass mount $RMINIK8S k8s1:~/rminik8s 15 | multipass mount $RMINIK8S k8s2:~/rminik8s 16 | -------------------------------------------------------------------------------- /scripts/x86/docker/api_server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/api_server ./ 4 | RUN chmod +x api_server 5 | CMD ["./api_server"] 6 | -------------------------------------------------------------------------------- /scripts/x86/docker/endpoints-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/endpoints-controller ./ 4 | RUN chmod +x endpoints-controller 5 | CMD ["./endpoints-controller"] 6 | -------------------------------------------------------------------------------- /scripts/x86/docker/function-controller/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | RUN sed -i "s|http://deb.debian.org/debian|http://mirror.sjtu.edu.cn/debian|g" /etc/apt/sources.list && sed -i "s|http://security.debian.org|http://mirror.sjtu.edu.cn|g" /etc/apt/sources.list 3 | RUN apt-get update && apt-get -y install zip docker.io && apt-get clean 4 | ADD http://minik8s.xyz:8008/function-controller ./function-controller 5 | RUN chmod +x function-controller 6 | COPY ./function_wrapper /templates/function_wrapper 7 | CMD ["./function-controller"] 8 | -------------------------------------------------------------------------------- /scripts/x86/docker/function-controller/function_wrapper/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3 2 | WORKDIR /app 3 | RUN pip config set global.index-url https://mirror.sjtu.edu.cn/pypi/web/simple && pip install flask 4 | COPY . . 5 | RUN pip install --no-cache-dir -r requirements.txt 6 | EXPOSE 80 7 | CMD ["python", "server.py"] 8 | -------------------------------------------------------------------------------- /scripts/x86/docker/function-controller/function_wrapper/server.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, request, jsonify 2 | from handler import handler 3 | 4 | app = Flask(__name__) 5 | 6 | 7 | @app.route("/") 8 | def function(): 9 | print("received request") 10 | args = {} 11 | if request.data: 12 | args = request.get_json() 13 | print(args) 14 | 15 | return jsonify(handler(args)) 16 | 17 | 18 | app.run(host='0.0.0.0', port=80) 19 | -------------------------------------------------------------------------------- /scripts/x86/docker/gpujob-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/gpujob-controller ./gpujob-controller 4 | RUN 
chmod +x gpujob-controller 5 | CMD ["./gpujob-controller"] 6 | -------------------------------------------------------------------------------- /scripts/x86/docker/ingress-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | RUN sed -i "s|http://deb.debian.org/debian|http://mirror.sjtu.edu.cn/debian|g" /etc/apt/sources.list && sed -i "s|http://security.debian.org|http://mirror.sjtu.edu.cn|g" /etc/apt/sources.list 4 | RUN apt-get update && apt-get -y install nginx && apt-get clean 5 | ADD http://minik8s.xyz:8008/ingress-controller ./ 6 | RUN chmod +x ingress-controller 7 | CMD ["/bin/bash", "-c", "nginx;./ingress-controller"] 8 | -------------------------------------------------------------------------------- /scripts/x86/docker/podautoscaler/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/podautoscaler ./ 4 | RUN chmod +x podautoscaler 5 | CMD ["./podautoscaler"] 6 | -------------------------------------------------------------------------------- /scripts/x86/docker/replicaset-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/replicaset-controller ./ 4 | RUN chmod +x replicaset-controller 5 | CMD ["./replicaset-controller"] 6 | -------------------------------------------------------------------------------- /scripts/x86/docker/rust/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:latest 2 | RUN sed -i "s|http://deb.debian.org/debian|http://mirror.sjtu.edu.cn/debian|g" /etc/apt/sources.list && sed -i "s|http://security.debian.org|http://mirror.sjtu.edu.cn|g" /etc/apt/sources.list 3 | RUN apt-get update && apt-get install -y cmake iptables && apt-get clean 4 | ENV 
CARGO_HOME=/.cargo 5 | COPY ./config.toml /.cargo/config.toml 6 | COPY ./dummy /root/dummy 7 | WORKDIR /root/dummy 8 | RUN cargo fetch 9 | WORKDIR /project -------------------------------------------------------------------------------- /scripts/x86/docker/rust/config.toml: -------------------------------------------------------------------------------- 1 | [source] 2 | 3 | [source.mirror] 4 | registry = "https://mirrors.sjtug.sjtu.edu.cn/git/crates.io-index/" 5 | 6 | [source.crates-io] 7 | replace-with = "mirror" 8 | -------------------------------------------------------------------------------- /scripts/x86/docker/rust/dummy/.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /scripts/x86/docker/rust/dummy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "dummy" 4 | version = "0.1.0" 5 | 6 | [patch.crates-io] 7 | bollard = {git = 'https://hub.fastgit.xyz/y-young/bollard'} 8 | 9 | [dependencies] 10 | anyhow = {version = "1.0.56", features = ["backtrace"]} 11 | async-trait = "0.1.53" 12 | axum = {version = "0.5.1", features = ["ws", "multipart"]} 13 | axum-macros = "0.2.0" 14 | bollard = "0.12.0" 15 | chrono = "0.4.19" 16 | chrono-humanize = "0.2.1" 17 | clap = {version = "3.1.8", features = ["derive"]} 18 | config = {version = "0.13.1", features = ["yaml"]} 19 | dashmap = "5.3.3" 20 | deadpool = {version = "0.9.3", features = ["rt_tokio_1"]} 21 | dotenv = "0.15.0" 22 | enum_dispatch = "0.3.8" 23 | etcd-client = "0.9.0" 24 | futures = "0.3.21" 25 | futures-delay-queue = "0.5.2" 26 | futures-intrusive = "0.4" 27 | futures-util = "0.3.21" 28 | iptables = "0.5.0" 29 | lazy_static = "1.4.0" 30 | nginx-config = "0.13.2" 31 | parking_lot = "0.12.0" 32 | prometheus-http-api = "0.2.0" 33 | rand = "0.8.5" 34 | reqwest = {version = "0.11", features = 
["blocking", "json"]} 35 | serde = {version = "1.0.136", features = ["derive"]} 36 | serde_json = "1.0.79" 37 | serde_yaml = "0.8.23" 38 | strum = {version = "0.24", features = ["derive"]} 39 | tokio = {version = "1.17.0", features = ["full"]} 40 | tokio-tungstenite = "0.17.1" 41 | tokio-util = "0.7.2" 42 | tower-http = {version = "0.3.3", features = ["fs"]} 43 | tracing = "0.1.32" 44 | tracing-subscriber = "0.3.10" 45 | uuid = {version = "0.8", features = ["serde", "v4"]} 46 | -------------------------------------------------------------------------------- /scripts/x86/docker/rust/dummy/src/main.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | println!("Hello, world!"); 3 | } 4 | -------------------------------------------------------------------------------- /scripts/x86/docker/scheduler/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/scheduler ./ 4 | RUN chmod +x scheduler 5 | CMD ["./scheduler"] 6 | -------------------------------------------------------------------------------- /scripts/x86/docker/serverless-router/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | WORKDIR /minik8s 3 | ADD http://minik8s.xyz:8008/serverless-router ./serverless-router 4 | RUN chmod +x serverless-router 5 | CMD ["./serverless-router"] 6 | -------------------------------------------------------------------------------- /scripts/x86/local-dev/.gitignore: -------------------------------------------------------------------------------- 1 | dns/serverless_router.db 2 | dns/ingress.db 3 | -------------------------------------------------------------------------------- /scripts/x86/local-dev/coredns.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=DNS for rminik8s 3 | 
After=network-online.target firewalld.service containerd.service time-set.target 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/conf.env 7 | ExecStart=/usr/local/bin/coredns -conf /config/Corefile 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/x86/local-dev/daemon.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "registry-mirrors": [ 3 | "https://docker.mirrors.sjtug.sjtu.edu.cn" 4 | ], 5 | "bip": "$FLANNEL_SUBNET" 6 | } -------------------------------------------------------------------------------- /scripts/x86/local-dev/dns/Corefile: -------------------------------------------------------------------------------- 1 | .:53 { 2 | forward . 119.29.29.29 114.114.114.114 3 | log 4 | errors 5 | } 6 | 7 | minik8s.com:53 { 8 | file /config/ingress.db 9 | log 10 | errors 11 | } 12 | 13 | func.minik8s.com:53 { 14 | file /config/serverless_router.db 15 | log 16 | errors 17 | } 18 | -------------------------------------------------------------------------------- /scripts/x86/local-dev/dns/ingress.db.template: -------------------------------------------------------------------------------- 1 | minik8s.com. IN SOA dns.minik8s.com. email.minik8s.com. 2015082541 7200 3600 1209600 3600 2 | * IN A ${INGRESS_IP} 3 | -------------------------------------------------------------------------------- /scripts/x86/local-dev/dns/serverless_router.db.template: -------------------------------------------------------------------------------- 1 | minik8s.com. IN SOA dns.minik8s.com. email.minik8s.com. 
2015082541 7200 3600 1209600 3600 2 | * IN A ${SERVERLESS_ROUTER_IP} 3 | -------------------------------------------------------------------------------- /scripts/x86/local-dev/down.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | printf "nameserver 119.29.29.29\n" > /etc/resolv.conf 8 | 9 | docker rm -f etcd 10 | docker rm -f prometheus 11 | systemctl daemon-reload 12 | docker rm -f dns 13 | docker rm -f cadvisor 14 | -------------------------------------------------------------------------------- /scripts/x86/local-dev/flanneld.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Flannel CNI Daemon for rMiniK8s 3 | After=network-online.target firewalld.service containerd.service time-set.target 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/conf.env 7 | ExecStart=/usr/local/bin/flanneld --etcd-endpoints=${ETCD_ENDPOINT} 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/x86/local-dev/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 6 | external_labels: 7 | monitor: "codelab-monitor" 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: "prometheus" 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds. 
16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ["localhost:9090"] 20 | 21 | - job_name: "cadvisor" 22 | scrape_interval: 15s 23 | static_configs: 24 | - targets: ["localhost:8090"] 25 | 26 | - job_name: "serverless_router" 27 | static_configs: 28 | - targets: ["localhost:80"] 29 | -------------------------------------------------------------------------------- /scripts/x86/local-dev/up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | mkdir -p /etc/rminik8s 8 | ARCH=$(dpkg --print-architecture) 9 | printf "ARCH: $ARCH\n" 10 | export IP=$(ip route get 114.114.114.114 | awk '{ print $7; exit }') 11 | printf "IP: $IP\n\n" 12 | 13 | # install flannel 14 | if [ ! -f /usr/local/bin/flanneld ]; then 15 | printf "Installing flanneld ...\n" 16 | wget -q --show-progress -O /usr/local/bin/flanneld "http://minik8s.xyz:8008/flanneld-$ARCH" 17 | chmod +x /usr/local/bin/flanneld 18 | else 19 | printf "flanneld installed\n\n" 20 | fi 21 | cp ./flanneld.service /etc/systemd/system/flanneld.service 22 | printf "flanneld service installed\n\n" 23 | 24 | # start etcd 25 | printf "Starting etcd... \n" 26 | docker run -d \ 27 | --network host \ 28 | --restart=always \ 29 | --name etcd quay.io/coreos/etcd:v3.5.4 \ 30 | etcd \ 31 | -enable-v2 \ 32 | -advertise-client-urls http://0.0.0.0:2379 \ 33 | -listen-client-urls http://0.0.0.0:2379 34 | docker run -d \ 35 | --network host \ 36 | --rm \ 37 | -e ETCDCTL_API=2 \ 38 | quay.io/coreos/etcd:v3.5.4 \ 39 | etcdctl \ 40 | set /coreos.com/network/config '{ "Network": "10.66.0.0/16", "Backend": {"Type": "vxlan"}}' 41 | export ETCD_ENDPOINT=http://${IP}:2379 42 | printf "ETCD_ENDPOINT=$ETCD_ENDPOINT\n" > /etc/rminik8s/conf.env 43 | printf "ETCD started, endpoint=$ETCD_ENDPOINT\n\n" 44 | 45 | # start flannel 46 | systemctl daemon-reload 47 | systemctl restart flanneld.service 48 | while [ ! 
-f /run/flannel/subnet.env ]; do 49 | echo "Waiting for flanneld service" 50 | sleep 0.5 51 | done 52 | set -a; source /run/flannel/subnet.env; set +a 53 | printf "flanneld service ok\n\n" 54 | 55 | # assign ip for each component 56 | SUBNET_BASE=${FLANNEL_SUBNET:0:-5} 57 | export API_SERVER_IP=${SUBNET_BASE}.100 58 | export INGRESS_IP=${SUBNET_BASE}.101 59 | export SERVERLESS_ROUTER_IP=$IP 60 | export PROMETHEUS_IP=${SUBNET_BASE}.103 61 | 62 | # start dns 63 | envsubst <./dns/serverless_router.db.template > ./dns/serverless_router.db 64 | envsubst <./dns/ingress.db.template > ./dns/ingress.db 65 | docker run -d --name dns \ 66 | --restart=always \ 67 | -v $(pwd)/dns:/config \ 68 | --network host \ 69 | coredns/coredns \ 70 | -conf /config/Corefile 71 | printf "nameserver $IP\n" > /etc/resolv.conf 72 | printf "DNS server started\n\n" 73 | 74 | # configure docker 75 | envsubst < ./daemon.example.json > ./daemon.json 76 | mv ./daemon.json /etc/docker/daemon.json 77 | systemctl restart docker 78 | 79 | docker run --rm -d --net=host \ 80 | -v $(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml \ 81 | --name prometheus \ 82 | prom/prometheus:latest \ 83 | --web.enable-lifecycle --config.file=/etc/prometheus/prometheus.yml 84 | 85 | docker run \ 86 | --volume=/:/rootfs:ro \ 87 | --volume=/var/run:/var/run:rw \ 88 | --volume=/sys:/sys:ro \ 89 | --volume=/var/lib/docker/:/var/lib/docker:ro \ 90 | --publish=8090:8080 \ 91 | --detach=true \ 92 | --name=cadvisor \ 93 | zcube/cadvisor:latest -------------------------------------------------------------------------------- /scripts/x86/master/.gitignore: -------------------------------------------------------------------------------- 1 | dns/serverless_router.db 2 | dns/ingress.db 3 | -------------------------------------------------------------------------------- /scripts/x86/master/dns/Corefile: -------------------------------------------------------------------------------- 1 | .:53 { 2 | forward . 
119.29.29.29 114.114.114.114 3 | log 4 | errors 5 | } 6 | 7 | minik8s.com:53 { 8 | file /config/ingress.db 9 | log 10 | errors 11 | } 12 | 13 | func.minik8s.com:53 { 14 | file /config/serverless_router.db 15 | log 16 | errors 17 | } 18 | -------------------------------------------------------------------------------- /scripts/x86/master/dns/ingress.db.template: -------------------------------------------------------------------------------- 1 | minik8s.com. IN SOA dns.minik8s.com. email.minik8s.com. 2015082541 7200 3600 1209600 3600 2 | * IN A ${INGRESS_IP} 3 | -------------------------------------------------------------------------------- /scripts/x86/master/dns/serverless_router.db.template: -------------------------------------------------------------------------------- 1 | minik8s.com. IN SOA dns.minik8s.com. email.minik8s.com. 2015082541 7200 3600 1209600 3600 2 | * IN A ${SERVERLESS_ROUTER_IP} 3 | -------------------------------------------------------------------------------- /scripts/x86/master/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | api_server: 5 | image: minik8s.xyz/api_server:latest 6 | container_name: api_server 7 | ports: 8 | - "8080:8080" 9 | environment: 10 | - "ETCD_ENDPOINT=${ETCD_ENDPOINT}" 11 | - "METRICS_SERVER=prometheus:9090" 12 | depends_on: 13 | - prometheus 14 | networks: 15 | default: 16 | ipv4_address: "$API_SERVER_IP" 17 | 18 | prometheus: 19 | image: prom/prometheus:latest 20 | container_name: prometheus 21 | volumes: 22 | - ./prometheus.yml:/etc/prometheus/prometheus.yml 23 | networks: 24 | default: 25 | ipv4_address: "$PROMETHEUS_IP" 26 | 27 | endpoints-controller: 28 | image: minik8s.xyz/endpoints-controller:latest 29 | container_name: endpoints-controller 30 | environment: 31 | - API_SERVER_ENDPOINT=http://api_server:8080 32 | depends_on: 33 | - api_server 34 | 35 | ingress-controller: 36 | image: 
minik8s.xyz/ingress-controller:latest 37 | container_name: ingress 38 | environment: 39 | - API_SERVER_ENDPOINT=http://api_server:8080 40 | depends_on: 41 | - api_server 42 | networks: 43 | default: 44 | ipv4_address: "$INGRESS_IP" 45 | 46 | replicaset-controller: 47 | image: minik8s.xyz/replicaset-controller:latest 48 | container_name: replicaset-controller 49 | environment: 50 | - API_SERVER_URL=http://api_server:8080 51 | - API_SERVER_WATCH_URL=ws://api_server:8080 52 | depends_on: 53 | - api_server 54 | 55 | function-controller: 56 | image: minik8s.xyz/function-controller:latest 57 | container_name: function-controller 58 | environment: 59 | - API_SERVER_URL=http://api_server:8080 60 | - API_SERVER_WATCH_URL=ws://api_server:8080 61 | depends_on: 62 | - api_server 63 | 64 | serverless-router: 65 | image: minik8s.xyz/serverless-router:latest 66 | container_name: serverless-router 67 | environment: 68 | - API_SERVER_ENDPOINT=http://api_server:8080 69 | depends_on: 70 | - api_server 71 | 72 | cadvisor: 73 | image: zcube/cadvisor:latest 74 | container_name: cadvisor 75 | ports: 76 | - "8090:8080" 77 | volumes: 78 | - /:/rootfs:ro 79 | - /var/run:/var/run:rw 80 | - /sys:/sys:ro 81 | - /var/lib/docker/:/var/lib/docker:ro 82 | 83 | networks: 84 | default: 85 | ipam: 86 | driver: default 87 | config: 88 | - subnet: "${FLANNEL_SUBNET}" 89 | -------------------------------------------------------------------------------- /scripts/x86/master/down.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export IP=$(ip route get 114.114.114.114 | awk '{ print $7; exit }') 3 | export API_SERVER_IP=${SUBNET_BASE}.100 4 | export INGRESS_IP=${SUBNET_BASE}.101 5 | export SERVERLESS_ROUTER_IP=${SUBNET_BASE}.102 6 | export PROMETHEUS_IP=${SUBNET_BASE}.103 7 | export ETCD_ENDPOINT=http://${IP}:2379 8 | set -a; source /run/flannel/subnet.env; set +a 9 | 10 | printf "nameserver 119.29.29.29\n" > /etc/resolv.conf 11 | 12 | docker compose 
-p minik8s-control-plane down -t 1 -v --remove-orphans 13 | docker rm -f etcd 14 | docker rm -f dns 15 | systemctl daemon-reload 16 | systemctl stop rkubeproxy 17 | systemctl stop flanneld -------------------------------------------------------------------------------- /scripts/x86/master/flanneld.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Flannel CNI Daemon for rMiniK8s 3 | After=network-online.target firewalld.service containerd.service time-set.target 4 | Before=kdocker.service 5 | 6 | [Service] 7 | EnvironmentFile=/etc/rminik8s/conf.env 8 | ExecStart=/usr/local/bin/flanneld --etcd-endpoints=${ETCD_ENDPOINT} 9 | Restart=on-failure 10 | RestartSec=3s 11 | 12 | [Install] 13 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/x86/master/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 3 | 4 | # Attach these labels to any time series or alerts when communicating with 5 | # external systems (federation, remote storage, Alertmanager). 6 | external_labels: 7 | monitor: 'codelab-monitor' 8 | 9 | # A scrape configuration containing exactly one endpoint to scrape: 10 | # Here it's Prometheus itself. 11 | scrape_configs: 12 | # The job name is added as a label `job=` to any timeseries scraped from this config. 13 | - job_name: 'prometheus' 14 | 15 | # Override the global default and scrape targets from this job every 5 seconds. 
16 | scrape_interval: 5s 17 | 18 | static_configs: 19 | - targets: ['localhost:9090'] 20 | 21 | - job_name: 'cadvisor_master' 22 | scrape_interval: 15s 23 | static_configs: 24 | - targets: ['cadvisor:8090'] 25 | -------------------------------------------------------------------------------- /scripts/x86/master/rkubeproxy.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=rKubeProxy 3 | After=network-online.target firewalld.service containerd.service time-set.target flanneld.service 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/node.env 7 | ExecStart=/usr/local/bin/rkube-proxy 8 | Restart=on-failure 9 | RestartSec=3s 10 | 11 | [Install] 12 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /scripts/x86/master/up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | mkdir -p /etc/rminik8s 8 | ARCH=$(dpkg --print-architecture) 9 | printf "ARCH: $ARCH\n" 10 | export IP=$(ip route get 114.114.114.114 | awk '{ print $7; exit }') 11 | printf "IP: $IP\n\n" 12 | 13 | # install flannel 14 | if [ ! -f /usr/local/bin/flanneld ]; then 15 | printf "Installing flanneld ...\n" 16 | wget -q --show-progress -O /usr/local/bin/flanneld "http://minik8s.xyz:8008/flanneld-$ARCH" 17 | chmod +x /usr/local/bin/flanneld 18 | else 19 | printf "flanneld installed\n\n" 20 | fi 21 | cp ./flanneld.service /etc/systemd/system/flanneld.service 22 | printf "flanneld service installed\n\n" 23 | 24 | # start etcd 25 | printf "Starting etcd... 
\n" 26 | docker run -d \ 27 | --network host \ 28 | --restart=always \ 29 | --name etcd quay.io/coreos/etcd:latest \ 30 | etcd \ 31 | -enable-v2 \ 32 | -advertise-client-urls http://0.0.0.0:2379 \ 33 | -listen-client-urls http://0.0.0.0:2379 34 | docker run -d \ 35 | --network host \ 36 | --rm \ 37 | quay.io/coreos/etcd:latest \ 38 | etcdctl \ 39 | set /coreos.com/network/config '{ "Network": "10.66.0.0/16", "Backend": {"Type": "vxlan"}}' 40 | export ETCD_ENDPOINT=http://${IP}:2379 41 | printf "ETCD_ENDPOINT=$ETCD_ENDPOINT\n" > /etc/rminik8s/conf.env 42 | printf "ETCD started, endpoint=$ETCD_ENDPOINT\n\n" 43 | 44 | # start flannel 45 | systemctl daemon-reload 46 | systemctl restart flanneld.service 47 | while [ ! -f /run/flannel/subnet.env ]; do 48 | echo "Waiting for flanneld service" 49 | sleep 0.5 50 | done 51 | set -a; source /run/flannel/subnet.env; set +a 52 | printf "flanneld service ok\n\n" 53 | 54 | # assign ip for each component 55 | SUBNET_BASE=${FLANNEL_SUBNET:0:-5} 56 | export API_SERVER_IP=${SUBNET_BASE}.100 57 | export INGRESS_IP=${SUBNET_BASE}.101 58 | export SERVERLESS_ROUTER_IP=${SUBNET_BASE}.102 59 | export PROMETHEUS_IP=${SUBNET_BASE}.103 60 | 61 | # start dns 62 | envsubst <./dns/serverless_router.db.template > ./dns/serverless_router.db 63 | envsubst <./dns/ingress.db.template > ./dns/ingress.db 64 | docker run -d --name dns \ 65 | --restart=always \ 66 | -v $(pwd)/dns:/config \ 67 | --network host \ 68 | coredns/coredns \ 69 | -conf /config/Corefile 70 | printf "nameserver $IP\n" > /etc/resolv.conf 71 | printf "DNS server started\n\n" 72 | 73 | # start control plane 74 | docker-compose -p minik8s-control-plane up -d 75 | printf "control plane started\n\n" 76 | 77 | # start rkube-proxy 78 | printf "Installing rkube-proxy ...\n" 79 | wget -q --show-progress -O /usr/local/bin/rkube-proxy http://minik8s.xyz:8008/rkube-proxy 80 | chmod +x /usr/local/bin/rkube-proxy 81 | cp ./rkubeproxy.service /etc/systemd/system/rkubeproxy.service 82 | printf 
"ETCD_ENDPOINT=$ETCD_ENDPOINT\nAPI_SERVER_ENDPOINT=http://${API_SERVER_IP}:8080\n" > /etc/rminik8s/node.env 83 | systemctl daemon-reload 84 | systemctl restart rkubeproxy.service 85 | printf "rkube-proxy started\n\n" 86 | 87 | # display ip 88 | printf "The following endpoints are exposed:\n" 89 | printf " api-server: http://${API_SERVER_IP}:8080\n" 90 | printf " ingress: http://${INGRESS_IP}\n" 91 | printf " prometheus: http://${PROMETHEUS_IP}:9090\n" 92 | printf " etcd: $ETCD_ENDPOINT\n" 93 | printf " dns: $IP:53\n" 94 | 95 | -------------------------------------------------------------------------------- /scripts/x86/node/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$EUID" -ne 0 ] 3 | then echo "Please run as root" 4 | exit 5 | fi 6 | 7 | # stop docker 8 | systemctl stop docker.service containerd.service docker.socket 9 | 10 | # env 11 | ARCH=$(dpkg --print-architecture) 12 | echo "ARCH: $ARCH" 13 | 14 | # install flannel 15 | if [ ! -f /usr/local/bin/flanneld ]; then 16 | echo "Installing flanneld ..." 17 | wget -q --show-progress -O /usr/local/bin/flanneld "https://github.com/flannel-io/flannel/releases/download/v0.17.0/flanneld-${ARCH}" 18 | chmod +x /usr/local/bin/flanneld 19 | fi 20 | 21 | # install services 22 | echo "Installing systemd services ..." 23 | wget -q --show-progress -O /etc/systemd/system/flanneld.service https://s3.jcloud.sjtu.edu.cn/1b088ff214b04e6291c549a95685610b-share/flanneld.service 24 | wget -q --show-progress -O /etc/systemd/system/kdocker.service https://s3.jcloud.sjtu.edu.cn/1b088ff214b04e6291c549a95685610b-share/kdocker.service 25 | 26 | # write configuration file 27 | if [ ! -f /etc/rminik8s/conf.env ]; then 28 | echo "Configuration not found. Generating..." 29 | echo -n "Please input master etcd endpoint(eg. 
http://127.0.0.1:2379): " 30 | read -r ETCD_ENDPOINT 31 | mkdir -p /etc/rminik8s 32 | printf 'ETCD_ENDPOINT="%s"\n' "$ETCD_ENDPOINT" > /etc/rminik8s/conf.env 33 | fi 34 | 35 | # modify dns 36 | systemctl stop systemd-resolved 37 | if grep -Fq "minik8s" /etc/resolv.conf; 38 | then 39 | echo "DNS already configured" 40 | else 41 | echo -n "Please input DNS server ip: " 42 | read -r DNS_SERVER_IP 43 | mv /etc/resolv.conf /etc/resolv.conf.bk 44 | printf '#minik8s\nnameserver %s\n' "$DNS_SERVER_IP" | cat - /etc/resolv.conf.bk > /etc/resolv.conf 45 | fi 46 | 47 | 48 | # start 49 | systemctl daemon-reload 50 | systemctl restart flanneld.service 51 | echo "flanneld started" 52 | 53 | while [ ! -f /run/flannel/subnet.env ]; do 54 | sleep 0.1 55 | done 56 | systemctl restart kdocker.service 57 | echo "docker started" 58 | -------------------------------------------------------------------------------- /scripts/x86/node/rkubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=rKubelet 3 | After=network-online.target firewalld.service containerd.service time-set.target flanneld.service 4 | 5 | [Service] 6 | EnvironmentFile=/etc/rminik8s/conf.env 7 | ExecStart=/usr/local/bin/rkubelet 8 | 9 | [Install] 10 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /serverless/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2021" 3 | name = "serverless" 4 | version = "0.1.0" 5 | 6 | [[bin]] 7 | name = "serverless-router" 8 | path = "src/router/main.rs" 9 | 10 | [dependencies] 11 | anyhow = {version = "1.0.56", features = ["backtrace"]} 12 | dotenv = "0.15.0" 13 | futures = "0.3.21" 14 | hyper = "0.14.18" 15 | lazy_static = "1.4.0" 16 | nginx-config = "0.13.2" 17 | ordinal = "0.3.2" 18 | prometheus = "0.13.1" 19 | reqwest = {version = "0.11", features = ["json"]} 20 | resources = {path = 
"../resources"} 21 | tokio = {version = "1.17.0", features = ["full"]} 22 | tokio-tungstenite = "0.17.1" 23 | tracing = "0.1.32" 24 | tracing-subscriber = "0.3.10" 25 | -------------------------------------------------------------------------------- /serverless/src/router/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate lazy_static; 3 | 4 | use std::env; 5 | 6 | use anyhow::Result; 7 | use hyper::{ 8 | service::{make_service_fn, service_fn}, 9 | Server, 10 | }; 11 | use prometheus::{opts, register_counter_vec, CounterVec}; 12 | use reqwest::Url; 13 | use resources::{ 14 | models::NodeConfig, 15 | objects::{function::Function, service::Service}, 16 | }; 17 | 18 | use crate::{route::router, utils::create_informer}; 19 | 20 | lazy_static! { 21 | static ref CONFIG: NodeConfig = { 22 | dotenv::from_path("/etc/rminik8s/node.env").ok(); 23 | NodeConfig { 24 | etcd_endpoint: match env::var("ETCD_ENDPOINT") { 25 | Ok(url) => Url::parse(url.as_str()).unwrap(), 26 | Err(_) => Url::parse("http://127.0.0.1:2379/").unwrap(), 27 | }, 28 | api_server_endpoint: match env::var("API_SERVER_ENDPOINT") { 29 | Ok(url) => Url::parse(url.as_str()).unwrap(), 30 | Err(_) => Url::parse("http://127.0.0.1:8080/").unwrap(), 31 | }, 32 | } 33 | }; 34 | static ref REQUESTS_COUNTER: CounterVec = register_counter_vec!( 35 | opts!("function_requests_total", "Total number of requests"), 36 | &["function"] // labels 37 | ) 38 | .unwrap(); 39 | } 40 | 41 | mod route; 42 | mod utils; 43 | mod workflow; 44 | 45 | #[tokio::main] 46 | async fn main() -> Result<(), Box> { 47 | tracing_subscriber::fmt::init(); 48 | tracing::info!("Serverless router started"); 49 | 50 | // let (tx, mut rx) = mpsc::channel::(16); 51 | let func_informer = create_informer::("functions"); 52 | let func_store = func_informer.get_store(); 53 | let func_informer_handler = tokio::spawn(async move { func_informer.run().await }); 54 | 55 | let svc_informer = 
create_informer::("services"); 56 | let svc_store = svc_informer.get_store(); 57 | let svc_informer_handler = tokio::spawn(async move { svc_informer.run().await }); 58 | 59 | let addr = ([0, 0, 0, 0], 80).into(); 60 | let service = make_service_fn(move |_| { 61 | let func_store = func_store.clone(); 62 | let svc_store = svc_store.clone(); 63 | 64 | async move { 65 | Ok::<_, hyper::Error>(service_fn(move |req| { 66 | let func_store = func_store.clone(); 67 | let svc_store = svc_store.clone(); 68 | async move { Ok::<_, hyper::Error>(router(req, func_store, svc_store).await) } 69 | })) 70 | } 71 | }); 72 | 73 | let server = Server::bind(&addr).serve(service); 74 | let graceful = server.with_graceful_shutdown(async { 75 | tokio::signal::ctrl_c() 76 | .await 77 | .expect("failed to install CTRL+C signal handler"); 78 | }); 79 | 80 | tracing::info!("Listening on http://{}", addr); 81 | 82 | graceful.await?; 83 | func_informer_handler.abort(); 84 | svc_informer_handler.abort(); 85 | 86 | Ok(()) 87 | } 88 | -------------------------------------------------------------------------------- /serverless/src/router/route/workflow.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markcty/rMiniK8s/35b2c30064f9d203f76af0752635ccf67b246424/serverless/src/router/route/workflow.rs -------------------------------------------------------------------------------- /serverless/src/router/utils.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use resources::{ 3 | informer::{EventHandler, Informer, ListerWatcher, ResyncHandler}, 4 | models::{self}, 5 | objects::Object, 6 | }; 7 | use tokio_tungstenite::connect_async; 8 | 9 | use crate::CONFIG; 10 | 11 | pub fn create_informer(kind_plural: &'static str) -> Informer { 12 | let lw = ListerWatcher { 13 | lister: Box::new(move |_| { 14 | Box::pin(async move { 15 | let res = reqwest::get( 16 | CONFIG 17 | 
.api_server_endpoint 18 | .join(&format!("/api/v1/{}", kind_plural))?, 19 | ) 20 | .await? 21 | .json::>>() 22 | .await?; 23 | let res = res.data.ok_or_else(|| anyhow!("Lister failed"))?; 24 | anyhow::Ok(res) 25 | }) 26 | }), 27 | watcher: Box::new(move |_| { 28 | Box::pin(async move { 29 | let mut url = CONFIG 30 | .api_server_endpoint 31 | .join(&format!("/api/v1/watch/{}", kind_plural))?; 32 | url.set_scheme("ws").ok(); 33 | let (stream, _) = connect_async(url).await?; 34 | anyhow::Ok(stream) 35 | }) 36 | }), 37 | }; 38 | 39 | // create event handler closures 40 | let eh = EventHandler { 41 | add_cls: Box::new(move |_| Box::pin(async move { Ok(()) })), 42 | update_cls: Box::new(move |_| Box::pin(async move { Ok(()) })), 43 | delete_cls: Box::new(move |_| Box::pin(async move { Ok(()) })), 44 | }; 45 | 46 | let rh = ResyncHandler(Box::new(move |_| Box::pin(async move { Ok(()) }))); 47 | 48 | // start the informer 49 | Informer::new(lw, eh, rh) 50 | } 51 | --------------------------------------------------------------------------------