├── .gitee ├── ISSUE_TEMPLATE.zh-CN.md └── PULL_REQUEST_TEMPLATE.zh-CN.md ├── .gitignore ├── .gitlab-ci.yml ├── LICENSE ├── README.md ├── build ├── Makefile ├── restart.sh ├── run.sh └── stop.sh ├── cmd ├── apiserver.go ├── controller.go ├── kubectl.go ├── kubelet.go ├── kubeproxy.go ├── scheduler.go └── serverless.go ├── config ├── config.go ├── kubelet-config.yaml └── scheduler-config.yaml ├── doc ├── CICD.md ├── apiserver.md ├── assets │ ├── image-20230419215427092.png │ ├── image-20230420170733340.png │ └── image-20230514181753203.png ├── auto-scaler.md ├── dns.md ├── guideline.md ├── kubectl-api.md ├── kubelet.md ├── kubeproxy.md ├── scheduler.md ├── serverless.md └── 要求文档.pdf ├── example ├── dns │ ├── dnspod.yaml │ ├── dnspod2.yaml │ ├── dnsrecord.yaml │ ├── dnsservice.yaml │ └── dnsservice2.yaml ├── hpa │ ├── hpa-new.yaml │ ├── hpa.yaml │ ├── replica.yaml │ ├── service.yaml │ └── utilization │ │ ├── hpa.yaml │ │ └── replica.yaml ├── pod.yaml ├── pod │ ├── demo-pod.yaml │ ├── demo-pod2.yaml │ ├── demo-pod4.yaml │ ├── simple_http_server.go │ ├── test_cpu.go │ ├── test_memory.go │ └── test_network.go ├── replica.yaml ├── rs │ ├── demo-pod2.yaml │ └── replica.yaml ├── serverless │ ├── add.py │ ├── addfunc.yaml │ ├── diff.py │ ├── difffunc.yaml │ ├── mutlicall.sh │ ├── param.yaml │ ├── printdiff.py │ ├── printdifffunc.yaml │ ├── printsum.py │ ├── printsumfunc.yaml │ ├── singlefunc.py │ ├── singlefunc.yaml │ └── workflow.yaml └── service.yaml ├── go.mod ├── go.sum ├── gpu ├── Dockerfile ├── GPU.md ├── files.h ├── generate_matrix_data.c ├── gpu-add.yaml ├── gpu-mul.yaml ├── job.py ├── matrix-add │ ├── Makefile │ ├── files.h │ └── matrix-add.cu └── matrix-mul │ ├── Makefile │ ├── files.h │ └── matrix-mul.cu ├── pkg ├── apiobject │ ├── autoscaler.go │ ├── autoscaler_test │ ├── dnsrecord.go │ ├── dnsrecord_test.go │ ├── doc.go │ ├── endpoint.go │ ├── function.go │ ├── function_test.go │ ├── job.go │ ├── metrics.go │ ├── node.go │ ├── node_test.go │ ├── object.go │ ├── 
pod.go │ ├── pod_test.go │ ├── replication.go │ ├── replication_test.go │ ├── service.go │ ├── service_test.go │ ├── utils │ │ ├── duration.go │ │ ├── quantity.go │ │ └── time.go │ ├── workflow.go │ └── workflow_test.go ├── controller │ ├── HPAcontroller.go │ ├── jobcontroller.go │ ├── manager.go │ ├── rscontroller.go │ ├── svccontroller.go │ └── svccontroller_test.go ├── kubeapiserver │ ├── apimachinery │ │ ├── apiserver.go │ │ ├── heartbeat.go │ │ └── routeInstaller.go │ ├── doc.go │ ├── handlers │ │ ├── dnshandler.go │ │ ├── endpointhandler.go │ │ ├── functionhandler.go │ │ ├── handlertable.go │ │ ├── hpahandler.go │ │ ├── jobhandler.go │ │ ├── nodehandler.go │ │ ├── nodehandler_test.go │ │ ├── podhandler.go │ │ ├── podhandler_test.go │ │ ├── replicahandler.go │ │ ├── routeInstaller.go │ │ ├── servicehandler.go │ │ └── workflowhandler.go │ ├── run.go │ ├── storage │ │ ├── ectd_test.go │ │ └── etcd.go │ └── watch │ │ ├── list.go │ │ ├── watch.go │ │ └── watchtable.go ├── kubectl │ ├── cmd │ │ ├── apply.go │ │ ├── delete.go │ │ ├── describe.go │ │ ├── get.go │ │ ├── root.go │ │ └── trigger.go │ ├── doc.go │ ├── doc │ │ ├── dependency.md │ │ └── kubectl-api.md │ ├── test │ │ ├── http.go │ │ ├── http_test.go │ │ ├── kcl │ │ ├── kubectl │ │ ├── kubectl_test.go │ │ └── test.yaml │ └── utils │ │ └── utils.go ├── kubedns │ ├── config │ │ ├── Corefile │ │ └── nginx.conf │ ├── nginx │ │ ├── nginx.tmpl │ │ ├── nginxeditor.go │ │ └── nginxeditor_test.go │ └── testing │ │ └── test.html ├── kubelet │ ├── container │ │ ├── container.go │ │ ├── container_test.go │ │ └── containerutil.go │ ├── image │ │ ├── image.go │ │ └── image_test.go │ ├── kubelet.go │ ├── metricsserver │ │ ├── handler.go │ │ └── metricserver.go │ ├── pod │ │ ├── pod.go │ │ ├── pod_test.go │ │ ├── podutil.go │ │ └── podutil_test.go │ ├── run.go │ └── utils │ │ ├── helper.go │ │ ├── lock.go │ │ └── lock_test.go ├── kubeproxy │ ├── ipvs │ │ ├── ops.go │ │ └── state.go │ ├── proxy.go │ └── proxy_test.go ├── 
kubescheduler │ ├── doc.go │ ├── filter │ │ ├── configfilter.go │ │ ├── configfilter_test.go │ │ └── templatefilter.go │ ├── policy │ │ ├── lrscheduler.go │ │ ├── lrscheduler_test.go │ │ ├── resourcescheduler.go │ │ ├── resourcescheduler_test.go │ │ └── templatescheduler.go │ ├── run.go │ └── testutils │ │ └── builder.go └── serverless │ ├── activator │ ├── deploy.go │ └── deploy_test.go │ ├── autoscaler │ ├── metric.go │ └── record.go │ ├── eventfilter │ ├── functionwatcher.go │ └── workflowwatcher.go │ ├── function │ ├── image.go │ ├── image_test.go │ └── registry.sh │ ├── imagedata │ ├── Dockerfile │ ├── __pycache__ │ │ └── func.cpython-38.pyc │ ├── func.py │ ├── requirements.txt │ └── server.py │ ├── run.go │ └── workflow │ ├── workflowexecutor.go │ └── workflowexecutor_test.go └── utils ├── client.go ├── http.go ├── rand.go ├── resourceutils ├── unit.go └── unit_test.go └── utils.go /.gitee/ISSUE_TEMPLATE.zh-CN.md: -------------------------------------------------------------------------------- 1 | ### 该问题是怎么引起的? 2 | 3 | 4 | 5 | ### 重现步骤 6 | 7 | 8 | 9 | ### 报错信息 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /.gitee/PULL_REQUEST_TEMPLATE.zh-CN.md: -------------------------------------------------------------------------------- 1 | ### 一、内容说明(相关的Issue) 2 | 3 | 4 | 5 | ### 二、建议测试周期和提测地址 6 | 建议测试完成时间:xxxx.xx.xx 7 | 投产上线时间:xxxx.xx.xx 8 | 提测地址:CI环境/压测环境 9 | 测试账号: 10 | 11 | ### 三、变更内容 12 | * 3.1 关联PR列表 13 | 14 | * 3.2 数据库和部署说明 15 | 1. 常规更新 16 | 2. 重启unicorn 17 | 3. 重启sidekiq 18 | 4. 迁移任务:是否有迁移任务,没有写 "无" 19 | 5. rake脚本:`bundle exec xxx RAILS_ENV = production`;没有写 "无" 20 | 21 | * 3.4 其他技术优化内容(做了什么,变更了什么) 22 | - 重构了 xxxx 代码 23 | - xxxx 算法优化 24 | 25 | 26 | * 3.5 废弃通知(什么字段、方法弃用?) 27 | 28 | 29 | 30 | * 3.6 后向不兼容变更(是否有无法向后兼容的变更?) 31 | 32 | 33 | 34 | ### 四、研发自测点(自测哪些?冒烟用例全部自测?) 
35 | 自测测试结论: 36 | 37 | 38 | ### 五、测试关注点(需要提醒QA重点关注的、可能会忽略的地方) 39 | 检查点: 40 | 41 | | 需求名称 | 是否影响xx公共模块 | 是否需要xx功能 | 需求升级是否依赖其他子产品 | 42 | |------|------------|----------|---------------| 43 | | xxx | 否 | 需要 | 不需要 | 44 | | | | | | 45 | 46 | 接口测试: 47 | 48 | 性能测试: 49 | 50 | 并发测试: 51 | 52 | 其他: 53 | 54 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | #https://www.freecodecamp.org/chinese/news/gitignore-file-how-to-ignore-files-and-folders-in-git/ 2 | 3 | # Binaries for programs and plugins 4 | *.exe 5 | *.exe~ 6 | *.dll 7 | *.so 8 | *.dylib 9 | 10 | # Test binary, built with `go testing -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Dependency directories (remove the comment below to include it) 17 | # vendor/ 18 | .idea/ 19 | default.etcd/ 20 | go_build* 21 | 22 | # ignore vscode 23 | .vscode/ 24 | 25 | # ignore log 26 | *.log 27 | 28 | # ignore build/bin 29 | build/bin/ 30 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | stages: 2 | - prepare 3 | - test 4 | - build 5 | 6 | prepare: 7 | stage: prepare 8 | script: 9 | - go env -w GOPROXY=https://goproxy.cn 10 | tags: 11 | - shell 12 | 13 | test-kubelet: 14 | stage: test 15 | script: 16 | - sudo /usr/local/go/bin/go test minik8s/pkg/kubelet/container -cover 17 | - sudo /usr/local/go/bin/go test minik8s/pkg/kubelet/pod -cover 18 | - sudo /usr/local/go/bin/go test minik8s/pkg/kubelet/image -cover 19 | tags: 20 | - shell 21 | 22 | test-kubectl: 23 | stage: test 24 | script: 25 | - echo "testing kubectl" 26 | tags: 27 | - shell 28 | 29 | test-kubeproxy: 30 | stage: test 31 | script: 32 | - echo "testing kubeproxy" 33 | - sudo /usr/local/go/bin/go test minik8s/pkg/kubeproxy -cover 34 | tags: 
35 | - shell 36 | 37 | test-apiserver: 38 | stage: test 39 | script: 40 | - echo "testing apiserver" 41 | - sudo /usr/local/go/bin/go test minik8s/pkg/kubeapiserver/storage -cover 42 | tags: 43 | - shell 44 | 45 | test-scheduler: 46 | stage: test 47 | script: 48 | - echo "testing scheduler" 49 | - sudo /usr/local/go/bin/go test minik8s/pkg/kubescheduler/policy -cover 50 | - sudo /usr/local/go/bin/go test minik8s/pkg/kubescheduler/filter -cover 51 | tags: 52 | - shell 53 | 54 | test-serverless: 55 | stage: test 56 | script: 57 | - echo "testing serverless" 58 | - sudo /usr/local/go/bin/go test minik8s/pkg/serverless/activator -cover 59 | - sudo /usr/local/go/bin/go test minik8s/pkg/serverless/workflow -cover 60 | tags: 61 | - shell 62 | 63 | 64 | build: 65 | stage: build 66 | script: 67 | - cd build 68 | - make all 69 | - sudo cp -r bin /home/gitlab-runner/$CI_COMMIT_BRANCH/ 70 | tags: 71 | - shell 72 | -------------------------------------------------------------------------------- /build/Makefile: -------------------------------------------------------------------------------- 1 | CMDPATH=../cmd 2 | OUTPATH=./bin 3 | 4 | kubectl: 5 | go build -o $(OUTPATH)/kubectl $(CMDPATH)/kubectl.go 6 | 7 | kubelet: 8 | go build -o $(OUTPATH)/kubelet $(CMDPATH)/kubelet.go 9 | 10 | apiserver: 11 | go build -o $(OUTPATH)/apiserver $(CMDPATH)/apiserver.go 12 | 13 | scheduler: 14 | go build -o $(OUTPATH)/scheduler $(CMDPATH)/scheduler.go 15 | 16 | controller: 17 | go build -o $(OUTPATH)/controller $(CMDPATH)/controller.go 18 | 19 | kubeproxy: 20 | go build -o $(OUTPATH)/kubeproxy $(CMDPATH)/kubeproxy.go 21 | 22 | serverless: 23 | go build -o $(OUTPATH)/serverless $(CMDPATH)/serverless.go 24 | 25 | all: 26 | go build -o $(OUTPATH)/kubectl $(CMDPATH)/kubectl.go 27 | go build -o $(OUTPATH)/kubelet $(CMDPATH)/kubelet.go 28 | go build -o $(OUTPATH)/apiserver $(CMDPATH)/apiserver.go 29 | go build -o $(OUTPATH)/scheduler $(CMDPATH)/scheduler.go 30 | go build -o $(OUTPATH)/controller 
$(CMDPATH)/controller.go 31 | go build -o $(OUTPATH)/kubeproxy $(CMDPATH)/kubeproxy.go 32 | go build -o $(OUTPATH)/serverless $(CMDPATH)/serverless.go 33 | 34 | clean: 35 | rm $(OUTPATH)/* 36 | -------------------------------------------------------------------------------- /build/restart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./stop.sh 4 | 5 | sleep 20 6 | 7 | echo "restart the minik8s" 8 | ./run.sh -------------------------------------------------------------------------------- /build/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # cd to the home directory 4 | current_path=$(pwd) 5 | cd ~ 6 | cd /home 7 | 8 | # check if the ectd is running, if not, start it in the background 9 | # etcd is a progress 10 | if ! pgrep -x "etcd" > /dev/null 11 | then 12 | echo "etcd is not running, start it" 13 | nohup etcd & 14 | fi 15 | 16 | # check the default systemd-resolved, if it is running, stop it 17 | if pgrep -x "systemd-resolved" > /dev/null 18 | then 19 | echo "systemd-resolved is running, stop it" 20 | systemctl stop systemd-resolved 21 | fi 22 | 23 | # check if the coredns is running, if not, start it in the background 24 | if ! 
pgrep -x "coredns" > /dev/null 25 | then 26 | echo "coredns is not running, start it" 27 | nohup ./coredns -conf $(pwd)/mini-k8s/pkg/kubedns/config/Corefile & 28 | fi 29 | 30 | # check the default nginx, if it is running, stop it 31 | if pgrep -x "nginx" > /dev/null 32 | then 33 | echo "nginx is running, stop it" 34 | systemctl stop nginx 35 | fi 36 | 37 | # start the nginx in the background 38 | echo "start nginx" 39 | nohup nginx -c $(pwd)/mini-k8s/pkg/kubedns/config/nginx.conf & 40 | 41 | # build the components and run the server 42 | cd "$current_path" 43 | make kubectl 44 | make apiserver 45 | make scheduler 46 | make controller 47 | make serverless 48 | make kubeproxy 49 | 50 | # create the log directory if not exist 51 | if [ ! -d "./log" ]; then 52 | mkdir ./log 53 | fi 54 | 55 | 56 | cd bin 57 | 58 | # start the components in different terminals 59 | echo "start the minik8s" 60 | # ./apiserver > ../log/apiserver.log 2> /dev/null & 61 | 62 | ./apiserver > ../log/apiserver.log 2>&1 & 63 | echo "start apiserver" 64 | sleep 3 65 | ./scheduler > ../log/scheduler.log 2>&1 & 66 | echo "start scheduler" 67 | ./controller > ../log/controller.log 2>&1 & 68 | echo "start controller" 69 | ./kubeproxy > ../log/kubeproxy.log 2>&1 & 70 | 71 | 72 | chmod +x ../../pkg/serverless/function/registry.sh 73 | cd ../../pkg/serverless/function 74 | chmod +x ../../ 75 | ./registry.sh 76 | cd ../../../build/bin/ 77 | ./serverless > ../log/serverless.log 2>&1 & 78 | echo "start serverless" 79 | 80 | 81 | -------------------------------------------------------------------------------- /build/stop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # stop the components 4 | echo "stop the minik8s" 5 | # find the pid of the components by command 6 | # if can find multiple pid, kill them all 7 | pids=$(pgrep -f './apiserver') 8 | if [[ -n "$pids" ]]; then 9 | echo "kill apiserver" 10 | for pid in $pids; do 11 | echo "Killing 
process with PID $pid" 12 | kill "$pid" 13 | done 14 | fi 15 | 16 | 17 | pids=$(pgrep -f './scheduler') 18 | if [[ -n "$pids" ]]; then 19 | echo "kill scheduler" 20 | for pid in $pids; do 21 | echo "Killing process with PID $pid" 22 | kill "$pid" 23 | done 24 | fi 25 | 26 | pids=$(pgrep -f './controller') 27 | if [[ -n "$pids" ]]; then 28 | echo "kill controller" 29 | for pid in $pids; do 30 | echo "Killing process with PID $pid" 31 | kill "$pid" 32 | done 33 | fi 34 | 35 | 36 | pids=$(pgrep -f './kubeproxy') 37 | if [[ -n "$pids" ]]; then 38 | echo "kill kubeproxy" 39 | for pid in $pids; do 40 | echo "Killing process with PID $pid" 41 | kill "$pid" 42 | done 43 | fi 44 | 45 | pids=$(pgrep -f './serverless') 46 | if [[ -n "$pids" ]]; then 47 | echo "kill serverless" 48 | for pid in $pids; do 49 | echo "Killing process with PID $pid" 50 | kill "$pid" 51 | done 52 | fi -------------------------------------------------------------------------------- /cmd/apiserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "minik8s/pkg/kubeapiserver" 4 | 5 | func main() { 6 | kubeapiserver.Run() 7 | } 8 | -------------------------------------------------------------------------------- /cmd/controller.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "minik8s/pkg/controller" 4 | 5 | func main() { 6 | controller.Run() 7 | } 8 | -------------------------------------------------------------------------------- /cmd/kubectl.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "minik8s/pkg/kubectl/cmd" 6 | ) 7 | 8 | func main() { 9 | if err := cmd.RootCmd.Execute(); err != nil { 10 | fmt.Println(err.Error()) 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /cmd/kubelet.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "minik8s/pkg/kubelet" 6 | 7 | "github.com/spf13/cobra" 8 | "github.com/spf13/viper" 9 | ) 10 | 11 | var RootCmd = &cobra.Command{ 12 | Use: "kubelet", 13 | Short: "kubelet manages containers", 14 | Long: "kubelet manages containers", 15 | Run: runRoot, 16 | } 17 | 18 | var configAddr string 19 | 20 | // var KubeletConfig = kubelet.Config{ 21 | // ApiserverAddr: "192.168.1.13:8080", 22 | // FlannelSubnet: "10.2.17.1/24", 23 | // IP: "192.168.1.12", 24 | // Labels: map[string]string{}, 25 | // ListenAddr: "localhost:10250", 26 | // } 27 | 28 | var KubeletConfig = kubelet.Config{ 29 | ApiserverAddr: "192.168.1.13:8080", 30 | FlannelSubnet: "10.2.9.1/24", 31 | IP: "192.168.1.14", 32 | Labels: map[string]string{}, 33 | ListenAddr: "localhost:10250", 34 | CPU: "2", 35 | Memory: "3Gi", 36 | } 37 | 38 | func initConfig() { 39 | //fmt.Println(configAddr) 40 | viper.SetConfigFile(configAddr) 41 | err := viper.ReadInConfig() 42 | if err == nil { 43 | //panic(err) 44 | if err := viper.Unmarshal(&KubeletConfig); err != nil { 45 | //panic(err) 46 | } 47 | } 48 | //if err,use default config 49 | fmt.Println(KubeletConfig) 50 | } 51 | 52 | func init() { 53 | cobra.OnInitialize(initConfig) 54 | //RootCmd.Flags().StringVarP(&apiserverAddr, "apiserver-address", "a", utils.ApiServerIp, "kubelet (-a apiserver-address)") 55 | RootCmd.PersistentFlags().StringVarP(&configAddr, "config", "c", "./kubelet-config.yaml", "kubelet (-c config)") 56 | } 57 | 58 | func runRoot(cmd *cobra.Command, args []string) { 59 | kubelet.Run(KubeletConfig) 60 | } 61 | func main() { 62 | if err := RootCmd.Execute(); err != nil { 63 | fmt.Println(err.Error()) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /cmd/kubeproxy.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | 
import ( 4 | "fmt" 5 | "minik8s/config" 6 | "minik8s/pkg/kubeproxy" 7 | 8 | "github.com/spf13/cobra" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | var ProxyCmd = &cobra.Command{ 13 | Use: "kubeproxy", 14 | Short: "kubeproxy manages network", 15 | Long: "kubeproxy manages network", 16 | Run: runProxy, 17 | } 18 | 19 | var proxyConfigAddr string 20 | 21 | var KubeproxyConfig = Config{ 22 | ApiserverAddr: "192.168.1.13:8080", 23 | } 24 | 25 | type Config struct { 26 | ApiserverAddr string // 192.168.1.13:8080 27 | } 28 | 29 | func proxyInitConfig() { 30 | //fmt.Println(configAddr) 31 | viper.SetConfigFile(proxyConfigAddr) 32 | err := viper.ReadInConfig() 33 | if err == nil { 34 | //panic(err) 35 | if err := viper.Unmarshal(&KubeproxyConfig); err != nil { 36 | //panic(err) 37 | } 38 | } 39 | //if err,use default config 40 | fmt.Println(KubeproxyConfig) 41 | } 42 | 43 | func init() { 44 | cobra.OnInitialize(proxyInitConfig) 45 | //RootCmd.Flags().StringVarP(&apiserverAddr, "apiserver-address", "a", utils.ApiServerIp, "kubelet (-a apiserver-address)") 46 | ProxyCmd.PersistentFlags().StringVarP(&proxyConfigAddr, "config", "c", "./kubeproxy-config.yaml", "kubeproxy (-c config)") 47 | } 48 | 49 | func runProxy(cmd *cobra.Command, args []string) { 50 | config.ApiServerIp = KubeproxyConfig.ApiserverAddr 51 | kubeproxy.Run() 52 | } 53 | 54 | func main() { 55 | if err := ProxyCmd.Execute(); err != nil { 56 | fmt.Println(err.Error()) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /cmd/scheduler.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "github.com/spf13/cobra" 6 | "github.com/spf13/viper" 7 | "minik8s/pkg/kubescheduler" 8 | ) 9 | 10 | var SchedulerCmd = &cobra.Command{ 11 | Use: "", 12 | Short: "", 13 | Long: "", 14 | Run: runScheduler, 15 | } 16 | 17 | var schedulerConfigAddr string 18 | 19 | var SchedulerConfig = kubescheduler.Config{ 
20 | Policy: "default", 21 | } 22 | 23 | func schedulerInitConfig() { 24 | //fmt.Println(configAddr) 25 | viper.SetConfigFile(schedulerConfigAddr) 26 | err := viper.ReadInConfig() 27 | if err == nil { 28 | //panic(err) 29 | if err := viper.Unmarshal(&SchedulerConfig); err != nil { 30 | //panic(err) 31 | } 32 | } 33 | //if err,use default config 34 | fmt.Println(SchedulerConfig) 35 | } 36 | 37 | func init() { 38 | cobra.OnInitialize(schedulerInitConfig) 39 | //RootCmd.Flags().StringVarP(&apiserverAddr, "apiserver-address", "a", utils.ApiServerIp, "kubelet (-a apiserver-address)") 40 | SchedulerCmd.PersistentFlags().StringVarP(&schedulerConfigAddr, "config", "c", "./scheduler-config.yaml", "scheduler (-c config)") 41 | } 42 | 43 | func runScheduler(cmd *cobra.Command, args []string) { 44 | kubescheduler.Run(SchedulerConfig) 45 | } 46 | func main() { 47 | if err := SchedulerCmd.Execute(); err != nil { 48 | fmt.Println(err.Error()) 49 | } 50 | } 51 | 52 | 53 | -------------------------------------------------------------------------------- /cmd/serverless.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "minik8s/pkg/serverless" 4 | 5 | func main() { 6 | serverless.Run() 7 | } 8 | 9 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "fmt" 4 | 5 | var ApiServerIp = "localhost:8080" 6 | var httpPrefix = fmt.Sprintf("http://%s/api/v1/", ApiServerIp) // NOTE(review): evaluated at package init — if ApiServerIp is overwritten later (e.g. by kubeproxy), this prefix is stale; confirm callers 7 | 8 | type ObjType string 9 | 10 | const ( 11 | POD ObjType = "pods" 12 | SERVICE ObjType = "services" 13 | ENDPOINT ObjType = "endpoints" 14 | REPLICA ObjType = "replicas" 15 | HPA ObjType = "hpas" 16 | FUNCTION ObjType = "functions" 17 | JOB ObjType = "jobs" 18 | NODE ObjType = "nodes" 19 | ) 20 | --------------------------------------------------------------------------------
/config/kubelet-config.yaml: -------------------------------------------------------------------------------- 1 | ApiserverAddr : 192.168.1.13:8080 2 | FlannelSubnet : 10.2.9.1/24 3 | IP : 192.168.1.14 4 | Labels : 5 | - resource : gpu 6 | ListenAddr : 192.168.1.14:10250 7 | CPU: "2" 8 | Memory: "3Gi" 9 | # ApiserverAddr : 192.168.1.14:8080 10 | # FlannelSubnet : 10.2.9.1/24 11 | # IP : 192.168.1.14 12 | # Labels : 13 | # - resource : gpu 14 | # ListenAddr : 192.168.1.14:10250 15 | # CPU: "2000" 16 | # Memory: "32212250" 17 | -------------------------------------------------------------------------------- /config/scheduler-config.yaml: -------------------------------------------------------------------------------- 1 | ApiserverAddr : 192.168.1.13:8080 2 | Policy: "resource" 3 | # ApiserverAddr : 192.168.1.14:8080 4 | # Policy: "frequency" 5 | -------------------------------------------------------------------------------- /doc/CICD.md: -------------------------------------------------------------------------------- 1 | # CI/CD 2 | 3 | ### test 4 | 5 | [go 覆盖测试工具介绍 - 建站教程 (jiuaidu.com)](https://jiuaidu.com/jianzhan/1046052/) 6 | 7 | `go test ./...`可以测试目录下所有的test文件 8 | 9 | `go test minik8s/pkg/kubelet/container` 测试指定包下的测试文件 10 | 11 | ### gitlab runner 12 | 13 | #### docker 14 | 15 | `docker run -d --name gitlab-runner --restart always -v /srv/gitlab-runner/config:/etc/gitlab-runner -v /var/run/docker.sock:/var/run/docker.sock gitlab/gitlab-runner:v15.10.1` 16 | 17 | 执行器选择docker 这里镜像需要先在主机上写Dockerfile手动构建好,然后修改`config.toml`配置文件把`pull_policy`修改为`if-not-present` 18 | 19 | 对于简单测试没问题,但是对于CNI这种复杂的东西,即使加了privilege=true,还是会出现和宿主机上不一样的情况。 20 | 21 | #### host 22 | 23 | [Install GitLab Runner | GitLab](https://docs.gitlab.com/runner/install/) 24 | 25 | 交大云主机安装二进制 26 | 27 | `nslookup www.ipads.sjtu.edu.cn` 安全组开放所有端口 28 | 29 | `gitlab-runner register` 去gitlab网页的settings/cicd复制url和token 30 | 31 | 执行器选择shell 在主机上给gitlab-runner用户足够的权限 32 | 33 | 
[【汇总】解决GitLab-Runner执行脚本命令无权限_gitlab-runner 提升权限_成为大佬先秃头的博客-CSDN博客](https://blog.csdn.net/qq_39940674/article/details/127616784) 34 | 35 | 采用这种方法进行CI/CD,gitlab-runner会在主机上的某个目录跑脚本,用的都是主机的环境 36 | 37 | - 优点:不需要手动配一个拥有所有环境的镜像;没有容器导致的与主机不一致,跑不起来的情况。 38 | - 缺点:会对主机产生影响;在缺少依赖的情况下无法更换gitlab-runner所在主机。 39 | 40 | ### .gitlab-ci.yml 41 | 42 | 1. prepare: 设置go env,防止go test在download时超时 43 | 44 | 2. test:`go test` 如果测试涉及到的api需要权限,需要加sudo 45 | 46 | 创建多个tag为shell的runner,使test阶段并行测试 (目前一共3个) 47 | 48 | 需要修改手动`/etc/gitlab-runner/config.toml`的concurrent为3 49 | 50 | 3. build:`go build` 生成可执行文件在`/home/gitlab-runner/$CI_COMMIT_BRANCH/`目录下 51 | 52 | 不同分支build出的文件不会互相覆盖 53 | 54 | ### 代码同步 55 | 56 | 同时推送到gitee和gitlab,不然无法用gitlab-runner 57 | 58 | [git push origin master一次提交多个远程仓库 - 兜里还剩五块出头 - 博客园 (cnblogs.com)](https://www.cnblogs.com/hmy-666/p/17304317.html) 59 | 60 | ```shell 61 | root@minik8s-1:/mini-k8s# git remote -v 62 | origin https://gitee.com/szy_0127/mini-k8s.git (fetch) 63 | origin https://gitee.com/szy_0127/mini-k8s.git (push) 64 | origin https://ipads.se.sjtu.edu.cn:2020/520021910933/minik8s.git (push) 65 | ``` 66 | 67 | -------------------------------------------------------------------------------- /doc/assets/image-20230419215427092.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IPADSIntern-MiniK8s/MiniK8s/610a8a0a09d3988fecdc60e0cdf69d3030cb9d82/doc/assets/image-20230419215427092.png -------------------------------------------------------------------------------- /doc/assets/image-20230420170733340.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IPADSIntern-MiniK8s/MiniK8s/610a8a0a09d3988fecdc60e0cdf69d3030cb9d82/doc/assets/image-20230420170733340.png -------------------------------------------------------------------------------- /doc/assets/image-20230514181753203.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/IPADSIntern-MiniK8s/MiniK8s/610a8a0a09d3988fecdc60e0cdf69d3030cb9d82/doc/assets/image-20230514181753203.png -------------------------------------------------------------------------------- /doc/guideline.md: -------------------------------------------------------------------------------- 1 | # Guideline 2 | ## 1. 编程规范 3 | 4 | https://gocn.github.io/styleguide/ 5 | -------------------------------------------------------------------------------- /doc/kubectl-api.md: -------------------------------------------------------------------------------- 1 | # Mini-K8s kubectl指令手册 2 | Mini-K8s支持的命令如下: 3 | #### kubectl apply 4 | 5 | `kubectl apply -f ` 6 | 7 | #### kubectl get 8 | 9 | `kubectl get [-n ]` 10 | 11 | `kubectl get +s [-n ]` 12 | 13 | #### kubectl describe 14 | 15 | `kubectl describe [-n ]` 16 | 17 | `kubectl describe +s [-n ]` 18 | 19 | #### kubectl delete 20 | 21 | `kubectl delete [-n ]` 22 | 23 | 24 | 由于k8s的Api是基于REST的设计思想,因此,不同种类的HTTP请求也就对应了不同的操作。比较常用的对应关系是: 25 | 26 | **GET(SELECT)**:从服务器取出资源(一项或多项)。GET请求对应k8s api的获取信息功能。因此,如果是获取信息的命令都要使用GET方式发起HTTP请求。 27 | 28 | **POST(CREATE)**:在服务器新建一个资源。POST请求对应k8s api的创建功能。因此,需要创建Pods、ReplicaSet或者service的时候请使用这种方式发起请求。 29 | 30 | **PUT(UPDATE)**:在服务器更新资源(客户端提供改变后的完整资源)。对应更新nodes或Pods的状态、ReplicaSet的自动备份数量等等。 31 | 32 | **PATCH(UPDATE)**:在服务器更新资源(客户端提供改变的属性)。 33 | 34 | **DELETE(DELETE)**:从服务器删除资源。在稀牛学院的学员使用完毕环境后,可以使用这种方式将Pod删除,释放资源。 35 | -------------------------------------------------------------------------------- /doc/kubeproxy.md: -------------------------------------------------------------------------------- 1 | kubeproxy工作: 2 | 3 | –K8s会为Service分配一个“持久化的”集群内的IP 4 | 5 | –通过**kube-proxy** 实现Pod和Service之间的通信/调用 6 | 7 | –Service的IP是持久化的,就是Service对应的Pod挂了也不会变 8 | 9 | kube-proxy 的主要作用是watch apiserver,当监听到pod 或service变化时,修改本地的iptables规则或ipvs规则。 10 | 11 | service流程: 12 | 13 | - K8s会为Service分配一个“持久化的”集群内的IP 14 | - 
kubeproxy维护service ip和转发的实际pod ip的关系 15 | 16 | **Node Port** **Service** 17 | 18 | ![image-20230419215427092](./assets/image-20230419215427092.png) 19 | 20 | service在不同node上开相同的端口,用户以同一端口号访问任意一个node的kube-proxy,kubeproxy负责转发。 21 | 22 | **kubeproxy iptable负责维护:** 23 | 24 | service ip和转发的实际pod ip的关系 25 | 26 | 27 | 28 | 29 | 30 | **三种模式:** 31 | 32 | **userspace:**在这种模式下,kube-proxy通过观察Kubernetes中service和endpoint对象的变化,当有新的service创建时,所有节点的kube-proxy在node节点上随机选择一个端口,在[iptables](https://so.csdn.net/so/search?q=iptables&spm=1001.2101.3001.7020)中追加一条把访问service的请求重定向到这个端口的记录,并开始监听这个端口的连接请求。 33 | 34 | 创建一个service,对应的IP:1.2.3.4,port:8888,kube-proxy随机选择的端口是32890,iptable追加: 35 | 36 | -A KUBE-PORTALS-CONTAINER -d 1.2.3.4/32 -p tcp --dport 8888 -j REDIRECT --to-ports 32890 37 | 38 | **iptables 模式:** 39 | 40 | 在这种模式下,kube-proxy通过观察Kubernetes中service和endpoint对象的变化,当有servcie创建时,kube-proxy在iptables中追加新的规则。对于service的每一个endpoint,会在iptables中追加一条规则,设定动作为DNAT,将目的地址设置成真正提供服务的pod地址;再为servcie追加规则,设定动作为跳转到对应的endpoint的规则上, 41 | 42 | 默认情况下,kube-proxy随机选择一个后端的服务,可以通过iptables中的 -m recent 模块实现session affinity功能,通过 -m statistic 模块实现负载均衡时的权重功能 43 | 44 | 比如说创建了一个service,对应的IP:1.2.3.4,port:8888,对应一个后端地址:10.1.0.8:8080,则会在iptables中追加(主要规则): 45 | 46 | ```css 47 | -A PREROUTING -j KUBE-SERVICES 48 | 49 | -A KUBE-SERVICES -d 1.2.3.4/32 -p tcp –dport 8888 -j KUBE-SVC-XXXXXXXXXXXXXXXX 50 | 51 | -A KUBE-SVC-XXXXXXXXXXXXXXXX -j KUBE-SEP-XXXXXXXXXXXXXXXX 52 | 53 | -A KUBE-SEP-XXXXXXXXXXXXXXXX -p tcp -j DNAT –to-destination 10.1.0.8:8080 54 | 55 | ``` 56 | 57 | 直接查询到对应pod的cluster ip + 端口 58 | 59 | 测试方法:创建一个serevice,创建一个client pod,client pod给service发请求 60 | 61 | **ipvs模式:** 62 | 63 | kube-proxy 依然监听Service以及Endpoints对象的变化, 不过它并不创建反向代理, 也不创建大量的 iptables 规则, 而是通过netlink 创建ipvs规则,并使用k8s Service与Endpoints信息,对所在节点的ipvs规则进行定期同步; netlink 与 iptables 底层都是基于 netfilter 钩子,但是 netlink 由于采用了 hash table 而且直接工作在内核态,在性能上比 iptables 更优。 64 | 65 | 66 | 67 | **ipvsadm运行在用户态,提供简单的CLI接口进行ipvs配置。** 68 | 69 | 
由于ipvs工作在内核态,直接基于内核处理包转发,所以最大的特点就是性能非常好。 70 | 71 | 72 | 73 | ipvs模式下 kubeproxy工作: 74 | 75 | 简单来说kube-proxy主要在所有的Node节点做如下三件事: 76 | 77 | 1. 如果没有dummy类型虚拟网卡,则创建一个,默认名称为`kube-ipvs0`; 78 | 2. 把Kubernetes ClusterIP地址添加到`kube-ipvs0`,同时添加到ipset中。 79 | 3. 创建ipvs service,ipvs service地址为ClusterIP以及Cluster Port,ipvs server为所有的Endpoint地址,即Pod IP及端口。 80 | 81 | #### K8s中的kube-proxy实现 82 | 83 | 这里重点是 **[proxier 对象实例化](*[https://github.com/kubernetes/kubernetes/blob/v1.18.2/cmd/kube-proxy/app/server_others.go#L307-L336](https://link.zhihu.com/?target=https%3A//github.com/kubernetes/kubernetes/blob/v1.18.2/cmd/kube-proxy/app/server_others.go%23L307-L336)*)** ,它会调用 ipvs 包的 **[实例化逻辑](*[https://github.com/kubernetes/kubernetes/blob/v1.18.2/pkg/proxy/ipvs/proxier.go#L319-L482](https://link.zhihu.com/?target=https%3A//github.com/kubernetes/kubernetes/blob/v1.18.2/pkg/proxy/ipvs/proxier.go%23L319-L482)*)** , 84 | 85 | \* **[syncProxyRules() 函数](*[https://github.com/kubernetes/kubernetes/blob/v1.18.2/pkg/proxy/ipvs/proxier.go#L989-L1626](https://link.zhihu.com/?target=https%3A//github.com/kubernetes/kubernetes/blob/v1.18.2/pkg/proxy/ipvs/proxier.go%23L989-L1626)*)** 这六百多行代码是整个 kube-proxy 模块的最核心的逻辑,会把用户创建的 service 转换为 ipvs rules,然后调用 **[ipvs go 客户端](*[https://github.com/kubernetes/kubernetes/blob/v1.18.2/pkg/util/ipvs/ipvs.go](https://link.zhihu.com/?target=https%3A//github.com/kubernetes/kubernetes/blob/v1.18.2/pkg/util/ipvs/ipvs.go)*)** 写入内核中。 86 | 这里会根据每一个 service 去构建 **[ipvs rules](*[https://github.com/kubernetes/kubernetes/blob/v1.18.2/pkg/proxy/ipvs/proxier.go#L1115-L1540](https://link.zhihu.com/?target=https%3A//github.com/kubernetes/kubernetes/blob/v1.18.2/pkg/proxy/ipvs/proxier.go%23L1115-L1540)*)** 。 87 | 88 | ![image-20230420170733340](./assets/image-20230420170733340.png) 89 | 90 | 写flannel的虚拟ip是不是就能找到 91 | 92 | 得到一个service 的虚拟ip,加入一条规则,实现service ip -> pod ip映射。这里的pod ip是不是是虚拟的flannel ip就行? 93 | 94 | ?谁来为service分配虚拟ip 95 | 96 | ### 环境配置 97 | 98 | 1. 
下载ipvsadm,便于调试 99 | 100 | `apt install ipvsadm` 101 | 102 | 1. 打开ipvs的conntrack 103 | 104 | ```undefined 105 | sysctl net.ipv4.vs.conntrack=1 106 | ``` 107 | 108 | 2. 添加一个虚拟ip 109 | 110 | ```csharp 111 | ipvsadm -A -t 10.10.0.1:8410 -s rr 112 | ``` 113 | 114 | 3. 把虚拟ip地址添到本地flannel.1网卡 115 | 116 | ```csharp 117 | ip addr add 10.10.0.1/24 dev flannel.1 118 | ``` 119 | 120 | 4. 为虚拟ip添加end point(真正提供服务的节点) 121 | 122 | ```cpp 123 | ipvsadm -a -t 10.10.0.1:8410 -r 10.2.17.53:12345 -m 124 | ``` 125 | 126 | 5. 添加SNAT功能 127 | 128 | ```shell 129 | iptables -t nat -A POSTROUTING -m ipvs --vaddr 10.10.0.1 --vport 8410 -j MASQUERADE 130 | ``` 131 | 132 | 删除命令:`ipvsadm -D -t 10.10.0.1:8410` 133 | 134 | 查看所有规则:`ipvsadm -Ln` -------------------------------------------------------------------------------- /doc/scheduler.md: -------------------------------------------------------------------------------- 1 | # Scheduler 2 | 3 | ## Overview 4 | 5 | kube-scheduler 是 Kubernetes 控制平面的一个组件,负责为新创建的 Pod 选择一个合适的 Node 节点来运行。当 Kubernetes API Server 接收到创建 Pod 的请求时,会将该请求发送给 kube-scheduler,kube-scheduler 将根据一些规则和条件为该 Pod 分配一个合适的 Node。 6 | 7 | kube-scheduler 通过以下几个步骤来为 Pod 分配 Node: 8 | 9 | 获取 Pod 的调度要求:kube-scheduler 从 Kubernetes API Server 中获取 Pod 的调度要求,包括 Pod 所需的 CPU、内存等资源,以及 Pod 的亲和性和反亲和性规则等。 10 | 11 | 执行策略:kube-scheduler 将会执行一些策略来为 Pod 选择一个 Node,例如默认策略、负载均衡策略、亲和性策略和节点亲和性策略等。 12 | 13 | 筛选 Node:kube-scheduler 将基于 Pod 调度要求和策略对集群中的每个 Node 进行筛选,以找到满足 Pod 调度要求的可用 Node。 14 | 15 | 评分和排序:kube-scheduler 会对可用的 Node 进行评分和排序,以找到最适合运行该 Pod 的 Node。kube-scheduler 根据节点资源使用情况、节点亲和性和反亲和性规则等因素对每个 Node 进行评分,然后选择最高评分的 Node。 16 | 17 | 绑定 Pod 和 Node:kube-scheduler 选择了最适合运行该 Pod 的 Node 之后,会向 Kubernetes API Server 发送一个绑定请求,将该 Pod 绑定到所选的 Node 上,Kubernetes API Server 将更新该 Pod 的状态并通知 kubelet 在相应的 Node 上创建并运行该 Pod。 18 | 19 | ## Pod的调度需求 20 | 21 | Pod 的调度需求可以通过 Pod 的配置信息来体现。以下是一些 Pod 配置中可以影响 Pod 调度需求的字段和它们的举例: 22 | 23 | 资源需求(Resource Requirements):Pod 可以通过容器的资源需求来指定它需要的 CPU 和内存资源。这些需求可以帮助 Kubernetes 调度器决定将 Pod 调度到哪个节点上。 
24 | 举例: 25 | 26 | ```yaml 27 | apiVersion: v1 28 | kind: Pod 29 | metadata: 30 | name: nginx-pod 31 | spec: 32 | containers: 33 | - name: nginx 34 | image: nginx 35 | resources: 36 | requests: 37 | memory: "64Mi" 38 | cpu: "250m" 39 | limits: 40 | memory: "128Mi" 41 | cpu: "500m" 42 | 43 | ``` 44 | 45 | 在上面的示例中,容器 nginx 的资源需求分别为 250m CPU 和 64Mi 内存,而其资源限制分别为 500m CPU 和 128Mi 内存。 46 | 47 | 调度限制(Node Selector):Pod 可以通过配置调度限制来规定哪些节点可以或不能调度该 Pod。这些限制可以基于节点的标签、容量或亲和性等属性。 48 | 举例: 49 | 50 | ```yaml 51 | apiVersion: v1 52 | kind: Pod 53 | metadata: 54 | name: nginx-pod 55 | spec: 56 | nodeSelector: 57 | disktype: ssd 58 | containers: 59 | - name: nginx 60 | image: nginx 61 | ``` 62 | 63 | 在上面的示例中,Pod nginx-pod 的调度限制是其节点必须有一个 disktype 标签值为 ssd。 64 | 65 | 亲和性和反亲和性(Affinity and Anti-Affinity):Pod 可以通过配置亲和性和反亲和性规则来指定它应该调度到哪个节点上或不能调度到哪个节点上。这些规则可以基于节点的标签、容量或已运行的 Pod 等属性。 66 | 举例: 67 | 68 | ```yaml 69 | apiVersion: v1 70 | kind: Pod 71 | metadata: 72 | name: nginx-pod 73 | spec: 74 | affinity: 75 | nodeAffinity: 76 | requiredDuringSchedulingIgnoredDuringExecution: 77 | nodeSelectorTerms: 78 | - matchExpressions: 79 | - key: node-role.kubernetes.io/worker 80 | operator: Exists 81 | containers: 82 | - name: nginx 83 | image: nginx 84 | ``` 85 | 86 | 在上面的示例中,Pod nginx-pod 要求调度到标记有 node-role.kubernetes.io/worker 标签的节点上。 87 | 88 | 容器亲和性和反亲和性(Affinity and Anti-Affinity):Pod 中的容器可以通过配置亲和性和反亲和性规则来指定它应该调度到哪个节点上或不能调度到哪个节点上。这些规则可以基于节点的标签、容量或已运行的容器等属性。 89 | 举例: 90 | 91 | ```yaml 92 | apiVersion: v1 93 | kind: Pod 94 | metadata: 95 | name: nginx-pod 96 | spec: 97 | containers: 98 | - name: nginx 99 | image: nginx 100 | resources: 101 | requests: 102 | memory: "64Mi" 103 | cpu: "250m" 104 | env: 105 | - name: ZONE 106 | value: "eu-west-1a" 107 | affinity: 108 | podAffinity: 109 | requiredDuringSchedulingIgnoredDuringExecution: 110 | - labelSelector: 111 | matchExpressions: 112 | - key: app 113 | operator: In 114 | values: 115 | - nginx 116 | topologyKey: "kubernetes.io/hostname" 117 | ``` 118 
| 119 | 在上面的示例中,Pod 中的容器 nginx 只能调度到在 eu-west-1a 区域中且已经运行了一个 app=nginx 标签的 Pod 的节点上。 120 | 121 | 调度器扩展程序(Scheduler Extender):通过实现调度器扩展程序可以在 Kubernetes 默认调度器的基础上增加一些额外的调度算法或逻辑。扩展程序可以接收到调度请求并决定该请求应该被哪个节点处理。 122 | 举例: 123 | 124 | 调度器扩展程序可以基于节点的健康状况或其他第三方条件来决定节点是否适合调度某个 Pod。 125 | 126 | 总之,Pod 的调度需求可以通过多种方式体现在 Pod 的配置中,调度器会根据这些需求来选择最合适的节点来运行 Pod。 127 | 128 | ### 目前支持的filter策略 129 | 130 | #### configfilter 131 | 132 | 1. 如果Pod的`NodeSelector`字段不为空,首先用这个字段与Node的`MetaData`的`Label`进行匹配 133 | 2. 如果Pod的`Resources`不为空,用这个字段匹配`NodeStatus`中的`Allocatable`,判断是否满足(注意,如果Node相应的字段为空的话,这里不会过滤掉) 134 | 3. 目前不支持亲和性 135 | 136 | ## scheduler的具体操作 137 | ### Bind 138 | 在 Kubernetes 的 node config 中,节点的 IP 地址记录在 status.addresses 数组中的 type 字段为 InternalIP 的元素的 address 字段中。 139 | 在Bind的时候,会将node的status.addresses.InternalIP对应的Address 记录在pod的HostIp 中 140 | 141 | ## 测试 142 | ### 准备工作 143 | 1. register对应的node 144 | 2. 对于这些node进行watch 145 | 如果是使用postman里面的demo的话,命令如下: 146 | ```shell 147 | wscat -H "X-Source: test-node1" -c ws://localhost:8080/api/v1/watch/pods 148 | wscat -H "X-Source: test-node2" -c ws://localhost:8080/api/v1/watch/pods 149 | wscat -H "X-Source: test-node3" -c ws://localhost:8080/api/v1/watch/pods 150 | wscat -H "X-Source: test-node4" -c ws://localhost:8080/api/v1/watch/pods 151 | ``` -------------------------------------------------------------------------------- /doc/要求文档.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IPADSIntern-MiniK8s/MiniK8s/610a8a0a09d3988fecdc60e0cdf69d3030cb9d82/doc/要求文档.pdf -------------------------------------------------------------------------------- /example/dns/dnspod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: dnspod 5 | namespace: default 6 | labels: 7 | app: dns-test 8 | spec: 9 | containers: 10 | - name: c1 11 | image: docker.io/mcastelino/nettools 12 | ports: 13 | - name: p1 14 
| containerPort: 22222 15 | protocol: TCP 16 | command: 17 | - /root/test_mount/simple_http_server 18 | env: 19 | - name: port 20 | value: '22222' 21 | volumeMounts: 22 | - name: test-volume 23 | mountPath: /root/test_mount 24 | - name: c2 25 | image: docker.io/mcastelino/nettools 26 | ports: 27 | - name: p1 28 | containerPort: 23456 29 | protocol: TCP 30 | command: 31 | - /root/test_mount/test_network 32 | env: 33 | - name: port 34 | value: '23456' 35 | volumeMounts: 36 | - name: test-volume 37 | mountPath: /root/test_mount 38 | volumes: 39 | - name: test-volume 40 | hostPath: 41 | path: /home/test_mount -------------------------------------------------------------------------------- /example/dns/dnspod2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: dnspod2 5 | namespace: default 6 | labels: 7 | app: dns-test2 8 | spec: 9 | containers: 10 | - name: c1 11 | image: docker.io/mcastelino/nettools 12 | ports: 13 | - name: p1 14 | containerPort: 12345 15 | protocol: TCP 16 | command: 17 | - /root/test_mount/test_network 18 | env: 19 | - name: port 20 | value: '12345' 21 | volumeMounts: 22 | - name: test-volume 23 | mountPath: /root/test_mount 24 | - name: c2 25 | image: docker.io/mcastelino/nettools 26 | ports: 27 | - name: p2 28 | containerPort: 23456 29 | protocol: TCP 30 | command: 31 | - /root/test_mount/test_network 32 | env: 33 | - name: port 34 | value: '23456' 35 | volumeMounts: 36 | - name: test-volume 37 | mountPath: /root/test_mount 38 | volumes: 39 | - name: test-volume 40 | hostPath: 41 | path: /home/test_mount -------------------------------------------------------------------------------- /example/dns/dnsrecord.yaml: -------------------------------------------------------------------------------- 1 | kind: dnsrecord 2 | apiVersion: app/v1 3 | name: dns-test1 4 | namespace: default 5 | host: minik8s.com 6 | paths: 7 | - service: dns-service 8 | pathName: path1 
9 | port: 22222 10 | - service: dns-service2 11 | pathName: path2 12 | port: 23456 13 | -------------------------------------------------------------------------------- /example/dns/dnsservice.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: dns-service 5 | namespace: default 6 | spec: 7 | type: ClusterIP 8 | selector: 9 | app: dns-test 10 | ports: 11 | - name: service-port1 12 | port: 22222 13 | targetPort: p1 14 | protocol: TCP -------------------------------------------------------------------------------- /example/dns/dnsservice2.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: dns-service2 5 | namespace: default 6 | spec: 7 | type: ClusterIP 8 | selector: 9 | app: dns-test2 10 | ports: 11 | - name: service-port2 12 | port: 23456 13 | targetPort: p2 14 | protocol: TCP -------------------------------------------------------------------------------- /example/hpa/hpa-new.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: HPA 3 | metadata: 4 | name: hpa-practice 5 | spec: 6 | minReplicas: 2 # 最小pod数量 7 | maxReplicas: 5 # 最大pod数量 8 | metrics: 9 | - resource: 10 | name: "memory" 11 | target: 12 | averageValue: 100000000 13 | type: AverageValue 14 | type: Resource 15 | - resource: 16 | name: "cpu" 17 | target: 18 | averageValue: 100 19 | type: AverageValue 20 | type: Resource 21 | scaleTargetRef: # 指定要控制的deploy 22 | apiVersion: apps/v1 23 | kind: replicas 24 | name: replica-practice 25 | behavior: 26 | scaleUp: 27 | policies: 28 | - type: Pods 29 | value: 8 30 | periodSeconds: 60 # 每分钟最多8 31 | scaleDown: 32 | policies: 33 | - type: Percent 34 | value: 10 35 | periodSeconds: 60 # 每分钟最多10% 36 | stabilizationWindowSeconds: 30 -------------------------------------------------------------------------------- 
/example/hpa/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: HPA 3 | metadata: 4 | name: hpa-practice 5 | spec: 6 | minReplicas: 2 # 最小pod数量 7 | maxReplicas: 5 # 最大pod数量 8 | metrics: 9 | - resource: 10 | name: "memory" 11 | target: 12 | averageUtilization: 99 13 | type: Utilization 14 | type: Resource 15 | - resource: 16 | name: "cpu" 17 | target: 18 | averageValue: 1000 19 | type: AverageValue 20 | type: Resource 21 | scaleTargetRef: # 指定要控制的deploy 22 | apiVersion: apps/v1 23 | kind: replicas 24 | name: replica-practice 25 | behavior: 26 | scaleUp: 27 | policies: 28 | - type: Pods 29 | value: 8 30 | periodSeconds: 60 # 每分钟最多10% 31 | scaleDown: 32 | policies: 33 | - type: Percent 34 | value: 10 35 | periodSeconds: 60 # 每分钟最多10% 36 | stabilizationWindowSeconds: 30 -------------------------------------------------------------------------------- /example/hpa/replica.yaml: -------------------------------------------------------------------------------- 1 | kind: Replica 2 | apiVersion: apps/v1 3 | metadata: 4 | name: replica-practice 5 | spec: 6 | replicas: 1 7 | selector: 8 | app: replica-practice 9 | template: 10 | metadata: 11 | labels: 12 | app: replica-practice 13 | spec: 14 | containers: 15 | - name: server 16 | image: docker.io/mcastelino/nettools 17 | ports: 18 | - name: p1 # 端口名称 19 | containerPort: 8080 # 容器端口 20 | command: 21 | - /root/test_mount/test_resource 22 | env: 23 | - name: port 24 | value: '8080' 25 | volumeMounts: 26 | - name: data 27 | mountPath: /root/test_mount 28 | resources: 29 | limits: 30 | cpu: "1" 31 | volumes: 32 | - name: data 33 | hostPath: 34 | path: /home/test_mount -------------------------------------------------------------------------------- /example/hpa/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: service-practice 5 | spec: 6 | selector: 7 | 
app: replica-practice 8 | type: ClusterIP 9 | ports: 10 | - name: service-port1 11 | protocol: TCP 12 | port: 32345 # 对外暴露的端口 13 | targetPort: p1 # 转发的端口,pod对应的端口 -------------------------------------------------------------------------------- /example/hpa/utilization/hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: HPA 3 | metadata: 4 | name: hpa-practice 5 | spec: 6 | minReplicas: 2 # 最小pod数量 7 | maxReplicas: 5 # 最大pod数量 8 | metrics: 9 | - resource: 10 | name: "memory" 11 | target: 12 | averageUtilization: 99 13 | type: Utilization 14 | type: Resource 15 | - resource: 16 | name: "cpu" 17 | target: 18 | averageValue: 1000 19 | type: AverageValue 20 | type: Resource 21 | scaleTargetRef: # 指定要控制的deploy 22 | apiVersion: apps/v1 23 | kind: replicas 24 | name: replica-practice 25 | behavior: 26 | scaleUp: 27 | policies: 28 | - type: Pods 29 | value: 8 30 | periodSeconds: 60 # 每分钟最多8个Pod 31 | scaleDown: 32 | policies: 33 | - type: Percent 34 | value: 10 35 | periodSeconds: 60 # 每分钟最多10% 36 | stabilizationWindowSeconds: 30 -------------------------------------------------------------------------------- /example/hpa/utilization/replica.yaml: -------------------------------------------------------------------------------- 1 | kind: Replica 2 | apiVersion: apps/v1 3 | metadata: 4 | name: replica-practice 5 | spec: 6 | replicas: 1 7 | selector: 8 | app: replica-practice 9 | template: 10 | metadata: 11 | labels: 12 | app: replica-practice 13 | spec: 14 | containers: 15 | - name: server 16 | image: docker.io/mcastelino/nettools 17 | ports: 18 | - name: p1 # 端口名称 19 | containerPort: 8080 # 容器端口 20 | command: 21 | - /root/test_mount/test_resource 22 | env: 23 | - name: port 24 | value: '8080' 25 | volumeMounts: 26 | - name: data 27 | mountPath: /root/test_mount 28 | resources: 29 | limits: 30 | cpu: "1" 31 | volumes: 32 | - name: data 33 | hostPath: 34 | path: /home/test_mount
-------------------------------------------------------------------------------- /example/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-practice 5 | labels: 6 | app: replica-practice 7 | spec: 8 | containers: 9 | - name: server 10 | image: docker.io/mcastelino/nettools 11 | ports: 12 | - name: p1 # 端口名称 13 | containerPort: 8080 # 容器端口 14 | command: 15 | - /root/test_mount/simple_http_server 16 | env: 17 | - name: port 18 | value: '8080' 19 | volumeMounts: 20 | - name: data 21 | mountPath: /root/test_mount 22 | volumes: 23 | - name: data 24 | hostPath: 25 | path: /home/test_mount -------------------------------------------------------------------------------- /example/pod/demo-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod 5 | namespace: demo 6 | spec: 7 | containers: 8 | - name: c1 9 | image: docker.io/mcastelino/nettools 10 | ports: 11 | - containerPort: 12345 12 | command: 13 | - /root/test_mount/test_network 14 | env: 15 | - name: port 16 | value: '12345' 17 | volumeMounts: 18 | - name: test-volume 19 | mountPath: /root/test_mount 20 | - name: c2 21 | image: docker.io/mcastelino/nettools 22 | ports: 23 | - containerPort: 23456 24 | command: 25 | - /root/test_mount/test_network 26 | env: 27 | - name: port 28 | value: '23456' 29 | volumeMounts: 30 | - name: test-volume 31 | mountPath: /root/test_mount 32 | volumes: 33 | - name: test-volume 34 | hostPath: 35 | path: /home/test_mount 36 | 37 | -------------------------------------------------------------------------------- /example/pod/demo-pod2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod2 5 | namespace: demo 6 | spec: 7 | containers: 8 | - name: c1 9 | image: docker.io/mcastelino/nettools 10 | command: 11 
| - /root/test_mount/test_cpu 12 | volumeMounts: 13 | - name: test-volume 14 | mountPath: /root/test_mount 15 | resources: 16 | limits: 17 | cpu: "0.5" 18 | memory: "100Mi" 19 | - name: c2 20 | image: docker.io/mcastelino/nettools 21 | command: 22 | - /root/test_mount/test_memory 23 | volumeMounts: 24 | - name: test-volume 25 | mountPath: /root/test_mount 26 | resources: 27 | limits: 28 | cpu: "0.5" 29 | memory: "100Mi" 30 | volumes: 31 | - name: test-volume 32 | hostPath: 33 | path: /home/test_mount 34 | 35 | -------------------------------------------------------------------------------- /example/pod/demo-pod4.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod4 5 | namespace: demo 6 | spec: 7 | nodeSelector: 8 | env: test 9 | containers: 10 | - name: c1 11 | image: docker.io/mcastelino/nettools 12 | ports: 13 | - containerPort: 12345 14 | command: 15 | - /root/test_mount/test_network 16 | env: 17 | - name: port 18 | value: '12345' 19 | volumeMounts: 20 | - name: test-volume 21 | mountPath: /root/test_mount 22 | - name: c2 23 | image: docker.io/mcastelino/nettools 24 | ports: 25 | - containerPort: 23456 26 | command: 27 | - /root/test_mount/test_network 28 | env: 29 | - name: port 30 | value: '23456' 31 | volumeMounts: 32 | - name: test-volume 33 | mountPath: /root/test_mount 34 | volumes: 35 | - name: test-volume 36 | hostPath: 37 | path: /home/test_mount 38 | 39 | -------------------------------------------------------------------------------- /example/pod/simple_http_server.go: -------------------------------------------------------------------------------- 1 | 2 | package main 3 | 4 | import ( 5 | "fmt" 6 | "net" 7 | "net/http" 8 | "os" 9 | "strings" 10 | "io" 11 | ) 12 | 13 | func main() { 14 | addrs, err := net.InterfaceAddrs() 15 | if err != nil { 16 | fmt.Println(err) 17 | return 18 | } 19 | 20 | var ip string 21 | for _, addr := range addrs { 22 | 
//fmt.Println(addr) 23 | addrStr := fmt.Sprintf("%v",addr) 24 | if strings.Contains(addrStr,"/24"){ 25 | ip = addrStr 26 | } 27 | } 28 | port:=os.Getenv("port") 29 | http.HandleFunc("/",func(w http.ResponseWriter,request *http.Request){io.WriteString(w,"i'm "+ip+"\n")}) 30 | _ = http.ListenAndServe(":"+port, nil) 31 | } 32 | -------------------------------------------------------------------------------- /example/pod/test_cpu.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | func main() { 4 | go func() { 5 | for { 6 | } 7 | }() 8 | for { 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /example/pod/test_memory.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | func main(){ 8 | var list = make([]uint64,1,1) 9 | var i uint64 10 | time.Sleep(time.Second*10) 11 | //10M*8byte totally cost >100M memory 12 | for i=0;i<10000000;i++{ 13 | list = append(list,i) 14 | } 15 | time.Sleep(time.Duration(120)*time.Second) 16 | } 17 | -------------------------------------------------------------------------------- /example/pod/test_network.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "os" 6 | "net/http" 7 | ) 8 | 9 | func main() { 10 | port:=os.Getenv("port") 11 | http.HandleFunc("/",func(w http.ResponseWriter,request *http.Request){io.WriteString(w,"http connect success\n")}) 12 | _ = http.ListenAndServe(":"+port, nil) 13 | } 14 | -------------------------------------------------------------------------------- /example/replica.yaml: -------------------------------------------------------------------------------- 1 | kind: Replica 2 | apiVersion: apps/v1 3 | metadata: 4 | name: replica-practice1 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: replica-practice 9 | template: 10 | metadata: 11 | 
labels: 12 | app: replica-practice 13 | spec: 14 | containers: 15 | - name: server 16 | image: docker.io/mcastelino/nettools 17 | ports: 18 | - name: p1 # 端口名称 19 | containerPort: 8080 # 容器端口 20 | command: 21 | - /root/test_mount/simple_http_server 22 | env: 23 | - name: port 24 | value: '8080' 25 | volumeMounts: 26 | - name: data 27 | mountPath: /root/test_mount 28 | volumes: 29 | - name: data 30 | hostPath: 31 | path: /home/test_mount 32 | -------------------------------------------------------------------------------- /example/rs/demo-pod2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod2 5 | labels: 6 | app: replica-practice 7 | spec: 8 | containers: 9 | - name: c1 10 | image: docker.io/mcastelino/nettools 11 | command: 12 | - /root/test_mount/test_cpu 13 | volumeMounts: 14 | - name: test-volume 15 | mountPath: /root/test_mount 16 | resources: 17 | limits: 18 | cpu: "0.5" 19 | memory: "100Mi" 20 | - name: c2 21 | image: docker.io/mcastelino/nettools 22 | command: 23 | - /root/test_mount/test_memory 24 | volumeMounts: 25 | - name: test-volume 26 | mountPath: /root/test_mount 27 | resources: 28 | limits: 29 | cpu: "0.5" 30 | memory: "100Mi" 31 | volumes: 32 | - name: test-volume 33 | hostPath: 34 | path: /home/test_mount 35 | 36 | -------------------------------------------------------------------------------- /example/rs/replica.yaml: -------------------------------------------------------------------------------- 1 | kind: Replica 2 | apiVersion: apps/v1 3 | metadata: 4 | name: replica-practice 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: replica-practice 9 | template: 10 | metadata: 11 | labels: 12 | app: replica-practice 13 | spec: 14 | containers: 15 | - name: server 16 | image: docker.io/mcastelino/nettools 17 | ports: 18 | - name: p1 # 端口名称 19 | containerPort: 8080 # 容器端口 20 | command: 21 | - /root/test_mount/simple_http_server 22 | env: 23 | - name: port 
24 | value: '8080' 25 | volumeMounts: 26 | - name: data 27 | mountPath: /root/test_mount 28 | volumes: 29 | - name: data 30 | hostPath: 31 | path: /home/test_mount -------------------------------------------------------------------------------- /example/serverless/add.py: -------------------------------------------------------------------------------- 1 | def run(x, y): 2 | z = x + y 3 | return {"z": z, "x": x, "y": y} -------------------------------------------------------------------------------- /example/serverless/addfunc.yaml: -------------------------------------------------------------------------------- 1 | kind: function 2 | apiVersion: app/v1 3 | name: getsum 4 | path: /home/mini-k8s/example/serverless/add.py -------------------------------------------------------------------------------- /example/serverless/diff.py: -------------------------------------------------------------------------------- 1 | def run(x, y, z): 2 | w = x - y 3 | return {"str": "the sum of the two numbers is: " + str(z), "w": w} -------------------------------------------------------------------------------- /example/serverless/difffunc.yaml: -------------------------------------------------------------------------------- 1 | kind: function 2 | apiVersion: app/v1 3 | name: getdiff 4 | path: /home/mini-k8s/example/serverless/diff.py -------------------------------------------------------------------------------- /example/serverless/mutlicall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # define the serverIp 4 | # send 4 requests to the server in a loop 5 | # between each request, sleep 0.5 seconds 6 | # the parameters are x and y, in each loop x increase 1, y decrease 1 7 | cd ../../build/bin/ 8 | for i in {1..4}; do 9 | ./kubectl trigger function test -f /home/mini-k8s/example/serverless/param.yaml >> ../log/output.log & 10 | done --------------------------------------------------------------------------------
/example/serverless/param.yaml: -------------------------------------------------------------------------------- 1 | 'x': 2 2 | 'y': 3 -------------------------------------------------------------------------------- /example/serverless/printdiff.py: -------------------------------------------------------------------------------- 1 | def run(w): 2 | return {"str": "the difference of the two numbers is: " + str(w)} -------------------------------------------------------------------------------- /example/serverless/printdifffunc.yaml: -------------------------------------------------------------------------------- 1 | kind: function 2 | apiVersion: app/v1 3 | name: printdiff 4 | path: /home/mini-k8s/example/serverless/printdiff.py -------------------------------------------------------------------------------- /example/serverless/printsum.py: -------------------------------------------------------------------------------- 1 | def run(z): 2 | return {"str" : "the sum of the two numbers is: " + str(z)} -------------------------------------------------------------------------------- /example/serverless/printsumfunc.yaml: -------------------------------------------------------------------------------- 1 | kind: function 2 | apiVersion: app/v1 3 | name: printsum 4 | path: /home/mini-k8s/example/serverless/printsum.py -------------------------------------------------------------------------------- /example/serverless/singlefunc.py: -------------------------------------------------------------------------------- 1 | def run(x, y): 2 | z = x + y 3 | x = x - y 4 | y = y - x 5 | print(z) 6 | return x, y, z -------------------------------------------------------------------------------- /example/serverless/singlefunc.yaml: -------------------------------------------------------------------------------- 1 | kind: function 2 | apiVersion: app/v1 3 | name: test 4 | path: /home/mini-k8s/example/serverless/singlefunc.py 
-------------------------------------------------------------------------------- /example/serverless/workflow.yaml: -------------------------------------------------------------------------------- 1 | kind: workflow 2 | apiVersion: v1 3 | name: workflow-exp 4 | startAt: getsum 5 | states: 6 | getdiff: 7 | type: Task 8 | inputPath: $.x,$.y,$.z 9 | next: printdiff 10 | getsum: 11 | type: Task 12 | inputPath: $.x,$.y 13 | next: judgesum 14 | judgesum: 15 | type: Choice 16 | choices: 17 | - variable: $.z 18 | NumericGreaterThan: 5 19 | next: printsum 20 | - variable: $.z 21 | NumericLessThan: 5 22 | next: getdiff 23 | default: printerror 24 | printdiff: 25 | type: Task 26 | inputPath: $.w 27 | outputPath: $.str 28 | end: true 29 | printerror: 30 | type: Fail 31 | error: DefaultStateError 32 | cause: No Matches! 33 | printsum: 34 | type: Task 35 | inputPath: $.z 36 | outputPath: $.str 37 | end: true 38 | comment: An example of basic workflow. -------------------------------------------------------------------------------- /example/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: service-practice 5 | spec: 6 | selector: 7 | app: replica-practice 8 | type: ClusterIP 9 | ports: 10 | - name: service-port1 11 | protocol: TCP 12 | port: 6692 # 对外暴露的端口 13 | targetPort: p1 # 转发的端口,pod对应的端口 -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module minik8s 2 | 3 | go 1.19 4 | 5 | require ( 6 | bou.ke/monkey v1.0.2 7 | github.com/containerd/cgroups v1.1.0 8 | github.com/containerd/containerd v1.7.0 9 | github.com/ghodss/yaml v1.0.0 10 | github.com/gin-gonic/gin v1.9.0 11 | github.com/gogo/protobuf v1.3.2 12 | github.com/gorilla/websocket v1.5.0 13 | github.com/liushuochen/gotable v0.0.0-20221119160816-1113793e7092 14 | github.com/mqliang/libipvs 
v0.0.0-20230109035226-02d9e44c145f 15 | github.com/opencontainers/runtime-spec v1.1.0-rc.1 16 | github.com/robfig/cron v1.2.0 17 | github.com/sirupsen/logrus v1.9.0 18 | github.com/spf13/cobra v1.7.0 19 | github.com/spf13/viper v1.15.0 20 | github.com/tidwall/gjson v1.14.4 21 | github.com/wxnacy/wgo v1.0.4 22 | go.etcd.io/etcd v2.3.8+incompatible 23 | ) 24 | 25 | require ( 26 | github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect 27 | github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652 // indirect 28 | github.com/Microsoft/go-winio v0.6.0 // indirect 29 | github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect 30 | github.com/bytedance/sonic v1.8.7 // indirect 31 | github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect 32 | github.com/containerd/continuity v0.3.0 // indirect 33 | github.com/containerd/fifo v1.1.0 // indirect 34 | github.com/containerd/ttrpc v1.2.1 // indirect 35 | github.com/containerd/typeurl/v2 v2.1.0 // indirect 36 | github.com/coreos/etcd v2.3.8+incompatible // indirect 37 | github.com/cyphar/filepath-securejoin v0.2.3 // indirect 38 | github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect 39 | github.com/fsnotify/fsnotify v1.6.0 // indirect 40 | github.com/gin-contrib/sse v0.1.0 // indirect 41 | github.com/go-logr/logr v1.2.3 // indirect 42 | github.com/go-logr/stdr v1.2.2 // indirect 43 | github.com/go-playground/locales v0.14.1 // indirect 44 | github.com/go-playground/universal-translator v0.18.1 // indirect 45 | github.com/go-playground/validator/v10 v10.12.0 // indirect 46 | github.com/goccy/go-json v0.10.2 // indirect 47 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 48 | github.com/golang/protobuf v1.5.2 // indirect 49 | github.com/google/go-cmp v0.5.9 // indirect 50 | github.com/google/uuid v1.3.0 // indirect 51 | github.com/hashicorp/hcl v1.0.0 // indirect 52 | github.com/hkwi/nlgo 
v0.0.0-20190926025335-08733afbfe04 // indirect 53 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 54 | github.com/json-iterator/go v1.1.12 // indirect 55 | github.com/klauspost/compress v1.16.0 // indirect 56 | github.com/klauspost/cpuid/v2 v2.2.4 // indirect 57 | github.com/leodido/go-urn v1.2.3 // indirect 58 | github.com/magiconair/properties v1.8.7 // indirect 59 | github.com/mattn/go-isatty v0.0.18 // indirect 60 | github.com/mitchellh/mapstructure v1.5.0 // indirect 61 | github.com/moby/locker v1.0.1 // indirect 62 | github.com/moby/sys/mountinfo v0.6.2 // indirect 63 | github.com/moby/sys/sequential v0.5.0 // indirect 64 | github.com/moby/sys/signal v0.7.0 // indirect 65 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 66 | github.com/modern-go/reflect2 v1.0.2 // indirect 67 | github.com/opencontainers/go-digest v1.0.0 // indirect 68 | github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect 69 | github.com/opencontainers/runc v1.1.4 // indirect 70 | github.com/opencontainers/selinux v1.11.0 // indirect 71 | github.com/pelletier/go-toml/v2 v2.0.7 // indirect 72 | github.com/pkg/errors v0.9.1 // indirect 73 | github.com/spf13/afero v1.9.3 // indirect 74 | github.com/spf13/cast v1.5.0 // indirect 75 | github.com/spf13/jwalterweatherman v1.1.0 // indirect 76 | github.com/spf13/pflag v1.0.5 // indirect 77 | github.com/subosito/gotenv v1.4.2 // indirect 78 | github.com/tidwall/match v1.1.1 // indirect 79 | github.com/tidwall/pretty v1.2.0 // indirect 80 | github.com/twitchyliquid64/golang-asm v0.15.1 // indirect 81 | github.com/ugorji/go/codec v1.2.11 // indirect 82 | go.opencensus.io v0.24.0 // indirect 83 | go.opentelemetry.io/otel v1.14.0 // indirect 84 | go.opentelemetry.io/otel/trace v1.14.0 // indirect 85 | golang.org/x/arch v0.3.0 // indirect 86 | golang.org/x/crypto v0.8.0 // indirect 87 | golang.org/x/mod v0.10.0 // indirect 88 | golang.org/x/net v0.10.0 // indirect 89 | 
golang.org/x/sync v0.2.0 // indirect 90 | golang.org/x/sys v0.8.0 // indirect 91 | golang.org/x/text v0.9.0 // indirect 92 | golang.org/x/tools v0.9.1 // indirect 93 | google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect 94 | google.golang.org/grpc v1.53.0 // indirect 95 | google.golang.org/protobuf v1.30.0 // indirect 96 | gopkg.in/ini.v1 v1.67.0 // indirect 97 | gopkg.in/yaml.v2 v2.4.0 // indirect 98 | gopkg.in/yaml.v3 v3.0.1 // indirect 99 | ) 100 | -------------------------------------------------------------------------------- /gpu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | 3 | RUN apt-get update 4 | RUN apt-get -y install openssh-server python3-pip vim 5 | 6 | RUN pip3 install paramiko scp 7 | 8 | WORKDIR /root 9 | 10 | RUN mkdir .ssh 11 | 12 | COPY ./id_rsa .ssh/id_rsa 13 | 14 | COPY ./known_hosts .ssh/known_hosts 15 | 16 | COPY ./job.py job.py 17 | -------------------------------------------------------------------------------- /gpu/files.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | void read_values_from_file(const char * file, double * data, size_t size) { 9 | std::ifstream values(file, std::ios::binary); 10 | values.read(reinterpret_cast(data), size); 11 | values.close(); 12 | } 13 | 14 | void write_values_to_file(const char * file, double * data, size_t size) { 15 | std::ofstream values(file, std::ios::binary); 16 | values.write(reinterpret_cast(data), size); 17 | values.close(); 18 | } 19 | -------------------------------------------------------------------------------- /gpu/generate_matrix_data.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "files.h" 3 | 4 | #define N 10000 5 | 6 | 7 | 8 | 9 | int main(){ 10 | unsigned long long size = (unsigned long long)N*N*sizeof(double); 11 
| double *a = (double*)malloc(size); 12 | double *b = (double*)malloc(size); 13 | for( int row = 0; row < N; ++row ){ 14 | for( int col = 0; col < N; ++col ){ 15 | a[row*N + col] = row; 16 | b[row*N + col] = col+2; 17 | } 18 | } 19 | write_values_to_file("matrix_a_data",a,size); 20 | write_values_to_file("matrix_b_data",b,size); 21 | 22 | read_values_from_file("matrix_a_data",a,size); 23 | read_values_from_file("matrix_b_data",b,size); 24 | 25 | for( int row = 0; row < N; ++row ){ 26 | for( int col = 0; col < N; ++col ){ 27 | if(a[row*N + col] != row ||b[row*N + col] != col+2){ 28 | printf("error\n"); 29 | return -1; 30 | } 31 | } 32 | } 33 | printf("generate data success\n"); 34 | } -------------------------------------------------------------------------------- /gpu/gpu-add.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Job 3 | metadata: 4 | name: matrix-add 5 | namespace: gpu 6 | spec: 7 | containers: 8 | - name: gpu-server 9 | image: gpu-server 10 | command: 11 | - "./job.py" 12 | env: 13 | - name: source-path 14 | value: /gpu 15 | - name: job-name 16 | value: matrix-add 17 | - name: partition 18 | value: dgx2 19 | - name: "N" 20 | value: "1" 21 | - name: ntasks-per-node 22 | value: "1" 23 | - name: cpus-per-task 24 | value: "6" 25 | - name: gres 26 | value: gpu:1 27 | volumeMounts: 28 | - name: share-data 29 | mountPath: /gpu 30 | volumes: 31 | - name: share-data 32 | hostPath: 33 | path: /minik8s-sharedata/gpu/matrix-add 34 | 35 | backoffLimit: 3 36 | ttlSecondsAfterFinished: 10 37 | -------------------------------------------------------------------------------- /gpu/gpu-mul.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Job 3 | metadata: 4 | name: matrix-mul 5 | namespace: gpu 6 | spec: 7 | containers: 8 | - name: gpu 9 | image: gpu-server 10 | command: 11 | - "./job.py" 12 | env: 13 | - name: source-path 14 | value: /gpu 
15 | - name: job-name 16 | value: matrix-add 17 | - name: partition 18 | value: dgx2 19 | - name: "N" 20 | value: "1" 21 | - name: ntasks-per-node 22 | value: "1" 23 | - name: cpus-per-task 24 | value: "6" 25 | - name: gres 26 | value: gpu:1 27 | volumeMounts: 28 | - name: share-data 29 | mountPath: /gpu 30 | volumes: 31 | - name: share-data 32 | hostPath: 33 | path: /minik8s-sharedata/gpu/matrix-mul 34 | 35 | -------------------------------------------------------------------------------- /gpu/job.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import paramiko 3 | from time import sleep 4 | from scp import SCPClient 5 | from os import getenv 6 | 7 | 8 | NREAD = 100000 9 | ssh = paramiko.SSHClient() 10 | ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 11 | ssh.connect("pilogin.hpc.sjtu.edu.cn",22,"stu1648") 12 | 13 | 14 | job_submit_tag = "Submitted batch job" 15 | line_finish_tag = "[stu1648@" 16 | PENDING = "PENDING" 17 | COMPLETED = "COMPLETED" 18 | FAILED = "FAILED" 19 | 20 | source_path = getenv("source-path") 21 | job_name = getenv("job-name") 22 | partition= getenv("partition") 23 | N = getenv("N") 24 | ntasks_per_node = getenv("ntasks-per-node") 25 | cpus_per_task = getenv("cpus-per-task") 26 | gres = getenv("gres") 27 | 28 | if not job_name or not source_path: 29 | print("env error") 30 | exit(0) 31 | if source_path[-1] == "/": 32 | # scp send whole source 33 | source_path = source_path[:-1] 34 | 35 | if not partition: 36 | partition = "dgx2" 37 | if not N: 38 | N = 1 39 | if not ntasks_per_node: 40 | ntasks_per_node = 1 41 | if not cpus_per_task: 42 | cpus_per_task = 6 43 | if not gres: 44 | gres = "gpu:1" 45 | 46 | def generate_slurm(): 47 | print("generating slurm") 48 | with open(f"./{job_name}.slurm","w") as f: 49 | f.write("#!/bin/bash\n") 50 | 51 | f.write(f"#SBATCH --job-name={job_name}\n") 52 | f.write(f"#SBATCH --partition={partition}\n") 53 | f.write(f"#SBATCH -N {N}\n") 
54 | f.write(f"#SBATCH --ntasks-per-node={ntasks_per_node}\n") 55 | f.write(f"#SBATCH --cpus-per-task={cpus_per_task}\n") 56 | f.write(f"#SBATCH --gres={gres}\n") 57 | 58 | 59 | # result must exist . is the same dir as .slurm 60 | f.write(f"#SBATCH --output=result/output.txt\n") 61 | f.write(f"#SBATCH --error=result/error.txt\n") 62 | 63 | f.write(f"ulimit -s unlimited\n") 64 | f.write(f"ulimit -l unlimited\n") 65 | 66 | f.write("module load gcc/8.3.0 cuda/10.1.243-gcc-8.3.0\n") 67 | 68 | f.write("make build\n") 69 | f.write("make run\n") 70 | 71 | def upload_source(): 72 | print("uploading source") 73 | scp = SCPClient(ssh.get_transport(),socket_timeout=16) 74 | scp.put(source_path,recursive=True,remote_path=f"~/{job_name}") 75 | scp.put(f"./{job_name}.slurm",f"~/{job_name}/{job_name}.slurm") 76 | scp.close() 77 | 78 | 79 | def download_result(job_id): 80 | print("downloading result") 81 | scp = SCPClient(ssh.get_transport(),socket_timeout=16) 82 | scp = SCPClient(ssh.get_transport(),socket_timeout=16) 83 | #scp.get(f"~/result/{job_id}.out",f"{source_path}/{job_name}.out") 84 | #scp.get(f"~/result/{job_id}.err",f"{source_path}/{job_name}.err") 85 | scp.get(f"~/{job_name}/result",recursive=True,local_path=f"{source_path}/") 86 | scp.close() 87 | 88 | 89 | def submit_job(): 90 | t = 3 91 | while t: 92 | s = ssh.invoke_shell() 93 | print("starting ssh") 94 | sleep(2) 95 | recv = s.recv(NREAD).decode('utf-8') 96 | if recv.find("stu1648") == -1: 97 | print("start ssh failed,retrying") 98 | t -= 1 99 | sleep(5) 100 | continue 101 | 102 | print("start ssh success") 103 | 104 | print("sending sbatch") 105 | s.send(f"cd ~/{job_name} && sbatch ./{job_name}.slurm\n") 106 | sleep(5) 107 | 108 | recv = s.recv(NREAD).decode('utf-8') 109 | index = recv.find(job_submit_tag) 110 | if index ==-1: 111 | print(recv) 112 | print("sbatch failed,retrying") 113 | t -= 1 114 | sleep(5) 115 | continue 116 | print("sbatch success") 117 | job_id = 
recv[index+len(job_submit_tag)+1:recv.index(line_finish_tag)-2] 118 | #job_id = 25099457 119 | print(f"{job_id=}") 120 | 121 | print("start checking job status") 122 | 123 | check_status_cmd = f"sacct | grep {job_id} | awk '{{print $6}}'" 124 | 125 | while True: 126 | s.send(check_status_cmd+"\n") 127 | sleep(2) 128 | 129 | recv = s.recv(NREAD).decode('utf-8') 130 | status = recv[recv.index(check_status_cmd)+len(check_status_cmd)+2:recv.index(line_finish_tag)-2] 131 | print(f"{status=}") 132 | 133 | if status.find(FAILED)!=-1: 134 | print("job failed") 135 | #user might need error message, still get results 136 | #exit(0) 137 | return job_id 138 | if status.find(COMPLETED)==-1: 139 | sleep(10) 140 | else: 141 | return job_id 142 | 143 | 144 | generate_slurm() 145 | upload_source() 146 | job_id = submit_job() 147 | if job_id: 148 | download_result(job_id) 149 | print("finish") 150 | -------------------------------------------------------------------------------- /gpu/matrix-add/Makefile: -------------------------------------------------------------------------------- 1 | build: 2 | nvcc -o matrix-add matrix-add.cu -I . 
3 | run: 4 | ./matrix-add 5 | -------------------------------------------------------------------------------- /gpu/matrix-add/files.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | void read_values_from_file(const char * file, double * data, size_t size) { 9 | std::ifstream values(file, std::ios::binary); 10 | values.read(reinterpret_cast(data), size); 11 | values.close(); 12 | } 13 | 14 | void write_values_to_file(const char * file, double * data, size_t size) { 15 | std::ofstream values(file, std::ios::binary); 16 | values.write(reinterpret_cast(data), size); 17 | values.close(); 18 | } 19 | -------------------------------------------------------------------------------- /gpu/matrix-add/matrix-add.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #define CHECK_CORRECTNESS 4 | 5 | #define N 10000 6 | 7 | __global__ void matrixAddGPU( double * a, double * b, double * c ) 8 | { 9 | 10 | int row_begin = blockIdx.x * blockDim.x + threadIdx.x; 11 | int col_begin = blockIdx.y * blockDim.y + threadIdx.y; 12 | int stride_row = gridDim.x * blockDim.x; 13 | int stride_col = gridDim.y * blockDim.y; 14 | 15 | for(int row = row_begin; row < N ;row += stride_row) { 16 | for(int col= col_begin; col< N ; col+= stride_col) { 17 | c[row * N + col] = a[row*N+col] + b[row*N+col]; 18 | } 19 | } 20 | } 21 | 22 | void matrixAddCPU( double * a, double * b, double * c ) 23 | { 24 | 25 | for( int row = 0; row < N; ++row ) 26 | for( int col = 0; col < N; ++col ) 27 | { 28 | c[row * N + col] = a[row*N+col]+b[row*N+col]; 29 | } 30 | } 31 | 32 | int main() 33 | { 34 | cudaError_t cudaStatus; 35 | 36 | int deviceId; 37 | int numberOfSMs; 38 | 39 | cudaGetDevice(&deviceId); 40 | cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); 41 | printf("SM:%d\n",numberOfSMs);//80 42 | 43 | 
double *a, *b, *c_gpu; 44 | 45 | unsigned long long size = (unsigned long long)N * N * sizeof (double); // Number of bytes of an N x N matrix 46 | 47 | // Allocate memory 48 | cudaMallocManaged (&a, size); 49 | cudaMallocManaged (&b, size); 50 | cudaMallocManaged (&c_gpu, size); 51 | read_values_from_file("matrix_a_data", a, size); 52 | read_values_from_file("matrix_b_data", b, size); 53 | 54 | //if too large,invalid configuration argument 55 | dim3 threads_per_block(32,32,1); 56 | dim3 number_of_blocks (16*numberOfSMs,16*numberOfSMs, 1); 57 | cudaMemPrefetchAsync(a, size, deviceId); 58 | cudaMemPrefetchAsync(b, size, deviceId); 59 | cudaMemPrefetchAsync(c_gpu, size, deviceId); 60 | matrixAddGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu ); 61 | cudaStatus = cudaGetLastError(); 62 | if (cudaStatus != cudaSuccess) { 63 | fprintf(stderr, "call matrixAddGPU error: %s\n", cudaGetErrorString(cudaStatus)); 64 | return -1; 65 | } 66 | 67 | cudaDeviceSynchronize(); // Wait for the GPU to finish before proceeding 68 | 69 | // Call the CPU version to check our work 70 | // Compare the two answers to make sure they are equal 71 | bool error = false; 72 | #ifdef CHECK_CORRECTNESS 73 | double *c_cpu; 74 | cudaMallocManaged (&c_cpu, size); 75 | matrixAddCPU( a, b, c_cpu ); 76 | for( int row = 0; row < N && !error; ++row ) 77 | for( int col = 0; col < N && !error; ++col ) 78 | if (c_cpu[row * N + col] != c_gpu[row * N + col]) 79 | { 80 | printf("FOUND ERROR at c[%d][%d]\n", row, col); 81 | error = true; 82 | break; 83 | } 84 | cudaFree( c_cpu ); 85 | #endif 86 | if (!error) 87 | printf("Success!\n"); 88 | write_values_to_file("result/matrix_c_data", c_gpu, size); 89 | // Free all our allocated memory 90 | cudaFree(a); 91 | cudaFree(b); 92 | cudaFree( c_gpu ); 93 | } 94 | -------------------------------------------------------------------------------- /gpu/matrix-mul/Makefile: -------------------------------------------------------------------------------- 1 | 
build: 2 | nvcc -o matrix-mul matrix-mul.cu -I . 3 | run: 4 | ./matrix-mul 5 | -------------------------------------------------------------------------------- /gpu/matrix-mul/files.h: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | void read_values_from_file(const char * file, double * data, size_t size) { 9 | std::ifstream values(file, std::ios::binary); 10 | values.read(reinterpret_cast(data), size); 11 | values.close(); 12 | } 13 | 14 | void write_values_to_file(const char * file, double * data, size_t size) { 15 | std::ofstream values(file, std::ios::binary); 16 | values.write(reinterpret_cast(data), size); 17 | values.close(); 18 | } 19 | -------------------------------------------------------------------------------- /gpu/matrix-mul/matrix-mul.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #define CHECK_CORRECTNESS 4 | 5 | #define N 10000 6 | 7 | __global__ void matrixMulGPU( double * a, double * b, double * c ) 8 | { 9 | 10 | int row_begin = blockIdx.x * blockDim.x + threadIdx.x; 11 | int col_begin = blockIdx.y * blockDim.y + threadIdx.y; 12 | int stride_row = gridDim.x * blockDim.x; 13 | int stride_col = gridDim.y * blockDim.y; 14 | 15 | for(int row = row_begin; row < N ;row += stride_row) { 16 | for(int col= col_begin; col< N ; col+= stride_col) { 17 | double val = 0; 18 | for(int k = 0; k < N; ++k ){ 19 | val += a[row * N + k] * b[k * N + col]; 20 | c[row * N + col] = val; 21 | } 22 | } 23 | } 24 | } 25 | 26 | void matrixMulCPU( double * a, double * b, double * c ) 27 | { 28 | 29 | for( int row = 0; row < N; ++row ) 30 | for( int col = 0; col < N; ++col ) 31 | { 32 | double val = 0; 33 | for ( int k = 0; k < N; ++k ) 34 | val += a[row * N + k] * b[k * N + col]; 35 | c[row * N + col] = val; 36 | } 37 | } 38 | 39 | 40 | int main() 41 | { 42 | cudaError_t cudaStatus; 43 | 
44 | int deviceId; 45 | int numberOfSMs; 46 | 47 | cudaGetDevice(&deviceId); 48 | cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); 49 | printf("SM:%d\n",numberOfSMs);//80 50 | 51 | double *a, *b, *c_gpu; 52 | 53 | unsigned long long size = (unsigned long long)N * N * sizeof (double); // Number of bytes of an N x N matrix 54 | 55 | // Allocate memory 56 | cudaMallocManaged (&a, size); 57 | cudaMallocManaged (&b, size); 58 | cudaMallocManaged (&c_gpu, size); 59 | read_values_from_file("matrix_a_data", a, size); 60 | read_values_from_file("matrix_b_data", b, size); 61 | 62 | //if too large,invalid configuration argument 63 | dim3 threads_per_block(32,32,1); 64 | dim3 number_of_blocks (16*numberOfSMs,16*numberOfSMs, 1); 65 | cudaMemPrefetchAsync(a, size, deviceId); 66 | cudaMemPrefetchAsync(b, size, deviceId); 67 | cudaMemPrefetchAsync(c_gpu, size, deviceId); 68 | matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu ); 69 | cudaStatus = cudaGetLastError(); 70 | if (cudaStatus != cudaSuccess) { 71 | fprintf(stderr, "call matrixAddGPU error: %s\n", cudaGetErrorString(cudaStatus)); 72 | return -1; 73 | } 74 | 75 | cudaDeviceSynchronize(); // Wait for the GPU to finish before proceeding 76 | 77 | // Call the CPU version to check our work 78 | // Compare the two answers to make sure they are equal 79 | bool error = false; 80 | #ifdef CHECK_CORRECTNESS 81 | double *c_cpu; 82 | cudaMallocManaged (&c_cpu, size); 83 | matrixMulCPU( a, b, c_cpu ); 84 | for( int row = 0; row < N && !error; ++row ) 85 | for( int col = 0; col < N && !error; ++col ) 86 | if (c_cpu[row * N + col] != c_gpu[row * N + col]) 87 | { 88 | printf("FOUND ERROR at c[%d][%d]\n", row, col); 89 | error = true; 90 | break; 91 | } 92 | cudaFree( c_cpu ); 93 | #endif 94 | if (!error) 95 | printf("Success!\n"); 96 | write_values_to_file("result/matrix_c_data", c_gpu, size); 97 | // Free all our allocated memory 98 | cudaFree(a); 99 | cudaFree(b); 100 | cudaFree( c_gpu 
); 101 | } 102 | -------------------------------------------------------------------------------- /pkg/apiobject/autoscaler_test: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | { 4 | "apiVersion": "apps/v1", 5 | "kind": "HPA", 6 | "metadata": { 7 | "name": "hpa-practice", 8 | "namespace": "default" 9 | }, 10 | "spec": { 11 | "behavior": { 12 | "scaleDown": { 13 | "policies": [ 14 | { 15 | "periodSeconds": 60, 16 | "type": "Percent", 17 | "value": 10 18 | } 19 | ] 20 | } 21 | }, 22 | "maxReplicas": 5, 23 | "metrics": [ 24 | { 25 | "resource": { 26 | "name": "memory", 27 | "target": { 28 | "averageValue": "1000", 29 | "type": "AverageValue" 30 | } 31 | }, 32 | "type": "Resource" 33 | }, 34 | { 35 | "resource": { 36 | "name": "cpu", 37 | "target": { 38 | "averageValue": "1000", 39 | "type": "AverageValue" 40 | } 41 | }, 42 | "type": "Resource" 43 | } 44 | ], 45 | "minReplicas": 2, 46 | "scaleTargetRef": { 47 | "apiVersion": "apps/v1", 48 | "kind": "replicas", 49 | "name": "replica-practice" 50 | } 51 | } 52 | } -------------------------------------------------------------------------------- /pkg/apiobject/dnsrecord.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | // example: 4 | 5 | import "encoding/json" 6 | 7 | type DNSRecord struct { 8 | Kind string `json:"kind,omitempty"` 9 | APIVersion string `json:"apiVersion,omitempty"` 10 | Name string `json:"name"` 11 | NameSpace string `json:"namespace,omitempty"` 12 | Host string `json:"host"` 13 | Paths []Path `json:"paths"` 14 | } 15 | 16 | type Path struct { 17 | Address string `json:"address,omitempty"` 18 | PathName string `json:"pathName,omitempty"` 19 | Service string `json:"service"` 20 | Port int `json:"port"` 21 | } 22 | 23 | type DNSEntry struct { 24 | Host string `json:"host"` 25 | } 26 | 27 | func (r *DNSRecord) MarshalJSON() ([]byte, error) { 28 | type Alias DNSRecord 29 | return 
json.Marshal(&struct { 30 | *Alias 31 | }{ 32 | Alias: (*Alias)(r), 33 | }) 34 | } 35 | 36 | func (r *DNSRecord) UnMarshalJSON(data []byte) error { 37 | type Alias DNSRecord 38 | aux := &struct { 39 | *Alias 40 | }{ 41 | Alias: (*Alias)(r), 42 | } 43 | if err := json.Unmarshal(data, &aux); err != nil { 44 | return err 45 | } 46 | return nil 47 | } 48 | -------------------------------------------------------------------------------- /pkg/apiobject/dnsrecord_test.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | // dns := DNSConfig{ 4 | // Name: "example-dns", 5 | // Kind: "CoreDNS", 6 | // Host: "example.com", 7 | // Paths: []Path{ 8 | // {Address: "/sub1", Service: "service1", Port: 8080}, 9 | // {Address: "/sub2", Service: "service2", Port: 8081}, 10 | // }, 11 | //} 12 | -------------------------------------------------------------------------------- /pkg/apiobject/doc.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | -------------------------------------------------------------------------------- /pkg/apiobject/endpoint.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | type Endpoint struct { 8 | Data MetaData `json:"metadata"` 9 | Spec EndpointSpec `json:"spec"` 10 | } 11 | 12 | type EndpointSpec struct { 13 | SvcIP string `json:"svcIP"` 14 | SvcPort int32 `json:"svcPort"` 15 | DestIP string `json:"dstIP"` 16 | DestPort int32 `json:"dstPort"` 17 | } 18 | 19 | func (e *Endpoint) MarshalJSON() ([]byte, error) { 20 | type Alias Endpoint 21 | return json.Marshal(&struct { 22 | *Alias 23 | }{ 24 | Alias: (*Alias)(e), 25 | }) 26 | } 27 | 28 | func (e *Endpoint) UnMarshalJSON(data []byte) error { 29 | type Alias Endpoint 30 | aux := &struct { 31 | *Alias 32 | }{ 33 | Alias: (*Alias)(e), 34 | } 35 | if err := json.Unmarshal(data, &aux); err 
!= nil { 36 | return err 37 | } 38 | return nil 39 | } 40 | 41 | func (e *Endpoint) Union(other *Endpoint) { 42 | return 43 | } 44 | -------------------------------------------------------------------------------- /pkg/apiobject/function.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import "encoding/json" 4 | 5 | type Function struct { 6 | Kind string `json:"kind,omitempty"` 7 | APIVersion string `json:"apiVersion,omitempty"` 8 | Status VersionLabel `json:"status,omitempty"` 9 | Name string `json:"name"` 10 | Path string `json:"path"` 11 | } 12 | 13 | 14 | 15 | func (r *Function) MarshalJSON() ([]byte, error) { 16 | type Alias Function 17 | return json.Marshal(&struct { 18 | *Alias 19 | }{ 20 | Alias: (*Alias)(r), 21 | }) 22 | } 23 | 24 | func (r *Function) UnMarshalJSON(data []byte) error { 25 | type Alias Function 26 | aux := &struct { 27 | *Alias 28 | }{ 29 | Alias: (*Alias)(r), 30 | } 31 | if err := json.Unmarshal(data, &aux); err != nil { 32 | return err 33 | } 34 | return nil 35 | } -------------------------------------------------------------------------------- /pkg/apiobject/function_test.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | ) 7 | 8 | func TestFunction(t *testing.T) { 9 | function := Function{ 10 | Kind: "Function", 11 | APIVersion: "app/v1", 12 | Name: "test", 13 | Path: "/home/mini-k8s/example/serverless/singlefunc.py", 14 | } 15 | 16 | functionJson, err := json.MarshalIndent(function, "", " ") 17 | if err != nil { 18 | t.Errorf("GenerateReplicaSet failed, error marshalling replicas: %s", err) 19 | } 20 | 21 | t.Logf("replicaSet: %s", functionJson) 22 | } 23 | -------------------------------------------------------------------------------- /pkg/apiobject/job.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 
| 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | ) 7 | 8 | /* an basic example of a job apiobject: 9 | apiVersion: v1 10 | kind: Pod 11 | metadata: 12 | name: gpu-job 13 | namespace: gpu 14 | spec: 15 | containers: 16 | - name: gpu-server 17 | image: gpu-server 18 | command: 19 | - "./job.py" 20 | env: 21 | - name: source-path 22 | value: /gpu 23 | - name: job-name 24 | value: gpu-matrix 25 | - name: partition 26 | value: dgx2 27 | - name: "N" 28 | value: "1" 29 | - name: ntasks-per-node 30 | value: "1" 31 | - name: cpus-per-task 32 | value: "6" 33 | - name: gres 34 | value: gpu:1 35 | volumeMounts: 36 | - name: share-data 37 | mountPath: /gpu 38 | volumes: 39 | - name: share-data 40 | hostPath: 41 | path: /minik8s-sharedata/gpu/matrix 42 | 43 | 44 | */ 45 | 46 | type Job struct { 47 | APIVersion string `json:"apiVersion,omitempty"` 48 | Data MetaData `json:"metadata"` 49 | Spec JobSpec `json:"spec,omitempty"` 50 | Status PodStatus `json:"status,omitempty"` 51 | } 52 | 53 | type JobSpec struct { 54 | NodeSelector map[string]string `json:"nodeSelector,omitempty"` 55 | Containers []Container `json:"containers"` 56 | Volumes []Volumes `json:"volumes,omitempty"` 57 | BackoffLimit int `json:"backoffLimit"` 58 | TtlSecondsAfterFinished int `json:"ttlSecondsAfterFinished"` 59 | } 60 | 61 | 62 | func (j *Job) UnMarshalJSON(data []byte) error { 63 | type Alias Job 64 | aux := &struct { 65 | *Alias 66 | }{ 67 | Alias: (*Alias)(j), 68 | } 69 | if err := json.Unmarshal(data, &aux); err != nil { 70 | return err 71 | } 72 | return nil 73 | } 74 | 75 | func (j *Job) MarshalJSON() ([]byte, error) { 76 | type Alias Job 77 | return json.Marshal(&struct { 78 | *Alias 79 | }{ 80 | Alias: (*Alias)(j), 81 | }) 82 | } 83 | 84 | func (j *Job) String() string { 85 | return fmt.Sprintf("Job: %s", j.Data.Name) 86 | } 87 | 88 | func (j *Job) UnMarshalJsonList(data []byte) ([]Job, error) { 89 | var jobs []Job 90 | err := json.Unmarshal(data, &jobs) 91 | if err != nil { 92 | return nil, err 93 
| } 94 | return jobs, nil 95 | } 96 | 97 | func (j *Job) Union(other *Job) { 98 | if j.Status.Phase == "" { 99 | j.Status.Phase = other.Status.Phase 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /pkg/apiobject/metrics.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import ( 4 | "encoding/json" 5 | "minik8s/pkg/apiobject/utils" 6 | ) 7 | 8 | // NodeMetrics sets resource usage metrics of a node. 9 | type NodeMetrics struct { 10 | APIVersion string `json:"apiVersion,omitempty"` 11 | Data MetaData `json:"metadata"` 12 | 13 | // The following fields define time interval from which metrics were 14 | // collected from the interval [Timestamp-Window, Timestamp]. 15 | Timestamp utils.Time `json:"timestamp"` 16 | Window utils.Duration `json:"window"` 17 | 18 | // The memory usage is the memory working set. 19 | Usage ResourceList `json:"resources"` 20 | } 21 | 22 | /* 23 | { 24 | "kind": "PodMetrics", 25 | "apiVersion": "metrics.k8s.io/v1beta1", 26 | "metadata": { 27 | "name": "svddb-servixxxxxxxxxxxxxxx4b4-bzs84", 28 | "namespace": "vddb", 29 | "selfLink": "/apis/metrics.k8s.io/v1beta1/namespaces/vddb/pods/svddbxxxxxxxxxxxxxxxx-bzs84", 30 | "creationTimestamp": "2022-12-16T14:40:46Z" 31 | }, 32 | "timestamp": "2022-12-16T14:40:09Z", 33 | "window": "30s", 34 | "containers": [ 35 | { 36 | "name": "svdxxxxxxxxrators", 37 | "usage": { "cpu": "2575748239n", "memory": "1257180Ki" } 38 | } 39 | ] 40 | } 41 | */ 42 | 43 | type PodMetrics struct { 44 | APIVersion string `json:"apiVersion,omitempty"` 45 | Data MetaData `json:"metadata"` 46 | 47 | // The following fields define time interval from which metrics were 48 | // collected from the interval [Timestamp-Window, Timestamp]. 49 | Timestamp utils.Time `json:"timestamp"` 50 | Window utils.Duration `json:"window"` 51 | 52 | // Metrics for all containers are collected within the same time window. 
53 | Containers []ContainerMetrics `json:"containers"` 54 | } 55 | 56 | type ContainerMetrics struct { 57 | // Container name corresponding to the one from pod.spec.containers. 58 | Name string `json:"name"` 59 | // The memory usage is the memory working set. 60 | Usage ResourceList `json:"resources"` 61 | } 62 | 63 | type ResourceList map[ResourceName]utils.Quantity 64 | 65 | func (p *PodMetrics) MarshalJSON() ([]byte, error) { 66 | type Alias PodMetrics 67 | return json.Marshal(&struct { 68 | *Alias 69 | }{ 70 | Alias: (*Alias)(p), 71 | }) 72 | } 73 | 74 | func (p *PodMetrics) UnMarshalJSON(data []byte) error { 75 | type Alias PodMetrics 76 | aux := &struct { 77 | *Alias 78 | }{ 79 | Alias: (*Alias)(p), 80 | } 81 | if err := json.Unmarshal(data, &aux); err != nil { 82 | return err 83 | } 84 | return nil 85 | } 86 | 87 | func (n *NodeMetrics) MarshalJSON() ([]byte, error) { 88 | type Alias NodeMetrics 89 | return json.Marshal(&struct { 90 | *Alias 91 | }{ 92 | Alias: (*Alias)(n), 93 | }) 94 | } 95 | 96 | func (n *NodeMetrics) UnMarshalJSON(data []byte) error { 97 | type Alias NodeMetrics 98 | aux := &struct { 99 | *Alias 100 | }{ 101 | Alias: (*Alias)(n), 102 | } 103 | if err := json.Unmarshal(data, &aux); err != nil { 104 | return err 105 | } 106 | return nil 107 | } 108 | -------------------------------------------------------------------------------- /pkg/apiobject/node_test.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "testing" 6 | ) 7 | 8 | func TestNode(t *testing.T) { 9 | log.SetLevel(log.DebugLevel) 10 | n := &Node{ 11 | Kind: "Node", 12 | APIVersion: "v1", 13 | Data: MetaData{ 14 | Name: "test", 15 | }, 16 | Spec: NodeSpec{ 17 | Unschedulable: false, 18 | }, 19 | } 20 | 21 | b, err := n.MarshalJSON() 22 | if err != nil { 23 | t.Fatal(err) 24 | } 25 | 26 | log.Debug("Node string: ", string(b)) 27 | 28 | n2 := &Node{} 29 | err = 
n2.UnMarshalJSON(b) 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /pkg/apiobject/object.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import ( 4 | "encoding/json" 5 | ) 6 | 7 | type Object interface { 8 | MarshalJSON() ([]byte, error) 9 | UnMarshalJSON(data []byte) error 10 | } 11 | 12 | type MetaData struct { 13 | Name string `json:"name,omitempty"` 14 | Namespace string `json:"namespace,omitempty"` 15 | Labels map[string]string `json:"labels,omitempty"` 16 | ResourcesVersion VersionLabel `json:"resourcesVersion,omitempty"` // use for update 17 | } 18 | 19 | type VersionLabel string 20 | 21 | const ( 22 | DELETE VersionLabel = "delete" 23 | UPDATE VersionLabel = "update" 24 | CREATE VersionLabel = "create" 25 | ) 26 | 27 | // MarshalJSONList the object list to json 28 | func MarshalJSONList(list interface{}) ([]byte, error) { 29 | return json.Marshal(list) 30 | } 31 | -------------------------------------------------------------------------------- /pkg/apiobject/pod_test.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | ) 7 | 8 | func TestPod(t *testing.T) { 9 | p := &Pod{ 10 | Data: MetaData{ 11 | Name: "test-pod", 12 | }, 13 | Spec: PodSpec{ 14 | NodeSelector: map[string]string{"env": "test"}, 15 | Containers: []Container{ 16 | {Name: "test-container"}, 17 | }, 18 | }, 19 | } 20 | 21 | 22 | workflowJson, err := json.MarshalIndent(p, "", " ") 23 | if err != nil { 24 | t.Errorf("GenerateWorkflow failed, error marshalling replicas: %s", err) 25 | } 26 | t.Log(string(workflowJson)) 27 | 28 | //expected := `{"metadata":{"name":"test-pod","labels":{}},"spec":{"containers":[{"name":"test-container"}]},"status":{}}` 29 | 30 | // _, err := p.MarshalJSON() 31 | // if err != nil { 32 | // 
t.Fatalf("unexpected error: %v", err) 33 | // } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /pkg/apiobject/replication.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import "encoding/json" 4 | 5 | /* 6 | an basic example of a repicaset apiobject: 7 | 8 | kind: Deployment 9 | apiVersion: apps/v1 10 | metadata: 11 | 12 | name: deploy-practice 13 | 14 | spec: 15 | 16 | replicas: 3 17 | selector: 18 | app: deploy-practice 19 | template: 20 | metadata: 21 | labels: 22 | app: deploy-practice 23 | spec: 24 | containers: 25 | - name: fileserver 26 | image: hejingkai/fileserver:latest 27 | ports: 28 | - name: p1 # 端口名称 29 | containerPort: 8080 # 容器端口 30 | volumeMounts: 31 | - name: download 32 | mountPath: /usr/share/files 33 | - name: downloader 34 | image: hejingkai/downloader:latest 35 | ports: 36 | - name: p2 # 端口名称 37 | containerPort: 3000 # 容器端口 38 | volumeMounts: 39 | - name: download 40 | mountPath: /data 41 | volumes: # 定义数据卷 42 | - name: download # 数据卷名称 43 | emptyDir: {} # 数据卷类型 44 | */ 45 | type ReplicationController struct { 46 | Kind string `json:"kind,omitempty"` 47 | APIVersion string `json:"apiVersion,omitempty"` 48 | Data MetaData `json:"metadata"` 49 | Spec ReplicationControllerSpec `json:"spec,omitempty"` 50 | Status ReplicationControllerStatus `json:"status,omitempty"` 51 | } 52 | 53 | type ReplicationControllerSpec struct { 54 | // Replicas is the number of desired replicas. 55 | Replicas int32 `json:"replicas"` 56 | 57 | // Selector is a label query over pods that should match the Replicas count. 58 | Selector map[string]string `json:"selector"` 59 | 60 | Template *PodTemplateSpec `json:"template"` 61 | } 62 | 63 | type PodTemplateSpec struct { 64 | Data MetaData `json:"metadata"` 65 | Spec PodSpec `json:"spec"` 66 | } 67 | 68 | type ReplicationControllerStatus struct { 69 | // Replicas is the number of actual replicas. 
70 | Replicas int32 `json:"replicas"` 71 | // Used for replica controlled by HPA 72 | Scale int32 `json:"scale"` 73 | OwnerReference OwnerReference `json:"ownerReference,omitempty"` 74 | // the truly ready replicas. 75 | ReadyReplicas int32 `json:"readyReplicas,omitempty"` 76 | } 77 | 78 | func (r *ReplicationController) UnMarshalJSON(data []byte) error { 79 | type Alias ReplicationController 80 | aux := &struct { 81 | *Alias 82 | }{ 83 | Alias: (*Alias)(r), 84 | } 85 | if err := json.Unmarshal(data, &aux); err != nil { 86 | return err 87 | } 88 | return nil 89 | } 90 | 91 | func (r *ReplicationController) MarshalJSON() ([]byte, error) { 92 | type Alias ReplicationController 93 | return json.Marshal(&struct { 94 | *Alias 95 | }{ 96 | Alias: (*Alias)(r), 97 | }) 98 | } 99 | 100 | func (r *ReplicationController) Union(other *ReplicationController) { 101 | if r.Status.Replicas == 0 { 102 | r.Status.Replicas = other.Status.Replicas 103 | } 104 | empty := OwnerReference{} 105 | if empty == r.Status.OwnerReference { 106 | r.Status.OwnerReference = other.Status.OwnerReference 107 | } 108 | } 109 | 110 | func (r *ReplicationController) UnMarshalJSONList(data []byte) ([]ReplicationController, error) { 111 | var list []ReplicationController 112 | if err := json.Unmarshal(data, &list); err != nil { 113 | return nil, err 114 | } 115 | return list, nil 116 | } 117 | -------------------------------------------------------------------------------- /pkg/apiobject/replication_test.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "testing" 7 | ) 8 | 9 | func TestReplicationController(t *testing.T) { 10 | replica := ReplicationController{ 11 | APIVersion: "v1", 12 | Data: MetaData{ 13 | Name: "deploy-practice", 14 | Namespace: "default", 15 | }, 16 | Spec: ReplicationControllerSpec{ 17 | Replicas: 3, 18 | Selector: map[string]string{ 19 | "app": "deploy-practice", 20 | }, 21 | 
}, 22 | } 23 | jsonBytes, _ := json.MarshalIndent(replica, "", " ") 24 | fmt.Println(string(jsonBytes)) 25 | 26 | } 27 | -------------------------------------------------------------------------------- /pkg/apiobject/service_test.go: -------------------------------------------------------------------------------- 1 | package apiobject 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | ) 7 | 8 | func TestService(t *testing.T) { 9 | service := &Service{ 10 | APIVersion: "app/v1", 11 | Data: MetaData{ 12 | Name: "dns-service", 13 | }, 14 | Spec: ServiceSpec{ 15 | Type: "ClusterIP", 16 | Ports: []ServicePort{ 17 | { 18 | Name: "service-port1", 19 | Protocol: "TCP", 20 | Port: 6800, 21 | TargetPort: "p1", 22 | }, 23 | { 24 | Name: "service-port2", 25 | Protocol: "TCP", 26 | Port: 6880, 27 | TargetPort: "p2", 28 | }, 29 | { 30 | Name: "service-port3", 31 | Protocol: "TCP", 32 | Port: 80, 33 | TargetPort: "p3", 34 | }, 35 | }, 36 | Selector: map[string]string{ 37 | "app": "dns-test", 38 | }, 39 | }, 40 | } 41 | 42 | serviceJson, err := json.MarshalIndent(service, "", " ") 43 | if err != nil { 44 | t.Error(err) 45 | } 46 | t.Log(string(serviceJson)) 47 | } -------------------------------------------------------------------------------- /pkg/apiobject/utils/duration.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "encoding/json" 5 | "time" 6 | ) 7 | 8 | // Duration is a wrapper around time.Duration which supports correct 9 | // marshaling to YAML and JSON. In particular, it marshals into strings, which 10 | // can be used as map keys in json. 11 | type Duration struct { 12 | time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` 13 | } 14 | 15 | // UnmarshalJSON implements the json.Unmarshaller interface. 
16 | func (d *Duration) UnMarshalJSON(b []byte) error { 17 | var str string 18 | err := json.Unmarshal(b, &str) 19 | if err != nil { 20 | return err 21 | } 22 | 23 | pd, err := time.ParseDuration(str) 24 | if err != nil { 25 | return err 26 | } 27 | d.Duration = pd 28 | return nil 29 | } 30 | 31 | // MarshalJSON implements the json.Marshaler interface. 32 | func (d Duration) MarshalJSON() ([]byte, error) { 33 | return json.Marshal(d.Duration.String()) 34 | } 35 | 36 | // ToUnstructured implements the value.UnstructuredConverter interface. 37 | func (d Duration) ToUnstructured() interface{} { 38 | return d.Duration.String() 39 | } 40 | 41 | // OpenAPISchemaType is used by the kube-openapi generator when constructing 42 | // the OpenAPI spec of this type. 43 | // 44 | // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators 45 | func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} } 46 | 47 | // OpenAPISchemaFormat is used by the kube-openapi generator when constructing 48 | // the OpenAPI spec of this type. 
49 | func (_ Duration) OpenAPISchemaFormat() string { return "" } 50 | -------------------------------------------------------------------------------- /pkg/apiobject/utils/quantity.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | /* 4 | 单位约定: 5 | cpu : k8s的1000 = cpu的一个核 6 | 7 | 如果一台服务器cpu是4核 那么 k8s单位表示就是 4* 1000 8 | 9 | 内存 : k8s的8320MI = 8320 * 1024 * 1024 字节 10 | 11 | 1MI = 1024*1024 字节 12 | 13 | 同理 1024MI /1024 = 1G 14 | */ 15 | type Quantity uint64 16 | -------------------------------------------------------------------------------- /pkg/apiobject/workflow_test.go: -------------------------------------------------------------------------------- 1 | 2 | package apiobject 3 | 4 | import ( 5 | "encoding/json" 6 | "testing" 7 | ) 8 | 9 | func TestWorkflow(t *testing.T) { 10 | value1 := 1 11 | value2 := 2 12 | example := WorkFlow { 13 | APIVersion: "v1", 14 | Comment: "An example of the Amazon States Language using a choice state.", 15 | StartAt: "FirstState", 16 | States: map[string]State { 17 | "FirstState": TaskState { 18 | Type: "Task", 19 | InputPath: "$.orderId, $.customer", 20 | ResultPath: "$.myResult", 21 | Next: "ChoiceState", 22 | }, 23 | "ChoiceState": ChoiceState { 24 | Type: "Choice", 25 | Choices: []ChoiceItem { 26 | { 27 | Variable: "$.foo", 28 | NumericEquals: &value1, 29 | Next: "FirstMatchState", 30 | }, 31 | { 32 | Variable: "$.foo", 33 | NumericEquals: &value2, 34 | Next: "SecondMatchState", 35 | }, 36 | }, 37 | Default: "DefaultState", 38 | }, 39 | "FirstMatchState": TaskState { 40 | Type: "Task", 41 | InputPath: "$.orderId, $.customer", 42 | ResultPath: "$.myResult", 43 | Next: "NextState", 44 | }, 45 | "SecondMatchState": TaskState { 46 | Type: "Task", 47 | InputPath: "$.orderId, $.customer", 48 | ResultPath: "$.myResult", 49 | Next: "NextState", 50 | }, 51 | "DefaultState": FailState { 52 | Type: "Fail", 53 | Error: "DefaultStateError", 54 | Cause: "No Matches!", 55 | }, 56 | 
"NextState": TaskState { 57 | Type: "Task", 58 | InputPath: "$.orderId, $.customer", 59 | ResultPath: "$.myResult", 60 | End: true, 61 | }, 62 | }, 63 | } 64 | 65 | workflowJson, err := json.MarshalIndent(example, "", " ") 66 | if err != nil { 67 | t.Errorf("GenerateWorkflow failed, error marshalling replicas: %s", err) 68 | } 69 | t.Logf("workflow: %s", workflowJson) 70 | } 71 | 72 | 73 | func TestWorkflowExample(t *testing.T) { 74 | value := 5 75 | example := WorkFlow { 76 | Kind : "Workflow", 77 | APIVersion: "v1", 78 | Comment: "An example of basic workflow.", 79 | StartAt: "getsum", 80 | States: map[string]State { 81 | "getsum": TaskState { 82 | Type: "Task", 83 | InputPath: "$.x,$.y", 84 | Next: "judgesum", 85 | }, 86 | "judgesum": ChoiceState { 87 | Type: "Choice", 88 | Choices: []ChoiceItem { 89 | { 90 | Variable: "$.z", 91 | NumericGreaterThan: &value, 92 | Next: "printsum", 93 | }, 94 | { 95 | Variable: "$.z", 96 | NumericLessThan: &value, 97 | Next: "getdiff", 98 | }, 99 | }, 100 | Default: "printerror", 101 | }, 102 | "printsum": TaskState { 103 | Type: "Task", 104 | InputPath: "$.z", 105 | ResultPath: "$.str", 106 | End: true, 107 | }, 108 | "getdiff": TaskState { 109 | Type: "Task", 110 | InputPath: "$.x,$.y,$.z", 111 | Next: "printdiff", 112 | }, 113 | "printdiff": TaskState { 114 | Type: "Task", 115 | InputPath: "$.z", 116 | ResultPath: "$.str", 117 | End: true, 118 | }, 119 | "printerror": FailState { 120 | Type: "Fail", 121 | Error: "DefaultStateError", 122 | Cause: "No Matches!", 123 | }, 124 | }, 125 | } 126 | workflowJson, err := json.MarshalIndent(example, "", " ") 127 | if err != nil { 128 | t.Errorf("GenerateWorkflow failed, error marshalling replicas: %s", err) 129 | } 130 | t.Logf("workflow: %s", workflowJson) 131 | 132 | workflowJson, err = example.MarshalJSON() 133 | if err != nil { 134 | t.Errorf("GenerateWorkflow failed, error marshalling replicas: %s", err) 135 | } 136 | 137 | workflow := WorkFlow{} 138 | err = 
workflow.UnMarshalJSON(workflowJson) 139 | if err != nil { 140 | t.Errorf("GenerateWorkflow failed, error marshalling replicas: %s", err) 141 | } 142 | } 143 | 144 | 145 | func TestMarshal(t *testing.T) { 146 | rawData := []byte(`{"kind":"Workflow","apiVersion":"v1","name":"workflow-exp","status":"update","startAt":"getsum","states":{"getdiff":{"inputPath":"$.x,$.y,$.z","next":"printdiff","type":"Task"},"getsum":{"inputPath":"$.x,$.y","next":"judgesum","type":"Task"},"judgesum":{"choices":[{"NumericGreaterThan":5,"next":"printsum","variable":"$.z"},{"NumericLessThan":5,"next":"getdiff","variable":"$.z"}],"default":"printerror","type":"Choice"},"printdiff":{"end":true,"inputPath":"$.z","outputPath":"$.str","type":"Task"},"printerror":{"cause":"No Matches!","error":"DefaultStateError","type":"Fail"},"printsum":{"end":true,"inputPath":"$.z","outputPath":"$.str","type":"Task"}},"comment":"An example of basic workflow."}`) 147 | workflow := WorkFlow{} 148 | err := workflow.UnMarshalJSON(rawData) 149 | if err != nil { 150 | t.Errorf("GenerateWorkflow failed, error marshalling replicas: %s", err) 151 | } 152 | workflowJson, err := workflow.MarshalJSON() 153 | if err != nil { 154 | t.Errorf("GenerateWorkflow failed, error marshalling replicas: %s", err) 155 | } 156 | t.Logf("workflow: %s", workflowJson) 157 | } -------------------------------------------------------------------------------- /pkg/controller/jobcontroller.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "minik8s/config" 6 | "minik8s/pkg/apiobject" 7 | "minik8s/utils" 8 | "time" 9 | ) 10 | 11 | type jobHandler struct { 12 | } 13 | 14 | type jobPodHandler struct { 15 | } 16 | 17 | func (r jobHandler) HandleCreate(message []byte) { 18 | job := &apiobject.Job{} 19 | job.UnMarshalJSON(message) 20 | 21 | createPod(*job) 22 | job.Status.Phase = apiobject.Created 23 | 
utils.UpdateObject(job,config.JOB,job.Data.Namespace,job.Data.Name) 24 | 25 | log.Info("[job controller] Create job. Name:", job.Data.Name) 26 | } 27 | 28 | func (r jobHandler) HandleDelete(message []byte) { 29 | job := &apiobject.Job{} 30 | job.UnMarshalJSON(message) 31 | 32 | deletePod(*job) 33 | utils.DeleteObject(config.JOB, job.Data.Namespace, job.Data.Name) 34 | 35 | log.Info("[job controller] Delete job. Name:", job.Data.Name) 36 | } 37 | 38 | func (r jobHandler) HandleUpdate(message []byte) { 39 | 40 | } 41 | 42 | func (r jobHandler) GetType() config.ObjType { 43 | return config.JOB 44 | } 45 | 46 | /* ========== Start Pod Handler ========== */ 47 | 48 | func (p jobPodHandler) HandleCreate(message []byte) { 49 | 50 | } 51 | 52 | func (p jobPodHandler) HandleDelete(message []byte) { 53 | //delete job-> delete pod 54 | //not consider pod deleted by user directly 55 | } 56 | 57 | func (p jobPodHandler) HandleUpdate(message []byte) { 58 | pod := &apiobject.Pod{} 59 | pod.UnMarshalJSON(message) 60 | 61 | 62 | if pod.Status.OwnerReference.Kind == config.JOB{ 63 | log.Info("[job controller] handleupdate") 64 | info := utils.GetObject(config.JOB, pod.Data.Namespace, pod.Status.OwnerReference.Name) 65 | job:= &apiobject.Job{} 66 | job.UnMarshalJSON([]byte(info)) 67 | 68 | 69 | job.Status.Phase = pod.Status.Phase 70 | switch pod.Status.Phase{ 71 | case apiobject.Failed:{ 72 | job.Spec.BackoffLimit -= 1 73 | if job.Spec.BackoffLimit > 0{ 74 | go func(job apiobject.Job){ 75 | deletePod(job) 76 | time.Sleep(time.Second*5) 77 | createPod(job) 78 | }(*job) 79 | } 80 | } 81 | case apiobject.Finished:{ 82 | waitToDelete:=func(t int, job apiobject.Job){ 83 | time.Sleep(time.Second * time.Duration(t)) 84 | deletePod(job) 85 | } 86 | go waitToDelete(job.Spec.TtlSecondsAfterFinished,*job) 87 | 88 | } 89 | } 90 | log.Info("[job controller] update phase:", job.Status.Phase) 91 | utils.UpdateObject(job, config.JOB, pod.Data.Namespace, pod.Data.Name) 92 | } 93 | 94 | } 95 | 96 | 
func (p jobPodHandler) GetType() config.ObjType { 97 | return config.POD 98 | } 99 | 100 | 101 | 102 | func createPod(job apiobject.Job){ 103 | pod := &apiobject.Pod{ 104 | Data: job.Data, 105 | Spec: apiobject.PodSpec{ 106 | NodeSelector:job.Spec.NodeSelector, 107 | Containers:job.Spec.Containers, 108 | Volumes:job.Spec.Volumes, 109 | }, 110 | } 111 | pod.Status.OwnerReference = apiobject.OwnerReference{ 112 | Kind: config.JOB, 113 | Name: job.Data.Name, 114 | Controller: false, 115 | } 116 | 117 | utils.CreateObject(pod,config.POD,job.Data.Namespace) 118 | 119 | } 120 | func deletePod(job apiobject.Job){ 121 | utils.DeleteObject(config.POD, job.Data.Namespace, job.Data.Name) 122 | 123 | } 124 | -------------------------------------------------------------------------------- /pkg/controller/manager.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import "minik8s/utils" 4 | 5 | func Run() { 6 | /* service controller */ 7 | var sp svcPodHandler 8 | var ss svcServiceHandler 9 | go utils.Sync(ss) 10 | go utils.Sync(sp) 11 | 12 | /* replicaset controller */ 13 | var rp rsPodHandler 14 | var rr rsReplicaHandler 15 | go utils.Sync(rp) 16 | go utils.Sync(rr) 17 | 18 | /* hpa controller */ 19 | var hs hpaScalerHandler 20 | go utils.Sync(hs) 21 | go regularCheck() 22 | 23 | var jc jobPodHandler 24 | var jh jobHandler 25 | go utils.Sync(jc) 26 | go utils.Sync(jh) 27 | 28 | 29 | utils.WaitForever() 30 | } 31 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/apimachinery/apiserver.go: -------------------------------------------------------------------------------- 1 | package apimachinery 2 | 3 | import ( 4 | "context" 5 | "minik8s/pkg/apiobject" 6 | "minik8s/pkg/kubeapiserver/handlers" 7 | "minik8s/pkg/kubeapiserver/storage" 8 | "minik8s/pkg/kubeapiserver/watch" 9 | "strings" 10 | 11 | "github.com/gin-gonic/gin" 12 | log "github.com/sirupsen/logrus" 13 | ) 14 
| 15 | type APIServer struct { 16 | HttpServer *gin.Engine 17 | EtcdStorage *storage.EtcdStorage 18 | } 19 | 20 | 21 | func InitNodes(tool *storage.EtcdStorage) { 22 | // delete all nodes' info in etcd 23 | key := "/registry/nodes/" 24 | var nodes []apiobject.Node 25 | err := tool.GetList(context.Background(), key, &nodes) 26 | if err != nil { 27 | log.Info("[InitNodes] the node list is empty") 28 | } else { 29 | for _, node := range nodes { 30 | nodeKey := "/registry/nodes/" + node.Data.Name 31 | err := tool.Delete(context.Background(), nodeKey) 32 | if err != nil { 33 | log.Error("[InitNodes] delete node error: ", err) 34 | } 35 | } 36 | } 37 | 38 | } 39 | 40 | func NewAPI() *APIServer { 41 | storage := storage.NewEtcdStorageNoParam() 42 | if storage == nil { 43 | return nil 44 | } 45 | 46 | InitNodes(storage) 47 | return &APIServer{HttpServer: gin.Default(), EtcdStorage: storage} 48 | } 49 | 50 | // UpgradeToWebSocket the route middleware for update http request to websocket request 51 | func (a *APIServer) UpgradeToWebSocket() gin.HandlerFunc { 52 | return func(c *gin.Context) { 53 | upgradeHeader := c.GetHeader("Upgrade") 54 | connectionHeader := c.GetHeader("Connection") 55 | sourceHeader := c.GetHeader("X-Source") 56 | if (strings.ToLower(upgradeHeader) == "websocket" && strings.Contains(strings.ToLower(connectionHeader), "upgrade")) || c.Query("watch") == "true" { 57 | // Stop the request processing 58 | c.Abort() 59 | 60 | // Get the key to watch 61 | // the resources that can be watched: pod, node, service... 
62 | resource := c.Param("resource") 63 | namespace := c.Param("namespace") 64 | name := c.Param("name") 65 | var watchKey = "/registry/" + resource 66 | if namespace != "" { 67 | watchKey += "/" + namespace 68 | if name != "" { 69 | watchKey += "/" + name 70 | } 71 | } 72 | 73 | // set up a new websocket connection 74 | newWatcher, err := watch.NewWatchServer(c) 75 | if err != nil { 76 | log.Error("[UpgradeToWebSocket] fail to establish a new websocket connection, err: ", err) 77 | return 78 | } 79 | 80 | // add the watch server to the watch server map 81 | // only service and node watch need to add to the watch table, and all of them watch the all pods 82 | log.Info("[UpgradeToWebSocket] watchKey: ", watchKey) 83 | if sourceHeader != "" { 84 | watch.WatchTable[sourceHeader] = newWatcher 85 | log.Info("[NodeWatchHandler] watchTable size: ", len(watch.WatchTable)) 86 | } 87 | 88 | // store the connection in the watchStorage 89 | list, ok := watch.WatchStorage.Load(watchKey) 90 | if ok { 91 | if threadList, ok := list.(*watch.ThreadSafeList); !ok { 92 | log.Error("[UpgradeToWebSocket] fail to convert the list to ThreadSafeList") 93 | return 94 | } else { 95 | threadList.PushBack(newWatcher) 96 | watch.WatchStorage.Store(watchKey, threadList) 97 | } 98 | 99 | } else { 100 | // create a list 101 | newList := watch.NewThreadSafeList() 102 | newList.PushBack(newWatcher) 103 | watch.WatchStorage.Store(watchKey, newList) 104 | } 105 | } else { 106 | // Continue with the request processing 107 | c.Next() 108 | } 109 | } 110 | } 111 | 112 | func (a *APIServer) RegisterHandler(route handlers.Route) { 113 | a.HttpServer.Use(a.UpgradeToWebSocket()) 114 | switch route.Method { 115 | case "GET": 116 | a.HttpServer.GET(route.Path, route.Handler) 117 | case "POST": 118 | a.HttpServer.POST(route.Path, route.Handler) 119 | case "PUT": 120 | a.HttpServer.PUT(route.Path, route.Handler) 121 | case "DELETE": 122 | a.HttpServer.DELETE(route.Path, route.Handler) 123 | } 124 | } 125 | 126 
| 127 | func (a *APIServer) Run(addr string) error { 128 | for _, route := range handlers.HandlerTable { 129 | a.RegisterHandler(route) 130 | } 131 | 132 | return a.HttpServer.Run(addr) 133 | } 134 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/apimachinery/heartbeat.go: -------------------------------------------------------------------------------- 1 | package apimachinery 2 | 3 | import ( 4 | "context" 5 | "minik8s/pkg/apiobject" 6 | "minik8s/pkg/apiobject/utils" 7 | "minik8s/pkg/kubeapiserver/handlers" 8 | "minik8s/pkg/kubeapiserver/storage" 9 | "time" 10 | 11 | log "github.com/sirupsen/logrus" 12 | ) 13 | 14 | 15 | 16 | func HeartBeat() { 17 | // get all node from etcd 18 | storageTool := storage.NewEtcdStorageNoParam() 19 | key := "/registry/nodes/" 20 | for { 21 | log.Info("[HeartBeat] check node check heartbeat") 22 | var nodes []apiobject.Node 23 | err := storageTool.GetList(context.Background(), key, &nodes) 24 | if err != nil { 25 | log.Error("[HeartBeat] the node list is empty") 26 | } else { 27 | for _, node := range nodes { 28 | // check timeout 29 | if node.Status.Time == 0 { // the time not assigned 30 | continue 31 | } 32 | timeout := utils.CheckTimeout(node.Status.Time) 33 | if timeout { 34 | nodeKey := "/registry/nodes/" + node.Data.Name 35 | node.Status.Conditions[0].Status = apiobject.NetworkUnavailable 36 | err := storageTool.GuaranteedUpdate(context.Background(), nodeKey, &node) 37 | if err != nil { 38 | log.Info("[HeartBeat] update node error: ", err) 39 | } 40 | // reschedule 41 | handlers.Reschedule(&node) 42 | } 43 | } 44 | } 45 | time.Sleep(10 * time.Second) 46 | } 47 | } -------------------------------------------------------------------------------- /pkg/kubeapiserver/apimachinery/routeInstaller.go: -------------------------------------------------------------------------------- 1 | package apimachinery 2 | 3 | import "github.com/gin-gonic/gin" 4 | 5 | // ServiceRoutes the route 
information for service 6 | type ServiceRoutes struct { 7 | ServiceName string 8 | Routes []Route 9 | } 10 | 11 | // Route specific route 12 | type Route struct { 13 | Path string 14 | Method string 15 | Handler gin.HandlerFunc 16 | } 17 | 18 | func (r *Route) register(engine *gin.Engine) { 19 | switch r.Method { 20 | case "GET": 21 | engine.GET(r.Path, r.Handler) 22 | case "POST": 23 | engine.POST(r.Path, r.Handler) 24 | case "PUT": 25 | engine.PUT(r.Path, r.Handler) 26 | case "DELETE": 27 | engine.DELETE(r.Path, r.Handler) 28 | default: 29 | panic("invalid HTTP method") 30 | } 31 | 32 | } 33 | 34 | func (serv *ServiceRoutes) registerRoutes(engine *gin.Engine) { 35 | for i := range serv.Routes { 36 | serv.Routes[i].register(engine) 37 | } 38 | } 39 | 40 | // WatchFilter route filter for "watch=true" query parameter 41 | func WatchFilter(c *gin.Context) bool { 42 | if c.Query("watch") == "true" { 43 | return true 44 | } else { 45 | return false 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/doc.go: -------------------------------------------------------------------------------- 1 | package kubeapiserver 2 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/handlers/nodehandler_test.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "github.com/gin-gonic/gin" 5 | log "github.com/sirupsen/logrus" 6 | "minik8s/pkg/apiobject" 7 | "minik8s/pkg/kubeapiserver/watch" 8 | "net/http" 9 | "net/http/httptest" 10 | "strings" 11 | "testing" 12 | ) 13 | 14 | func TestRegisterNodeHandler(t *testing.T) { 15 | log.SetLevel(log.DebugLevel) 16 | n := &apiobject.Node{ 17 | Kind: "Node", 18 | Data: apiobject.MetaData{ 19 | Name: "test-node", 20 | }, 21 | Spec: apiobject.NodeSpec{ 22 | Unschedulable: false, 23 | }, 24 | } 25 | 26 | requestBody, err := n.MarshalJSON() 27 | 
log.Debug("TestRegisterNodeHandler the request body is ", string(requestBody), " and the error is ", err) 28 | if err != nil { 29 | t.Error(err) 30 | } 31 | 32 | payload := strings.NewReader(string(requestBody)) 33 | url := "api/v1/nodes" 34 | req, err := http.NewRequest(http.MethodGet, url, payload) 35 | if err != nil { 36 | t.Error(err) 37 | } 38 | req.Header.Set("Content-Type", "application/json") 39 | req.Header.Set("Upgrade", "websocket") 40 | req.Header.Set("Connection", "Upgrade") 41 | req.Header.Set("Sec-WebSocket-Version", "13") 42 | req.Header.Set("Sec-WebSocket-Key", "dGhlIHNhbXBsZSBub25jZQ==") 43 | 44 | w := httptest.NewRecorder() 45 | c, _ := gin.CreateTestContext(w) 46 | 47 | c.Request = req 48 | 49 | RegisterNodeHandler(c) 50 | 51 | if w.Code != http.StatusOK { 52 | t.Errorf("Expected status code %d, got %d", http.StatusCreated, w.Code) 53 | log.Debug("[TestRegisterNodeHandler] ", w.Body.String()) 54 | } 55 | 56 | watcherKey := "registry/nodes/test" 57 | _, OK := watch.WatchTable[watcherKey] 58 | if !OK { 59 | t.Errorf("Expected watcher key %s, got %s", watcherKey, "nil") 60 | } 61 | 62 | } 63 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/handlers/podhandler_test.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "github.com/gin-gonic/gin" 5 | log "github.com/sirupsen/logrus" 6 | "minik8s/pkg/apiobject" 7 | "net/http" 8 | "net/http/httptest" 9 | "strings" 10 | "testing" 11 | ) 12 | 13 | func TestCreatePodHandler(t *testing.T) { 14 | 15 | p := &apiobject.Pod{ 16 | Data: apiobject.MetaData{ 17 | Name: "test-pod", 18 | }, 19 | Spec: apiobject.PodSpec{ 20 | Containers: []apiobject.Container{ 21 | {Name: "test-container"}, 22 | }, 23 | }, 24 | } 25 | 26 | requestBody, err := p.MarshalJSON() 27 | payload := strings.NewReader(string(requestBody)) 28 | if err != nil { 29 | t.Fatalf("unexpected error: %v", err) 30 | } 31 | 32 | 
url := "/api/v1/namespaces/{namespace}/pods" 33 | namespace := "default" 34 | url = strings.Replace(url, "{namespace}", namespace, 1) 35 | req, _ := http.NewRequest(http.MethodPost, url, payload) 36 | req.Header.Set("Content-Type", "application/json") 37 | 38 | w := httptest.NewRecorder() 39 | c, _ := gin.CreateTestContext(w) 40 | 41 | c.Request = req 42 | 43 | CreatePodHandler(c) 44 | 45 | if w.Code != http.StatusOK && w.Code != http.StatusInternalServerError { 46 | if w.Code == http.StatusInternalServerError { 47 | log.Warn("TestPodHandler_CreatePod: ", w.Body.String()) 48 | } 49 | t.Fatalf("unexpected status code: %d", w.Code) 50 | } 51 | } 52 | 53 | func TestGetPodHandler(t *testing.T) { 54 | 55 | url := "/api/v1/namespaces/{namespace}/pods/{name}" 56 | namespace := "default" 57 | name := "test-pod" 58 | url = strings.Replace(url, "{namespace}", namespace, 1) 59 | url = strings.Replace(url, "{name}", name, 1) 60 | req, _ := http.NewRequest(http.MethodGet, url, nil) 61 | 62 | w := httptest.NewRecorder() 63 | c, _ := gin.CreateTestContext(w) 64 | 65 | c.Request = req 66 | 67 | GetPodHandler(c) 68 | 69 | if w.Code != http.StatusOK && w.Code != http.StatusInternalServerError { 70 | if w.Code == http.StatusInternalServerError { 71 | log.Warn("TestPodHandler_GetPod: ", w.Body.String()) 72 | } 73 | t.Fatalf("unexpected status code: %d", w.Code) 74 | } 75 | } 76 | 77 | func TestDeletePodHandler(t *testing.T) { 78 | 79 | url := "/api/v1/namespaces/{namespace}/pods/{name}" 80 | namespace := "default" 81 | name := "test-pod" 82 | url = strings.Replace(url, "{namespace}", namespace, 1) 83 | url = strings.Replace(url, "{name}", name, 1) 84 | req, _ := http.NewRequest(http.MethodDelete, url, nil) 85 | 86 | w := httptest.NewRecorder() 87 | c, _ := gin.CreateTestContext(w) 88 | 89 | c.Request = req 90 | 91 | DeletePodHandler(c) 92 | 93 | if w.Code != http.StatusOK && w.Code != http.StatusInternalServerError { 94 | if w.Code == http.StatusInternalServerError { 95 | 
log.Warn("TestPodHandler_DeletePod: ", w.Body.String()) 96 | } 97 | t.Fatalf("unexpected status code: %d", w.Code) 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/handlers/routeInstaller.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import "github.com/gin-gonic/gin" 4 | 5 | // ServiceRoutes the route information for service 6 | type ServiceRoutes struct { 7 | ServiceName string 8 | Routes []Route 9 | } 10 | 11 | // Route specific route 12 | type Route struct { 13 | Path string 14 | Method string 15 | Handler gin.HandlerFunc 16 | } 17 | 18 | func (r *Route) register(engine *gin.Engine) { 19 | switch r.Method { 20 | case "GET": 21 | engine.GET(r.Path, r.Handler) 22 | case "POST": 23 | engine.POST(r.Path, r.Handler) 24 | case "PUT": 25 | engine.PUT(r.Path, r.Handler) 26 | case "DELETE": 27 | engine.DELETE(r.Path, r.Handler) 28 | default: 29 | panic("invalid HTTP method") 30 | } 31 | } 32 | 33 | func (serv *ServiceRoutes) registerRoutes(engine *gin.Engine) { 34 | for i := range serv.Routes { 35 | serv.Routes[i].register(engine) 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/run.go: -------------------------------------------------------------------------------- 1 | package kubeapiserver 2 | 3 | import ( 4 | "minik8s/pkg/kubeapiserver/apimachinery" 5 | ) 6 | 7 | func Run() { 8 | myAPI := apimachinery.NewAPI() 9 | go apimachinery.HeartBeat() 10 | err := myAPI.Run(":8080") 11 | if err != nil { 12 | panic(err) 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/storage/ectd_test.go: -------------------------------------------------------------------------------- 1 | package storage 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "go.etcd.io/etcd/clientv3" 7 | "testing" 8 | ) 9 | 10 | type MyStruct 
struct { 11 | Field1 string 12 | Field2 int 13 | } 14 | 15 | func TestStorage(t *testing.T) { 16 | client, err := clientv3.New(clientv3.Config{ 17 | Endpoints: []string{"localhost:2380"}, 18 | }) 19 | if err != nil { 20 | return 21 | } 22 | 23 | // test create 24 | myStruct := &MyStruct{Field1: "Hello", Field2: 42} 25 | etcdStorage := NewEtcdStorage(client) 26 | err = etcdStorage.Create(context.Background(), "myStruct", &myStruct) 27 | var expectedErr error = nil 28 | 29 | if err != expectedErr { 30 | t.Errorf("Expected error %v, got %v", expectedErr, err) 31 | } 32 | 33 | myStruct5 := &MyStruct{Field1: "Hello2", Field2: 43} 34 | err = etcdStorage.Create(context.Background(), "myStruct2", &myStruct5) 35 | if err != expectedErr { 36 | t.Errorf("Expected error %v, got %v", expectedErr, err) 37 | } 38 | 39 | // test get 40 | var myStruct2 MyStruct 41 | err = etcdStorage.Get(context.Background(), "myStruct", &myStruct2) 42 | if err != expectedErr { 43 | t.Errorf("Expected error %v, got %v", expectedErr, err) 44 | } 45 | if myStruct2.Field1 != myStruct.Field1 { 46 | t.Errorf("Expected %v, got %v", myStruct.Field1, myStruct2.Field1) 47 | } 48 | if myStruct2.Field2 != myStruct.Field2 { 49 | t.Errorf("Expected %v, got %v", myStruct.Field2, myStruct2.Field2) 50 | } 51 | 52 | // test getList 53 | var myStructList []MyStruct 54 | err = etcdStorage.GetList(context.Background(), "myStruct", &myStructList) 55 | if err != expectedErr { 56 | t.Errorf("Expected error %v, got %v", expectedErr, err) 57 | } 58 | if len(myStructList) != 2 { 59 | for key, myStruct := range myStructList { 60 | fmt.Println(key, myStruct) 61 | } 62 | t.Errorf("Expected %v, got %v", 1, len(myStructList)) 63 | } 64 | 65 | // test update 66 | myStruct.Field1 = "World" 67 | err = etcdStorage.GuaranteedUpdate(context.Background(), "myStruct", &myStruct) 68 | if err != expectedErr { 69 | t.Errorf("Expected error %v, got %v", expectedErr, err) 70 | } 71 | 72 | // test get 73 | var myStruct3 MyStruct 74 | err = 
etcdStorage.Get(context.Background(), "myStruct", &myStruct3) 75 | if err != expectedErr { 76 | t.Errorf("Expected error %v, got %v", expectedErr, err) 77 | } 78 | if myStruct3.Field1 != myStruct.Field1 { 79 | t.Errorf("Expected %v, got %v", myStruct.Field1, myStruct3.Field1) 80 | } 81 | 82 | // test delete 83 | err = etcdStorage.Delete(context.Background(), "myStruct") 84 | if err != expectedErr { 85 | t.Errorf("Expected error %v, got %v", expectedErr, err) 86 | } 87 | 88 | err = etcdStorage.Delete(context.Background(), "myStruct2") 89 | if err != expectedErr { 90 | t.Errorf("Expected error %v, got %v", expectedErr, err) 91 | } 92 | 93 | // test get 94 | var myStruct4 MyStruct 95 | err = etcdStorage.Get(context.Background(), "myStruct", &myStruct4) 96 | if err == expectedErr { 97 | t.Errorf("Expected error %v, got %v", expectedErr, err) 98 | } 99 | 100 | var ErrKeyNotFound = fmt.Errorf("key not found: myStruct") 101 | if err != ErrKeyNotFound { 102 | fmt.Print(ErrKeyNotFound) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/watch/list.go: -------------------------------------------------------------------------------- 1 | package watch 2 | 3 | import ( 4 | "container/list" 5 | "sync" 6 | ) 7 | 8 | type ThreadSafeList struct { 9 | mux sync.Mutex 10 | List *list.List 11 | } 12 | 13 | func NewThreadSafeList() *ThreadSafeList { 14 | return &ThreadSafeList{List: list.New()} 15 | } 16 | 17 | func (l *ThreadSafeList) Len() int { 18 | l.mux.Lock() 19 | defer l.mux.Unlock() 20 | 21 | return l.List.Len() 22 | } 23 | 24 | func (l *ThreadSafeList) Front() *list.Element { 25 | l.mux.Lock() 26 | defer l.mux.Unlock() 27 | 28 | return l.List.Front() 29 | } 30 | 31 | func (l *ThreadSafeList) Back() *list.Element { 32 | l.mux.Lock() 33 | defer l.mux.Unlock() 34 | 35 | return l.List.Back() 36 | } 37 | 38 | func (l *ThreadSafeList) PushFront(v interface{}) *list.Element { 39 | l.mux.Lock() 40 | defer 
l.mux.Unlock() 41 | 42 | return l.List.PushFront(v) 43 | } 44 | 45 | func (l *ThreadSafeList) PushBack(v interface{}) *list.Element { 46 | l.mux.Lock() 47 | defer l.mux.Unlock() 48 | 49 | return l.List.PushBack(v) 50 | } 51 | 52 | func (l *ThreadSafeList) Remove(e *list.Element) interface{} { 53 | l.mux.Lock() 54 | defer l.mux.Unlock() 55 | 56 | return l.List.Remove(e) 57 | } 58 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/watch/watch.go: -------------------------------------------------------------------------------- 1 | package watch 2 | 3 | import ( 4 | "github.com/gin-gonic/gin" 5 | "github.com/gorilla/websocket" 6 | log "github.com/sirupsen/logrus" 7 | //"minik8s/pkg/kubeapiserver/storage" 8 | "net/http" 9 | "sync" 10 | ) 11 | 12 | //var Storage = storage.NewEtcdStorageNoParam() 13 | 14 | // WatchServer WebSocket server 15 | type WatchServer struct { 16 | connMutex sync.Mutex 17 | Conn *websocket.Conn 18 | } 19 | 20 | // NewWatchServer create a new WebSocket server 21 | func NewWatchServer(c *gin.Context) (*WatchServer, error) { 22 | // update HTTP connection to WebSocket connection 23 | conn, err := (&websocket.Upgrader{ 24 | ReadBufferSize: 1024, 25 | WriteBufferSize: 1024, 26 | CheckOrigin: func(r *http.Request) bool { 27 | return true 28 | }, 29 | }).Upgrade(c.Writer, c.Request, nil) 30 | 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return &WatchServer{Conn: conn}, nil 36 | } 37 | 38 | // Read websocket message 39 | func (s *WatchServer) Read() ([]byte, error) { 40 | s.connMutex.Lock() 41 | _, message, err := s.Conn.ReadMessage() 42 | if err != nil { 43 | s.connMutex.Unlock() 44 | return nil, err 45 | } 46 | s.connMutex.Unlock() 47 | return message, nil 48 | } 49 | 50 | // Write websocket message 51 | func (s *WatchServer) Write(message []byte) error { 52 | s.connMutex.Lock() 53 | err := s.Conn.WriteMessage(websocket.TextMessage, message) 54 | if err != nil { 55 | s.connMutex.Unlock() 
56 | return err 57 | } 58 | s.connMutex.Unlock() 59 | return nil 60 | } 61 | 62 | // Close websocket connection 63 | func (s *WatchServer) Close() error { 64 | return s.Conn.Close() 65 | } 66 | 67 | 68 | func ListWatch(watchKey string, value []byte) error { 69 | 70 | list, ok := WatchStorage.Load(watchKey) 71 | if !ok { 72 | log.Error("[ListWatch] key: ", watchKey, " not found") 73 | return nil 74 | } 75 | if threadList, ok := list.(*ThreadSafeList); ok { 76 | for e := threadList.List.Front(); e != nil; e = e.Next() { 77 | if server, ok := e.Value.(*WatchServer); ok { 78 | err := server.Write(value) 79 | if err != nil { 80 | log.Warn("[ListWatch] Write message error: ", err) 81 | } 82 | } 83 | } 84 | } 85 | return nil 86 | } 87 | -------------------------------------------------------------------------------- /pkg/kubeapiserver/watch/watchtable.go: -------------------------------------------------------------------------------- 1 | package watch 2 | 3 | import "sync" 4 | 5 | // WatchTable map the attribute name to the watch server 6 | var WatchTable = make(map[string]*WatchServer) 7 | 8 | var WatchStorage sync.Map 9 | -------------------------------------------------------------------------------- /pkg/kubectl/cmd/apply.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "log" 7 | ctlutils "minik8s/pkg/kubectl/utils" 8 | "minik8s/utils" 9 | "os" 10 | "strings" 11 | 12 | "github.com/ghodss/yaml" 13 | "github.com/spf13/cobra" 14 | "github.com/tidwall/gjson" 15 | "github.com/wxnacy/wgo/arrays" 16 | ) 17 | 18 | var ApplyCmd = &cobra.Command{ 19 | Use: "apply", 20 | Short: "Kubectl apply manages applications through files defining Kubernetes resources.", 21 | Long: "Kubectl apply manages applications through files defining Kubernetes resources. 
Usage: kubectl apply (-f FILENAME)", 22 | Run: apply, 23 | } 24 | 25 | func apply(cmd *cobra.Command, args []string) { 26 | _yaml, err := os.ReadFile(filePath) 27 | if err != nil { 28 | fmt.Println(err.Error()) 29 | return 30 | } 31 | 32 | _json, err := yaml.YAMLToJSON(_yaml) 33 | if err != nil { 34 | fmt.Println(err.Error()) 35 | return 36 | } 37 | 38 | kind := strings.ToLower(gjson.Get(string(_json), "kind").String()) 39 | namespace := gjson.Get(string(_json), "metadata.namespace").String() 40 | if kind == "dnsrecord" { 41 | namespace = gjson.Get(string(_json), "namespace").String() 42 | } 43 | var _url string 44 | //fmt.Print(namespace) 45 | if idx := arrays.ContainsString(ctlutils.Resources, kind); idx != -1 { 46 | if namespace == "" { 47 | var obj map[string]interface{} 48 | json.Unmarshal(_json, &obj) 49 | obj["metadata"].(map[string]interface{})["namespace"] = "default" 50 | _json, _ = json.Marshal(obj) 51 | } 52 | _url = ctlutils.ParseUrlMany(kind, namespace) 53 | } else if idx := arrays.ContainsString(ctlutils.Globals, kind); idx != -1 { 54 | _url = ctlutils.ParseUrlMany(kind, "nil") 55 | } else { 56 | fmt.Printf("error: the server doesn't have a resource type \"%s\"", kind) 57 | } 58 | fmt.Printf("url:%s\n", _url) 59 | info, err := utils.SendRequest("POST", _json, _url) 60 | if err != nil { 61 | log.Fatal(info) 62 | } 63 | name := gjson.Get(string(_json), "metadata.name") 64 | fmt.Print(name, " configured", "\n") 65 | 66 | } 67 | -------------------------------------------------------------------------------- /pkg/kubectl/cmd/delete.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | ctlutils "minik8s/pkg/kubectl/utils" 7 | "minik8s/utils" 8 | "strings" 9 | 10 | "github.com/spf13/cobra" 11 | "github.com/wxnacy/wgo/arrays" 12 | ) 13 | 14 | var DeleteCmd = &cobra.Command{ 15 | Use: "delete ", 16 | Short: "Delete resources by resources and names", 17 | Long: "Delete 
resources by resources and names", 18 | Args: cobra.ExactArgs(2), 19 | Run: delete, 20 | } 21 | 22 | func delete(cmd *cobra.Command, args []string) { 23 | 24 | var _url string 25 | /* get all resources of in certain type under specified namespace */ 26 | kind := strings.ToLower(args[0]) 27 | name := strings.ToLower(args[1]) 28 | /* validate if `kind` is in the resource list */ 29 | if idx := arrays.ContainsString(ctlutils.Resources, kind); idx != -1 { 30 | _url = ctlutils.ParseUrlOne(kind, name, nameSpace) 31 | } else if idx := arrays.ContainsString(ctlutils.Globals, kind); idx != -1 { 32 | _url = ctlutils.ParseUrlOne(kind, name, "nil") 33 | } else { 34 | fmt.Printf("error: the server doesn't have a resource type \"%s\"", kind) 35 | } 36 | 37 | fmt.Printf("url:%s\n", _url) 38 | 39 | /* display the return info */ 40 | var str []byte 41 | _, err := utils.SendRequest("DELETE", str, _url) 42 | if err != nil { 43 | log.Fatal(err) 44 | } 45 | fmt.Print(name, " deleted", "\n") 46 | /* TODO 解析info,错误判断pod名字是否存在 */ 47 | } 48 | -------------------------------------------------------------------------------- /pkg/kubectl/cmd/describe.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | ctlutils "minik8s/pkg/kubectl/utils" 6 | log "github.com/sirupsen/logrus" 7 | "minik8s/utils" 8 | "os" 9 | "strings" 10 | 11 | "github.com/ghodss/yaml" 12 | "github.com/spf13/cobra" 13 | "github.com/tidwall/gjson" 14 | "github.com/wxnacy/wgo/arrays" 15 | ) 16 | 17 | var DescribeCmd = &cobra.Command{ 18 | Use: "describe / describe s", 19 | Short: "Display one or many resources", 20 | Long: "Display one or many resources", 21 | Args: cobra.RangeArgs(1, 2), 22 | Run: describe, 23 | } 24 | 25 | func describe(cmd *cobra.Command, args []string) { 26 | 27 | var _url string 28 | var kind string 29 | if len(args) == 1 { 30 | /* get all resources in certain type under specified namespace */ 31 | kind = strings.ToLower(args[0]) 
32 | kind = kind[0 : len(kind)-1] 33 | /* validate if `kind` is in the resource list */ 34 | if idx := arrays.ContainsString(ctlutils.Resources, kind); idx != -1 { 35 | _url = ctlutils.ParseUrlMany(kind, nameSpace) 36 | } else if idx := arrays.ContainsString(ctlutils.Globals, kind); idx != -1 { 37 | _url = ctlutils.ParseUrlMany(kind, "nil") 38 | } else { 39 | fmt.Printf("error: the server doesn't have a resource type \"%s\"", kind) 40 | } 41 | 42 | fmt.Printf("url:%s\n", _url) 43 | 44 | } else { 45 | /* get resource in certain type with its name under specified namespace */ 46 | kind = strings.ToLower(args[0]) 47 | name := strings.ToLower(args[1]) 48 | /* validate if `kind` is in the resource list */ 49 | if idx := arrays.ContainsString(ctlutils.Resources, kind); idx != -1 { 50 | _url = ctlutils.ParseUrlOne(kind, name, nameSpace) 51 | } else if idx := arrays.ContainsString(ctlutils.Globals, kind); idx != -1 { 52 | _url = ctlutils.ParseUrlOne(kind, name, "nil") 53 | } else { 54 | fmt.Printf("error: the server doesn't have a resource type \"%s\"", kind) 55 | } 56 | 57 | fmt.Printf("url:%s\n", _url) 58 | 59 | } 60 | 61 | /* display the info */ 62 | var str []byte 63 | _json, err := utils.SendRequest("GET", str, _url) 64 | if err != nil { 65 | //log.Fatal(err) 66 | /* 解析info,错误判断pod名字是否存在 */ 67 | fmt.Print(_json) 68 | } 69 | 70 | 71 | if kind == "function" { 72 | path := gjson.Get(string(_json), "path") 73 | if path.Exists() { 74 | f, err := os.Open(path.String()) 75 | if err != nil { 76 | log.Info("open file error") 77 | } 78 | defer f.Close() 79 | buf := make([]byte, 1024) 80 | // read the entire file 81 | n, err := f.Read(buf) 82 | fmt.Print(string(buf[:n])) 83 | fmt.Print("\n") 84 | } 85 | } else { 86 | info, _ := yaml.JSONToYAML([]byte(_json)) 87 | fmt.Print(string(info)) 88 | fmt.Print("\n") 89 | } 90 | 91 | 92 | /* {"error":"key not found: /registry/pods/default/test"} */ 93 | /* 
{"metadata":{"name":"test-pod"},"spec":{"containers":[{"name":"test-container","resources":{"limits":{},"requests":{}}}]},"status":{"phase":"Pending"}}root@minik8s-2:~/mini-k8s/pkg/kubectl/test# */ 94 | 95 | } 96 | -------------------------------------------------------------------------------- /pkg/kubectl/cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "github.com/spf13/cobra" 6 | ) 7 | 8 | func init() { 9 | /* flag options that can be inherited by child commands */ 10 | RootCmd.PersistentFlags().StringVarP(&nameSpace, "nameSpace", "n", "default", "kubectl (-n NAMESPACE)") 11 | 12 | /* apply cmd: eg: kubectl apply -f */ 13 | ApplyCmd.Flags().StringVarP(&filePath, "filePath", "f", "", "kubectl apply -f ") 14 | ApplyCmd.MarkFlagRequired("filePath") 15 | RootCmd.AddCommand(ApplyCmd) 16 | 17 | RootCmd.AddCommand(GetCmd) 18 | 19 | RootCmd.AddCommand(DescribeCmd) 20 | 21 | RootCmd.AddCommand(DeleteCmd) 22 | 23 | TriggerCmd.Flags().StringVarP(&filePath, "filePath", "f", "", "kubectl trigger -f ") 24 | TriggerCmd.MarkFlagRequired("filePath") 25 | RootCmd.AddCommand(TriggerCmd) 26 | 27 | } 28 | 29 | var filePath string 30 | var nameSpace string 31 | 32 | var RootCmd = &cobra.Command{ 33 | Use: "kubectl", 34 | Short: "kubectl controls the minik8s cluster manager.", 35 | Long: "kubectl controls the minik8s cluster manager.", 36 | Run: runRoot, 37 | } 38 | 39 | func runRoot(cmd *cobra.Command, args []string) { 40 | fmt.Printf("execute %s args:%v \n", cmd.Name(), args) 41 | } 42 | -------------------------------------------------------------------------------- /pkg/kubectl/cmd/trigger.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | ctlutils "minik8s/pkg/kubectl/utils" 6 | "minik8s/utils" 7 | "os" 8 | "strings" 9 | 10 | "github.com/ghodss/yaml" 11 | "github.com/spf13/cobra" 12 | ) 13 | 14 | var 
TriggerCmd = &cobra.Command{ 15 | Use: "trigger -f ", 16 | Short: "Kubectl trigger command", 17 | Long: "Kubectl trigger command, Usage: kubectl trigger (-f FILENAME)", 18 | Run: trigger, 19 | } 20 | 21 | 22 | func trigger(cmd *cobra.Command, args []string) { 23 | _yaml, err := os.ReadFile(filePath) 24 | if err != nil { 25 | fmt.Println(err.Error()) 26 | return 27 | } 28 | 29 | _json, err := yaml.YAMLToJSON(_yaml) 30 | if err != nil { 31 | fmt.Println(err.Error()) 32 | return 33 | } 34 | 35 | kind := strings.ToLower(args[0]) 36 | if kind != "function" && kind != "workflow" { 37 | fmt.Println("invalid resource type, it should be function or workflow") 38 | } 39 | 40 | name := strings.ToLower(args[1]) 41 | _url := ctlutils.ParseUrlTrigger(kind, name) 42 | fmt.Printf("url:%s\n", _url) 43 | info, err := utils.SendRequest("POST", _json, _url) 44 | fmt.Println("the response: ", info) 45 | } -------------------------------------------------------------------------------- /pkg/kubectl/doc.go: -------------------------------------------------------------------------------- 1 | package kubectl -------------------------------------------------------------------------------- /pkg/kubectl/doc/dependency.md: -------------------------------------------------------------------------------- 1 | `github.com/ghodss/yaml`: 支持yaml转json 2 | 3 | `github.com/tidwall/gjson`: 支持json格式解析 4 | 5 | `github.com/spf13/cobra`: 支持交互式命令解析 6 | -------------------------------------------------------------------------------- /pkg/kubectl/doc/kubectl-api.md: -------------------------------------------------------------------------------- 1 | # Mini-K8s kubectl指令手册 2 | Mini-K8s支持的命令如下: 3 | #### kubectl apply 4 | 5 | `kubectl apply -f ` 6 | 7 | #### kubectl get 8 | 9 | `kubectl get [-n ]` 10 | 11 | `kubectl get +s [-n ]` 12 | 13 | #### kubectl delete 14 | 15 | `kubectl delete [-n ]` 16 | 17 | 由于k8s的Api是基于REST的设计思想,因此,不同种类的HTTP请求也就对应了不同的操作。比较常用的对应关系是: 18 | 19 | **GET(SELECT)**:从服务器取出资源(一项或多项)。GET请求对应k8s 
api的获取信息功能。因此,如果是获取信息的命令都要使用GET方式发起HTTP请求。 20 | 21 | **POST(CREATE)**:在服务器新建一个资源。POST请求对应k8s api的创建功能。因此,需要创建Pods、ReplicaSet或者service的时候请使用这种方式发起请求。 22 | 23 | **PUT(UPDATE)**:在服务器更新资源(客户端提供改变后的完整资源)。对应更新nodes或Pods的状态、ReplicaSet的自动备份数量等等。 24 | 25 | **PATCH(UPDATE)**:在服务器更新资源(客户端提供改变的属性)。 26 | 27 | **DELETE(DELETE)**:从服务器删除资源。在稀牛学院的学员使用完毕环境后,可以使用这种方式将Pod删除,释放资源。 28 | -------------------------------------------------------------------------------- /pkg/kubectl/test/http.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "github.com/gin-gonic/gin" 6 | ) 7 | 8 | func main() { 9 | r := gin.Default() 10 | r.POST("/api/v1/namespaces/default/pods", func(c *gin.Context) { 11 | data, _ := c.GetRawData() 12 | fmt.Printf("receive: %s", string(data)) 13 | c.JSON(200, gin.H{ 14 | "message": "pong", 15 | }) 16 | }) 17 | fmt.Print("run") 18 | r.Run("127.0.0.1:8080") // 监听并在 0.0.0.0:8080 上启动服务 19 | } 20 | -------------------------------------------------------------------------------- /pkg/kubectl/test/http_test.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "github.com/gin-gonic/gin" 6 | ) 7 | 8 | func main() { 9 | r := gin.Default() 10 | r.POST("/api/v1/namespaces/default/pods", func(c *gin.Context) { 11 | data, _ := c.GetRawData() 12 | fmt.Printf("receive: %s", string(data)) 13 | c.JSON(200, gin.H{ 14 | "message": "pong", 15 | }) 16 | }) 17 | fmt.Print("run") 18 | r.Run("127.0.0.1:8080") // 监听并在 0.0.0.0:8080 上启动服务 19 | } 20 | -------------------------------------------------------------------------------- /pkg/kubectl/test/kcl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IPADSIntern-MiniK8s/MiniK8s/610a8a0a09d3988fecdc60e0cdf69d3030cb9d82/pkg/kubectl/test/kcl -------------------------------------------------------------------------------- 
/pkg/kubectl/test/kubectl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IPADSIntern-MiniK8s/MiniK8s/610a8a0a09d3988fecdc60e0cdf69d3030cb9d82/pkg/kubectl/test/kubectl -------------------------------------------------------------------------------- /pkg/kubectl/test/kubectl_test.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "bytes" 5 | "minik8s/pkg/kubectl/cmd" 6 | "testing" 7 | ) 8 | 9 | func TestApply(t *testing.T) { 10 | /* usable only when api-server is on */ 11 | actual := new(bytes.Buffer) 12 | cmd.RootCmd.SetOut(actual) 13 | cmd.RootCmd.SetErr(actual) 14 | cmd.RootCmd.SetArgs([]string{"apply", "-f", "D:\\mini-k8s\\pkg\\kubectl\\test\\test.yaml"}) 15 | cmd.RootCmd.Execute() 16 | 17 | //expected := "This-is-command-a1" 18 | //fmt.Print(actual.String()) 19 | // 20 | //assert.Equal(t, actual.String(), expected, "actual is not expected") 21 | } 22 | -------------------------------------------------------------------------------- /pkg/kubectl/test/test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Pod 3 | metadata: 4 | labels: 5 | app: deploy-1 6 | name: example-pod2 7 | spec: 8 | containers: 9 | - image: nginx 10 | name: example-container 11 | ports: 12 | - containerPort: 12345 13 | name: p1 14 | -------------------------------------------------------------------------------- /pkg/kubectl/utils/utils.go: -------------------------------------------------------------------------------- 1 | package ctlutils 2 | 3 | import ( 4 | "fmt" 5 | "minik8s/config" 6 | "strings" 7 | 8 | "github.com/tidwall/gjson" 9 | ) 10 | 11 | //var apiServerIp = "http://192.168.1.13:8080" 12 | 13 | var Resources = []string{"pod", "service", "endpoint", "replica", "job", "hpa", "dnsrecord"} 14 | var Globals = []string{"function", "workflow", "node"} 15 | 16 | func 
ParseUrlFromJson(_json []byte) string { 17 | // operation: create/apply. eg: POST "/api/v1/namespaces/{namespace}/pods" 18 | kind := strings.ToLower(gjson.Get(string(_json), "kind").String()) 19 | namespace := gjson.Get(string(_json), "metadata.namespace") 20 | 21 | url := fmt.Sprintf("http://%s/api/v1/namespaces/%s/%ss", config.ApiServerIp, namespace, kind) 22 | return url 23 | } 24 | 25 | func ParseUrlMany(kind string, ns string) string { 26 | // operation: get. eg: GET "/api/v1/namespaces/{namespace}/pods" 27 | // operation: create/apply. eg: POST "/api/v1/namespaces/{namespace}/pods" 28 | var namespace string 29 | if ns == "nil" { 30 | url := fmt.Sprintf("http://%s/api/v1/%ss", config.ApiServerIp, kind) 31 | return url 32 | } 33 | if ns == "" { 34 | namespace = "default" 35 | } else { 36 | namespace = ns 37 | } 38 | url := fmt.Sprintf("http://%s/api/v1/namespaces/%s/%ss", config.ApiServerIp, namespace, kind) 39 | return url 40 | } 41 | 42 | func ParseUrlOne(kind string, name string, ns string) string { 43 | // operation: get. eg: "/api/v1/namespaces/{namespace}/pods/{pod_name}" 44 | var namespace string 45 | if ns == "nil" { 46 | url := fmt.Sprintf("http://%s/api/v1/%ss/%s", config.ApiServerIp, kind, name) 47 | return url 48 | } 49 | if ns == "" { 50 | namespace = "default" 51 | } else { 52 | namespace = ns 53 | } 54 | url := fmt.Sprintf("http://%s/api/v1/namespaces/%s/%ss/%s", config.ApiServerIp, namespace, kind, name) 55 | return url 56 | } 57 | 58 | func ParseUrlTrigger(kind string, name string) string { 59 | url := fmt.Sprintf("http://%s/api/v1/%ss/%s/trigger", config.ApiServerIp, kind, name) 60 | return url 61 | } 62 | -------------------------------------------------------------------------------- /pkg/kubedns/config/Corefile: -------------------------------------------------------------------------------- 1 | .:53 { 2 | etcd { 3 | endpoint http://localhost:2380 4 | path /dns 5 | upstream /etc/resolv.conf 6 | fallthrough 7 | } 8 | forward . 
114.114.114.114 9 | reload 6s 10 | errors 11 | loop 12 | prometheus # 监控插件 13 | loadbalance 14 | } -------------------------------------------------------------------------------- /pkg/kubedns/config/nginx.conf: -------------------------------------------------------------------------------- 1 | 2 | worker_processes 5; ## Default: 1 3 | error_log ./error.log debug; 4 | pid ./nginx.pid; 5 | worker_rlimit_nofile 8192; 6 | 7 | events { 8 | worker_connections 4096; ## Default: 1024 9 | } 10 | http { 11 | 12 | server { 13 | listen 0.0.0.0:80; 14 | server_name node1.com; 15 | 16 | 17 | location /path1/ { 18 | access_log /var/log/nginx/access.log; 19 | proxy_pass http://127.1.1.10:8010/; 20 | } 21 | 22 | location /path2/ { 23 | access_log /var/log/nginx/access.log; 24 | proxy_pass http://127.1.1.11:8011/; 25 | } 26 | 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /pkg/kubedns/nginx/nginx.tmpl: -------------------------------------------------------------------------------- 1 | http { 2 | {{range .Servers}} 3 | server { 4 | listen 0.0.0.0:{{.Port}}; 5 | server_name {{.ServerName}}; 6 | 7 | {{range .Locations}} 8 | location /{{.Path}}/ { 9 | access_log /var/log/nginx/access.log; 10 | proxy_pass http://{{.IP}}:{{.Port}}/; 11 | } 12 | {{end}} 13 | } 14 | {{end}} 15 | } 16 | -------------------------------------------------------------------------------- /pkg/kubedns/nginx/nginxeditor.go: -------------------------------------------------------------------------------- 1 | package nginx 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "minik8s/pkg/apiobject" 6 | "os" 7 | "os/exec" 8 | "strconv" 9 | "text/template" 10 | ) 11 | 12 | type Location struct { 13 | Path string 14 | IP string 15 | Port string 16 | } 17 | 18 | type Server struct { 19 | Port string 20 | ServerName string 21 | Locations []Location 22 | } 23 | 24 | type Config struct { 25 | Servers []Server 26 | } 27 | 28 | // GenerateConfig generate the nginx 
config file 29 | func GenerateConfig(configs []apiobject.DNSRecord) { 30 | 31 | file, err := os.OpenFile("/home/mini-k8s/pkg/kubedns/config/nginx.conf", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) 32 | if err != nil { 33 | log.Error("[GenerateConfig] error opening file: ", err) 34 | } 35 | defaultHeader := ` 36 | worker_processes 5; ## Default: 1 37 | error_log ./error.log debug; 38 | pid ./nginx.pid; 39 | worker_rlimit_nofile 8192; 40 | 41 | events { 42 | worker_connections 4096; ## Default: 1024 43 | } 44 | ` 45 | bytes, err := file.WriteString(defaultHeader) 46 | if err != nil { 47 | log.Error("[GenerateConfig] error writing to file: ", err) 48 | } 49 | log.Debug("[GenerateConfig] wrote ", bytes, " bytes to file") 50 | 51 | tmpl := template.Must(template.ParseFiles("/home/mini-k8s/pkg/kubedns/nginx/nginx.tmpl")) 52 | 53 | // generate the servers 54 | ServerList := make([]Server, 0) 55 | for _, config := range configs { 56 | // generate the locations 57 | locations := make([]Location, 0) 58 | for _, path := range config.Paths { 59 | location := Location{ 60 | Path: path.PathName, 61 | IP: path.Address, 62 | Port: strconv.Itoa(path.Port), 63 | } 64 | locations = append(locations, location) 65 | } 66 | server := Server{ 67 | Port: "80", 68 | ServerName: config.Host, 69 | Locations: locations, 70 | } 71 | ServerList = append(ServerList, server) 72 | } 73 | 74 | config := Config{ 75 | Servers: ServerList, 76 | } 77 | err = tmpl.Execute(file, config) 78 | if err != nil { 79 | log.Error("[GenerateConfig] error executing template: ", err) 80 | } 81 | 82 | file.Close() 83 | } 84 | 85 | 86 | func ReloadNginx() error { 87 | cmd := exec.Command("pkill", "nginx") 88 | err := cmd.Run() 89 | cmd = exec.Command("nginx", "-c", "/home/mini-k8s/pkg/kubedns/config/nginx.conf") 90 | err = cmd.Run() 91 | if err != nil { 92 | log.Error("[ReloadNginx] cmd.Run() failed with ", err.Error()) 93 | return err 94 | } 95 | return nil 96 | } 97 | 
-------------------------------------------------------------------------------- /pkg/kubedns/nginx/nginxeditor_test.go: -------------------------------------------------------------------------------- 1 | package nginx 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "minik8s/pkg/apiobject" 7 | "testing" 8 | ) 9 | 10 | func TestNginxEditor(t *testing.T) { 11 | DNSRecordList := make([]apiobject.DNSRecord, 0) 12 | DNSRecordList = append(DNSRecordList, apiobject.DNSRecord{ 13 | Kind: "DNS", 14 | APIVersion: "v1", 15 | Name: "dns-test1", 16 | Host: "node1.com", 17 | Paths: []apiobject.Path{ 18 | { 19 | PathName: "path1", 20 | Address: "127.1.1.10", 21 | Service: "service1", 22 | Port: 8010, 23 | }, 24 | { 25 | PathName: "path2", 26 | Address: "127.1.1.11", 27 | Service: "service2", 28 | Port: 8011, 29 | }, 30 | }, 31 | }) 32 | jsonData, err := json.MarshalIndent(DNSRecordList[0], "", " ") 33 | if err != nil { 34 | fmt.Println("Error marshalling data:", err) 35 | return 36 | } 37 | t.Log("json data: ", string(jsonData)) 38 | // fmt.Println(string(jsonData)) 39 | DNSRecordList = append(DNSRecordList, apiobject.DNSRecord{ 40 | Kind: "DNS", 41 | APIVersion: "v1", 42 | Name: "dns-test2", 43 | Host: "node2.com", 44 | Paths: []apiobject.Path{ 45 | { 46 | Address: "127.1.1.12", 47 | Service: "service3", 48 | Port: 8081, 49 | }, 50 | { 51 | Address: "127.1.1.13", 52 | Service: "service4", 53 | Port: 8082, 54 | }, 55 | }, 56 | }) 57 | 58 | GenerateConfig(DNSRecordList) 59 | } 60 | -------------------------------------------------------------------------------- /pkg/kubedns/testing/test.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Test Page 5 | 6 | 7 |

Hello, world!

8 | 9 | 10 | -------------------------------------------------------------------------------- /pkg/kubelet/container/containerutil.go: -------------------------------------------------------------------------------- 1 | package container 2 | 3 | import ( 4 | "github.com/containerd/containerd/oci" 5 | "github.com/opencontainers/runtime-spec/specs-go" 6 | "strconv" 7 | ) 8 | 9 | func GenerateMountSpec(mounts map[string]string) oci.SpecOpts { 10 | sMounts := make([]specs.Mount, len(mounts), len(mounts)) 11 | i := 0 12 | for source, destination := range mounts { 13 | sMounts[i] = specs.Mount{ 14 | Destination: destination, 15 | Source: source, 16 | Type: "bind", // if tmpfs, can not persist 17 | Options: []string{"bind"}, //otherwise no such device error 18 | } 19 | i++ 20 | } 21 | return oci.WithMounts(sMounts) 22 | } 23 | 24 | func GenerateHostnameSpec(hostname string) oci.SpecOpts { 25 | return oci.WithHostname(hostname) 26 | } 27 | 28 | func GenerateCMDSpec(CmdLine []string) oci.SpecOpts { 29 | return oci.WithProcessArgs(CmdLine...) 
30 | } 31 | 32 | type CPUSpecType int 33 | 34 | const ( 35 | CPUNone CPUSpecType = iota 36 | CPUNumber //float ,not bind to certain cpu; eg: 1 ,0.5 37 | CPUCoreID // bind certain cpus ,start from 0; eg: 0,1, 0-2 38 | CPUShares // priority 39 | ) 40 | 41 | type CPUSpec struct { 42 | Type CPUSpecType 43 | Value string 44 | } 45 | 46 | func GenerateCPUSpec(spec CPUSpec) oci.SpecOpts { 47 | switch spec.Type { 48 | case CPUNumber: 49 | cpus, _ := strconv.ParseFloat(spec.Value, 64) 50 | var ( 51 | period = uint64(100000) 52 | quota = int64(cpus * 100000.0) 53 | ) 54 | return oci.WithCPUCFS(quota, period) 55 | case CPUCoreID: 56 | return oci.WithCPUs(spec.Value) 57 | case CPUShares: 58 | shares, _ := strconv.ParseUint(spec.Value, 10, 64) 59 | return oci.WithCPUShares(shares) 60 | } 61 | return nil 62 | } 63 | 64 | // bytes, if exceed ,the container will be stopped at once 65 | func GenerateMemorySpec(limit uint64) oci.SpecOpts { 66 | return oci.WithMemoryLimit(limit) 67 | } 68 | 69 | func GenerateEnvSpec(envs []string) oci.SpecOpts { 70 | return oci.WithEnv(envs) 71 | } 72 | 73 | func GenerateNamespaceSpec(nsType, path string) oci.SpecOpts { 74 | return oci.WithLinuxNamespace(specs.LinuxNamespace{Type: specs.LinuxNamespaceType(nsType), Path: path}) 75 | } 76 | -------------------------------------------------------------------------------- /pkg/kubelet/image/image.go: -------------------------------------------------------------------------------- 1 | package image 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/containerd/containerd" 7 | "minik8s/pkg/kubelet/utils" 8 | "strings" 9 | ) 10 | 11 | func imageFromLocal(client *containerd.Client, imageName string) containerd.Image { 12 | im, err := client.ImageService().Get(context.Background(), imageName) 13 | if err != nil { 14 | return nil 15 | } 16 | return containerd.NewImage(client, im) 17 | } 18 | 19 | func imageFromDefaultRegistry(client *containerd.Client, imageName string) containerd.Image { 20 | image, err 
:= client.Pull(context.Background(), imageName, containerd.WithPullUnpack) 21 | if err != nil { 22 | return nil 23 | } 24 | return image 25 | } 26 | func EnsureImage(namespace string, client *containerd.Client, imageName string) containerd.Image { 27 | //has tag 28 | if !strings.Contains(imageName, ":") { 29 | imageName += ":latest" 30 | } else { 31 | if strings.Contains(imageName[strings.Index(imageName, ":"):], "/") { 32 | if !strings.Contains(imageName[strings.Index(imageName, ":")+1:], ":") { 33 | imageName += ":latest" 34 | } 35 | } 36 | } 37 | //fmt.Println(imageName) 38 | if strings.Contains(imageName, "master:5000") { 39 | output, err := utils.Ctl(namespace, "pull", "--insecure-registry", imageName) 40 | if err != nil { 41 | fmt.Println(output) 42 | return nil 43 | } 44 | return imageFromLocal(client, imageName) 45 | } 46 | if strings.Contains(imageName, "/") { 47 | return imageFromDefaultRegistry(client, imageName) 48 | } 49 | local := "master:5000/" + imageName 50 | _, err := utils.Ctl(namespace, "pull", "--insecure-registry", local) 51 | if err == nil { 52 | return imageFromLocal(client, local) 53 | } 54 | return imageFromDefaultRegistry(client, "docker.io/library/"+imageName) 55 | } 56 | -------------------------------------------------------------------------------- /pkg/kubelet/image/image_test.go: -------------------------------------------------------------------------------- 1 | package image 2 | 3 | import ( 4 | "minik8s/pkg/kubelet/utils" 5 | "testing" 6 | ) 7 | 8 | func TestEnsureImage(t *testing.T) { 9 | testcases := []string{ 10 | "ubuntu", 11 | "gpu-server", 12 | "gpu-server:latest", 13 | "master:5000/gpu-server", 14 | "master:5000/gpu-server:latest", 15 | } 16 | ns := "test-image" 17 | client, err := utils.NewClient(ns) 18 | if err != nil { 19 | t.Fatalf("client failed") 20 | } 21 | for _, i := range testcases { 22 | utils.Ctl(ns, "rmi", i) 23 | image := EnsureImage(ns, client, i) 24 | if image == nil { 25 | t.Fatalf("pull image %v failed", 
i) 26 | } 27 | image = EnsureImage(ns, client, i) 28 | if image == nil { 29 | t.Fatalf("pull exist image %v failed", i) 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /pkg/kubelet/metricsserver/handler.go: -------------------------------------------------------------------------------- 1 | package metricsserver 2 | 3 | import ( 4 | "github.com/gin-gonic/gin" 5 | "minik8s/config" 6 | "minik8s/pkg/apiobject" 7 | "minik8s/pkg/kubelet/pod" 8 | kubeletUtils "minik8s/pkg/kubelet/utils" 9 | "minik8s/utils" 10 | "net/http" 11 | ) 12 | 13 | type route struct { 14 | Path string 15 | Method string 16 | Handler gin.HandlerFunc 17 | } 18 | 19 | var handlerTable = [...]route{ 20 | //{Path: "/:namespace/:pod", Method: "GET", Handler: nil}, 21 | {Path: "/:namespace/:pod", Method: "GET", Handler: getPodMetricsHandler}, 22 | } 23 | 24 | func getPodMetricsHandler(c *gin.Context) { 25 | namespace := c.Param("namespace") 26 | name := c.Param("pod") 27 | info := utils.GetObject(config.POD, namespace, name) 28 | p := &apiobject.Pod{} 29 | p.UnMarshalJSON([]byte(info)) 30 | 31 | kubeletUtils.RLock(namespace, name) 32 | metrics := pod.GetPodMetrics(namespace, *p) 33 | kubeletUtils.RUnLock(namespace, name) 34 | c.JSON(http.StatusOK, metrics) 35 | } 36 | -------------------------------------------------------------------------------- /pkg/kubelet/metricsserver/metricserver.go: -------------------------------------------------------------------------------- 1 | package metricsserver 2 | 3 | import ( 4 | "github.com/gin-gonic/gin" 5 | ) 6 | 7 | type MetricsServer struct { 8 | HttpServer *gin.Engine 9 | } 10 | 11 | func NewMetricsServer() *MetricsServer { 12 | return &MetricsServer{HttpServer: gin.Default()} 13 | } 14 | 15 | func (s *MetricsServer) RegisterHandler(r route) { 16 | switch r.Method { 17 | case "GET": 18 | s.HttpServer.GET(r.Path, r.Handler) 19 | case "POST": 20 | s.HttpServer.POST(r.Path, r.Handler) 21 | case "PUT": 22 | 
s.HttpServer.PUT(r.Path, r.Handler) 23 | case "DELETE": 24 | s.HttpServer.DELETE(r.Path, r.Handler) 25 | } 26 | } 27 | 28 | func (s *MetricsServer) Run(addr string) error { 29 | for _, r := range handlerTable { 30 | s.RegisterHandler(r) 31 | } 32 | return s.HttpServer.Run(addr) 33 | } 34 | -------------------------------------------------------------------------------- /pkg/kubelet/pod/podutil.go: -------------------------------------------------------------------------------- 1 | package pod 2 | 3 | import ( 4 | "fmt" 5 | "minik8s/pkg/apiobject" 6 | "strconv" 7 | "strings" 8 | ) 9 | 10 | func parseMemory(m string) (uint64, error) { 11 | loc := strings.Index(m, "Mi") 12 | if loc == -1 { 13 | res, err := strconv.ParseUint(m, 10, 64) 14 | return res, err 15 | } 16 | res, err := strconv.ParseUint(m[:loc], 10, 64) 17 | return res * 1024 * 1024, err 18 | } 19 | 20 | func parseCmd(cmd []string, args []string) []string { 21 | if cmd == nil && args == nil { 22 | return []string{} 23 | } 24 | if cmd == nil { 25 | //invalid 26 | return []string{} 27 | } 28 | res := make([]string, len(cmd)+len(args)) 29 | copy(res, cmd) 30 | if args != nil { 31 | res = append(cmd, args...) 
32 | } 33 | return res 34 | } 35 | 36 | func parseEnv(envs []apiobject.Env) []string { 37 | res := make([]string, len(envs), len(envs)) 38 | for i, env := range envs { 39 | res[i] = fmt.Sprintf("%s=%s", env.Name, env.Value) 40 | } 41 | return res 42 | } 43 | -------------------------------------------------------------------------------- /pkg/kubelet/pod/podutil_test.go: -------------------------------------------------------------------------------- 1 | package pod 2 | 3 | import ( 4 | "minik8s/pkg/apiobject" 5 | "testing" 6 | ) 7 | 8 | func TestParseMemory(t *testing.T) { 9 | res, err := parseMemory("100Mi") 10 | if err != nil || res != 100*1024*1024 { 11 | t.Fatalf("test parseMemory error") 12 | } 13 | res, err = parseMemory("10000") 14 | if err != nil || res != 10000 { 15 | t.Fatalf("test parseMemory failed") 16 | } 17 | } 18 | 19 | func TestParseCmd(t *testing.T) { 20 | res := parseCmd([]string{"/bin/bash"}, []string{"-c", "echo Hello Kubernetes!"}) 21 | if len(res) != 3 || res[0] != "/bin/bash" || res[1] != "-c" || res[2] != "echo Hello Kubernetes!" 
{ 22 | t.Fatalf("test parsecmd failed") 23 | } 24 | } 25 | 26 | func TestParseEnv(t *testing.T) { 27 | res := parseEnv([]apiobject.Env{ 28 | {"a", "b"}, {"c", "d"}, 29 | }) 30 | if len(res) != 2 || res[0] != "a=b" || res[1] != "c=d" { 31 | t.Fatalf("test parseenv failed") 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /pkg/kubelet/run.go: -------------------------------------------------------------------------------- 1 | package kubelet 2 | 3 | import ( 4 | "fmt" 5 | "minik8s/config" 6 | "time" 7 | ) 8 | 9 | type Config struct { 10 | ApiserverAddr string // 192.168.1.13:8080 11 | FlannelSubnet string //10.2.17.1/24 12 | IP string //192.168.1.12 13 | Labels map[string]string 14 | ListenAddr string //localhost:10250 15 | CPU string 16 | Memory string 17 | } 18 | 19 | func Run(c Config) { 20 | config.ApiServerIp = c.ApiserverAddr 21 | kl := NewKubelet(c) 22 | go func() { 23 | for { 24 | if !kl.register() { 25 | time.Sleep(time.Second * 10) 26 | continue 27 | } 28 | time.Sleep(time.Second * 2) 29 | //normally, watch Pod will not return 30 | kl.watchPod() 31 | fmt.Println("trying to reconnect to apiserver") 32 | time.Sleep(time.Second * 10) 33 | } 34 | }() 35 | 36 | go kl.watchContainersStatus() 37 | err := kl.Server.Run(kl.ListenAddr) 38 | if err != nil { 39 | fmt.Println(err.Error()) 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /pkg/kubelet/utils/helper.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "github.com/containerd/containerd" 7 | "os/exec" 8 | "strings" 9 | "minik8s/pkg/apiobject" 10 | ) 11 | 12 | var runtimePath, _ = exec.LookPath("nerdctl") 13 | 14 | func Ctl(namespace string, args ...string) (string, error) { 15 | //fmt.Println(append([]string{"-n", namespace}, args...)) 16 | res, err := exec.Command(runtimePath, append([]string{"-n", namespace}, 
args...)...).CombinedOutput() 17 | return string(res), err 18 | } 19 | 20 | func CheckCmd(namespace string, containerName string, args []string, answer string) bool { 21 | output, _ := Ctl(namespace, append([]string{"exec", containerName}, args...)...) 22 | return strings.Index(output, answer) > -1 23 | } 24 | 25 | func GetInfo(namespace, containerName, fields string) (string, error) { 26 | output, err := Ctl(namespace, "inspect", "-f", fmt.Sprintf("{{%s}}", fields), containerName) 27 | if err != nil { 28 | return "", err 29 | } 30 | //remove the last \n 31 | return output[:len(output)-1], nil 32 | } 33 | 34 | func NewClient(namespace string) (*containerd.Client, error) { 35 | return containerd.New("/run/containerd/containerd.sock", containerd.WithDefaultNamespace(namespace)) 36 | } 37 | 38 | func GetContainersByPod(pod apiobject.Pod)[]containerd.Container{ 39 | client, err := NewClient(pod.Data.Namespace) 40 | if err != nil { 41 | return nil 42 | } 43 | ctx := context.Background() 44 | containers, err := client.Containers(ctx,fmt.Sprintf("labels.%q==%s", "pod", pod.Data.Name)) 45 | if err!=nil{ 46 | return nil 47 | } 48 | return containers 49 | } 50 | -------------------------------------------------------------------------------- /pkg/kubelet/utils/lock.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | ) 7 | 8 | var locks = sync.Map{} 9 | var lockForLocks = sync.Mutex{} 10 | 11 | func RLock(namespace, podName string) { 12 | key := fmt.Sprintf("%s-%s", namespace, podName) 13 | if mutex, ok := locks.Load(key); ok { 14 | mutex.(*sync.RWMutex).RLock() 15 | } else { 16 | lockForLocks.Lock() 17 | mutex := &sync.RWMutex{} 18 | mutex.RLock() 19 | locks.Store(key, mutex) 20 | lockForLocks.Unlock() 21 | } 22 | } 23 | 24 | func Lock(namespace, podName string) { 25 | key := fmt.Sprintf("%s-%s", namespace, podName) 26 | if mutex, ok := locks.Load(key); ok { 27 | 
mutex.(*sync.RWMutex).Lock() 28 | } else { 29 | lockForLocks.Lock() 30 | mutex := &sync.RWMutex{} 31 | mutex.Lock() 32 | locks.Store(key, mutex) 33 | lockForLocks.Unlock() 34 | } 35 | } 36 | func RUnLock(namespace, podName string) { 37 | key := fmt.Sprintf("%s-%s", namespace, podName) 38 | if mutex, ok := locks.Load(key); ok { 39 | mutex.(*sync.RWMutex).RUnlock() 40 | } else { 41 | panic("unlock a non-exist lock") 42 | } 43 | } 44 | 45 | func UnLock(namespace, podName string) { 46 | key := fmt.Sprintf("%s-%s", namespace, podName) 47 | if mutex, ok := locks.Load(key); ok { 48 | mutex.(*sync.RWMutex).Unlock() 49 | } else { 50 | panic("unlock a non-exist lock") 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /pkg/kubelet/utils/lock_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | ) 7 | 8 | func TestReadLock(t *testing.T) { 9 | 10 | wg := sync.WaitGroup{} 11 | wg.Add(1) 12 | 13 | RLock("test", "pod1") 14 | go func() { 15 | RLock("test", "pod1") 16 | wg.Done() 17 | }() 18 | wg.Wait() 19 | RUnLock("test", "pod1") 20 | } 21 | 22 | //deadlock is correct 23 | 24 | //func TestWriteLock(t *testing.T) { 25 | // wg := sync.WaitGroup{} 26 | // wg.Add(1) 27 | // 28 | // RLock("test", "pod1") 29 | // go func() { 30 | // Lock("test", "pod1") 31 | // wg.Done() 32 | // }() 33 | // wg.Wait() 34 | // RUnLock("test", "pod1") 35 | //} 36 | -------------------------------------------------------------------------------- /pkg/kubeproxy/ipvs/ops.go: -------------------------------------------------------------------------------- 1 | package ipvs 2 | 3 | import ( 4 | "fmt" 5 | "github.com/mqliang/libipvs" 6 | log "github.com/sirupsen/logrus" 7 | "net" 8 | "os/exec" 9 | "strconv" 10 | "syscall" 11 | "time" 12 | ) 13 | 14 | var handler libipvs.IPVSHandle 15 | 16 | func Init() { 17 | h, err := libipvs.New() 18 | handler = h 19 | if err != 
nil {
		fmt.Println(err.Error())
	}

	_, err = exec.Command("sysctl", []string{"net.ipv4.vs.conntrack=1"}...).CombinedOutput()
	if err != nil {
		fmt.Println(err.Error())
	}

}

// TestConfig wires up a hard-coded service/endpoint pair for manual testing.
func TestConfig() {
	svc := addService("10.9.0.1", 12)
	bindEndpoint(svc, "10.2.17.54", 12345)
}

// AddService registers a ClusterIP service (ip:port) with IPVS, unless it is
// already known, and records it in the Services table.
func AddService(ip string, port uint16) {
	serviceIP := ip + ":" + strconv.Itoa(int(port))
	if _, ok := Services[serviceIP]; ok {
		return
	}
	svc := addService(ip, port)
	Services[serviceIP] = &ServiceNode{
		Service:   svc,
		Visited:   true,
		Endpoints: map[string]*EndpointNode{},
	}
	log.Info("[kubeproxy] Add service ", serviceIP)
}

// addService creates the IPVS virtual service, binds the VIP to flannel.1 and
// installs the SNAT iptables rule. Errors are logged and ignored (best effort).
func addService(ip string, port uint16) *libipvs.Service {
	// Create a service struct and add it to the ipvs.
	// Equal to the cmd: ipvsadm -A -t 10.10.0.1:8410 -s rr
	svc := &libipvs.Service{
		Address:       net.ParseIP(ip),
		AddressFamily: syscall.AF_INET,
		Protocol:      libipvs.Protocol(syscall.IPPROTO_TCP),
		Port:          port,
		SchedName:     libipvs.RoundRobin,
	}

	if err := handler.NewService(svc); err != nil {
		fmt.Println(err.Error())
	}

	// Bind the ip address to the NIC (flannel.1 here)
	// Equal to the cmd: ip addr add 10.10.0.1/24 dev flannel.1
	args := []string{"addr", "add", ip + "/24", "dev", "flannel.1"}
	_, err := exec.Command("ip", args...).CombinedOutput()
	if err != nil {
		fmt.Println(err.Error())
	}

	// Configure the iptable: add SNAT rule
	// Equal to the cmd: iptables -t nat -A POSTROUTING -m ipvs --vaddr 10.9.0.1 --vport 12 -j MASQUERADE
	args = []string{"-t", "nat", "-A", "POSTROUTING", "-m", "ipvs", "--vaddr", ip, "--vport", strconv.Itoa(int(svc.Port)), "-j", "MASQUERADE"}
	_, err = exec.Command("iptables", args...).CombinedOutput()
	if err != nil {
		fmt.Println(err.Error())
	}

	return svc
}

// DeleteService removes a service (keyed "ip:port") from IPVS and from the
// Services table. Unknown keys are ignored.
func DeleteService(key string) {
	log.Info("[kubeproxy] Delete service ", key)
	node := Services[key]
	if node != nil {
		deleteService(node.Service)
	}
	delete(Services, key)
}

func deleteService(svc *libipvs.Service) {
	if err := handler.DelService(svc); err != nil {
		fmt.Println(err.Error())
	}
}

// AddEndpoint binds a real server (ip:port) to the service keyed by `key`,
// polling until the service has been registered by the service handler.
func AddEndpoint(key string, ip string, port uint16) {
	svc, exist := Services[key]
	for !exist {
		// Wait a full second between retries. The previous time.Sleep(1)
		// slept for one *nanosecond* (a bare int is a Duration in ns),
		// which busy-spun the CPU while waiting.
		time.Sleep(time.Second)
		log.Info("[proxy] Add Endpoint: service doesn't exist!")
		svc, exist = Services[key]
	}
	dst := bindEndpoint(svc.Service, ip, port)
	podIP := ip + ":" + strconv.Itoa(int(port))
	svc.Endpoints[podIP] = &EndpointNode{
		Endpoint: dst,
		Visited:  true,
	}
	log.Info("[kubeproxy] Add endpoint ", podIP, " service:", key)
}

// bindEndpoint adds a NAT-mode (-m) destination to the service via ipvsadm.
func bindEndpoint(svc *libipvs.Service, ip string, port uint16) *libipvs.Destination {
	dst := libipvs.Destination{
		Address:       net.ParseIP(ip),
		AddressFamily: syscall.AF_INET,
		Port:          port,
	}

	//print(svc.Address.String() + ":" + strconv.Itoa(int(svc.Port)))
	args := []string{"-a", "-t", svc.Address.String() + ":" + strconv.Itoa(int(svc.Port)), "-r", ip + ":" + strconv.Itoa(int(port)), "-m"}
	_, err := exec.Command("ipvsadm", args...).CombinedOutput()
	if err != nil {
		fmt.Println(err.Error())
	}

	return &dst
}

// DeleteEndpoint unbinds the destination keyed dstKey ("ip:port") from the
// service keyed svcKey. Missing services or endpoints are ignored; the
// previous code dereferenced a nil *EndpointNode when dstKey was unknown.
func DeleteEndpoint(svcKey string, dstKey string) {
	if svc, ok := Services[svcKey]; ok {
		if node, ok := svc.Endpoints[dstKey]; ok {
			unbindEndpoint(svc.Service, node.Endpoint)
			delete(svc.Endpoints, dstKey)
		}
	}
	log.Info("[kubeproxy] Delete endpoint ", dstKey, " service:", svcKey)
}

func unbindEndpoint(svc *libipvs.Service, dst *libipvs.Destination) {
	if err := handler.DelDestination(svc, dst); err != nil {
		fmt.Println(err.Error())
	}
}
-------------------------------------------------------------------------------- /pkg/kubeproxy/ipvs/state.go: -------------------------------------------------------------------------------- 1 | package ipvs 2 | 3 | import "github.com/mqliang/libipvs" 4 | 5 | type ServiceNode struct { 6 | Service *libipvs.Service 7 | Endpoints map[string]*EndpointNode 8 | Visited bool 9 | } 10 | type EndpointNode struct { 11 | Endpoint *libipvs.Destination 12 | //endpoints []*libipvs.Destination 13 | Visited bool 14 | } 15 | 16 | var Services = make(map[string]*ServiceNode) 17 | 18 | //var Endpoints map[string]EndpointNode 19 | -------------------------------------------------------------------------------- /pkg/kubeproxy/proxy.go: -------------------------------------------------------------------------------- 1 | package kubeproxy 2 | 3 | /* 主要工作: 4 | 1. 监听service资源的创建。创建service 5 | 2. 监听service资源的删除。删除service 6 | 3. 监听endpoint的创建。设置dest规则。 7 | 4. 监听endpoint的删除。删除对应dest规则。 8 | */ 9 | 10 | import ( 11 | "minik8s/config" 12 | "minik8s/pkg/apiobject" 13 | "minik8s/pkg/kubeproxy/ipvs" 14 | "minik8s/utils" 15 | "strconv" 16 | ) 17 | 18 | func Run() { 19 | ipvs.Init() 20 | //ipvs.TestConfig() 21 | var p proxyServiceHandler 22 | var e proxyEndpointHandler 23 | go utils.Sync(p) 24 | go utils.Sync(e) 25 | utils.WaitForever() 26 | //fmt.Println("end") 27 | 28 | } 29 | 30 | /* ========== Start Service Handler ========== */ 31 | 32 | type proxyServiceHandler struct { 33 | } 34 | 35 | func (p proxyServiceHandler) HandleCreate(message []byte) { 36 | 37 | } 38 | 39 | func (p proxyServiceHandler) HandleDelete(message []byte) { 40 | svc := &apiobject.Service{} 41 | svc.UnMarshalJSON(message) 42 | 43 | for _, p := range svc.Spec.Ports { 44 | key := svc.Status.ClusterIP + ":" + strconv.Itoa(int(p.Port)) 45 | ipvs.DeleteService(key) 46 | } 47 | 48 | } 49 | 50 | func (p proxyServiceHandler) HandleUpdate(message []byte) { 51 | svc := &apiobject.Service{} 52 | svc.UnMarshalJSON(message) 53 | 54 | for 
_, p := range svc.Spec.Ports { 55 | ipvs.AddService(svc.Status.ClusterIP, uint16(p.Port)) 56 | } 57 | 58 | } 59 | 60 | func (p proxyServiceHandler) GetType() config.ObjType { 61 | return config.SERVICE 62 | } 63 | 64 | /* ========== Start Endpoint Handler ========== */ 65 | 66 | type proxyEndpointHandler struct { 67 | } 68 | 69 | func (e proxyEndpointHandler) HandleCreate(message []byte) { 70 | edpt := &apiobject.Endpoint{} 71 | edpt.UnMarshalJSON(message) 72 | 73 | key := edpt.Spec.SvcIP + ":" + strconv.Itoa(int(edpt.Spec.SvcPort)) 74 | ipvs.AddEndpoint(key, edpt.Spec.DestIP, uint16(edpt.Spec.DestPort)) 75 | } 76 | 77 | func (e proxyEndpointHandler) HandleDelete(message []byte) { 78 | edpt := &apiobject.Endpoint{} 79 | edpt.UnMarshalJSON(message) 80 | 81 | svcKey := edpt.Spec.SvcIP + ":" + strconv.Itoa(int(edpt.Spec.SvcPort)) 82 | dstKey := edpt.Spec.DestIP + ":" + strconv.Itoa(int(edpt.Spec.DestPort)) 83 | ipvs.DeleteEndpoint(svcKey, dstKey) 84 | } 85 | 86 | func (e proxyEndpointHandler) HandleUpdate(message []byte) { 87 | 88 | } 89 | 90 | func (e proxyEndpointHandler) GetType() config.ObjType { 91 | return config.ENDPOINT 92 | } 93 | 94 | //func HandleServiceChange(message string) { 95 | // // traverse the service list, add the new service 96 | // serviceList := gjson.Get(message, "").Array() 97 | // for _, svc := range serviceList { 98 | // clusterIP := gjson.Get(svc.String(), "spec.clusterIP").String() 99 | // ports := gjson.Get(svc.String(), "spec.ports").Array() 100 | // for _, port := range ports { 101 | // serviceIP := clusterIP + ":" + port.String() 102 | // if node, ok := ipvs.Services[serviceIP]; !ok { 103 | // ipvs.AddService(clusterIP, uint16(port.Int())) 104 | // } else { 105 | // node.Visited = true 106 | // } 107 | // } 108 | // } 109 | // 110 | // // traverse the service list in proxy, delete the service not visited 111 | // for k, node := range ipvs.Services { 112 | // if node.Visited == false { 113 | // ipvs.DeleteService(k, node) 114 | // } 
else { 115 | // node.Visited = false 116 | // } 117 | // } 118 | //} 119 | // 120 | //func HandleEndpointChange(message string) { 121 | // eptList := gjson.Get(message, "").Array() 122 | // for _, epts := range eptList { 123 | // clusterIP := gjson.Get(epts.String(), "clusterIP").String() 124 | // port := gjson.Get(epts.String(), "port").String() 125 | // serviceIP := clusterIP + port 126 | // if svc, ok := ipvs.Services[serviceIP]; ok { 127 | // dests := gjson.Get(epts.String(), "subsets").Array() 128 | // // traverse the endpoints list, add the new endpoints 129 | // for _, dest := range dests { 130 | // ip := gjson.Get(dest.String(), "IP").String() 131 | // port := gjson.Get(dest.String(), "Port").Int() 132 | // podIP := ip + ":" + string(port) 133 | // if edpNode, ok := svc.Endpoints[podIP]; !ok { 134 | // ipvs.AddEndpoint(svc, ip, uint16(port)) 135 | // } else { 136 | // edpNode.Visited = true 137 | // } 138 | // } 139 | // // traverse the endpoint list in proxy, delete the endpoint not visited 140 | // for k, node := range svc.Endpoints { 141 | // if node.Visited == false { 142 | // ipvs.DeleteEndpoint(svc, node.Endpoint, k) 143 | // } else { 144 | // node.Visited = false 145 | // } 146 | // } 147 | // 148 | // } 149 | // } 150 | // 151 | //} 152 | 153 | func syncRunner() { 154 | 155 | } 156 | -------------------------------------------------------------------------------- /pkg/kubeproxy/proxy_test.go: -------------------------------------------------------------------------------- 1 | package kubeproxy 2 | 3 | import ( 4 | "minik8s/pkg/kubeproxy/ipvs" 5 | "testing" 6 | ) 7 | 8 | func TestProxy(t *testing.T) { 9 | ipvs.Init() 10 | var e proxyEndpointHandler 11 | var s proxyServiceHandler 12 | /* test add service and add endpoint */ 13 | svcJson := "{\n \"apiVersion\": \"v1\",\n \"kind\": \"Service\",\n \"metadata\": {\n \"name\": \"service-practice\"\n },\n \"spec\": {\n \"selector\": {\n \"app\": \"deploy-1\"\n },\n \"type\": \"ClusterIP\",\n \"ports\": [\n 
{\n \"name\": \"service-port1\",\n \"protocol\": \"TCP\",\n \"port\": 8080,\n \"targetPort\": \"p1\"\n }\n ]\n },\n \"status\":{\n \"ClusterIP\":\"10.10.0.2\"\n }\n}" 14 | s.HandleUpdate([]byte(svcJson)) 15 | edptJson := "{\n \"metadata\": {\n \"name\": \"my-service\"\n },\n \"spec\": {\n \"svcIP\": \"10.10.0.2\",\n \"svcPort\": 8080,\n \"dstIP\": \"10.2.17.54\",\n \"dstPort\": 12345\n }\n}" 16 | edptJson2 := "{\n \"metadata\": {\n \"name\": \"my-service\"\n },\n \"spec\": {\n \"svcIP\": \"10.10.0.2\",\n \"svcPort\": 8080,\n \"dstIP\": \"10.2.18.54\",\n \"dstPort\": 12345\n }\n}" 17 | e.HandleCreate([]byte(edptJson)) 18 | e.HandleCreate([]byte(edptJson2)) 19 | 20 | if svc, ok := ipvs.Services["10.10.0.2:8080"]; !ok { 21 | t.Error("Add Service Fail") 22 | } else { 23 | if _, ok := svc.Endpoints["10.2.17.54:12345"]; !ok { 24 | t.Error("Add Endpoint Fail") 25 | } 26 | if _, ok := svc.Endpoints["10.2.18.54:12345"]; !ok { 27 | t.Error("Add Endpoint Fail") 28 | } 29 | } 30 | 31 | /* test delete endpoint */ 32 | e.HandleDelete([]byte(edptJson)) 33 | svc := ipvs.Services["10.10.0.2:8080"] 34 | if _, ok := svc.Endpoints["10.2.17.54:12345"]; ok { 35 | t.Error("Add Endpoint Fail") 36 | } 37 | if _, ok := svc.Endpoints["10.2.18.54:12345"]; !ok { 38 | t.Error("Add Endpoint Fail") 39 | } 40 | 41 | /* test delete service */ 42 | s.HandleDelete([]byte(svcJson)) 43 | if _, ok := ipvs.Services["10.10.0.2:8080"]; ok { 44 | t.Error("Add Endpoint Fail") 45 | } 46 | e.HandleDelete([]byte(edptJson2)) 47 | } 48 | -------------------------------------------------------------------------------- /pkg/kubescheduler/doc.go: -------------------------------------------------------------------------------- 1 | package kubescheduler 2 | -------------------------------------------------------------------------------- /pkg/kubescheduler/filter/configfilter_test.go: -------------------------------------------------------------------------------- 1 | package filter 2 | 3 | import ( 4 | log 
"github.com/sirupsen/logrus" 5 | "minik8s/pkg/apiobject" 6 | "minik8s/pkg/kubescheduler/testutils" 7 | "testing" 8 | ) 9 | 10 | func TestConfigFilter_PreFilter(t *testing.T) { 11 | var pod *apiobject.Pod 12 | filter := NewConfigFilter() 13 | 14 | // test empty pod 15 | ret := filter.PreFilter(pod) 16 | if ret != false { 17 | t.Error("[TestConfigFilter_PreFilter] test empty pod fail") 18 | } 19 | 20 | // test illegal pod 21 | pod = testutils.CreateIllegalPod() 22 | ret = filter.PreFilter(pod) 23 | if ret != false { 24 | t.Error("[TestConfigFilter_PreFilter] test illegal pod fail") 25 | } 26 | 27 | // test legal pod 28 | pod = testutils.CreatePod() 29 | ret = filter.PreFilter(pod) 30 | if ret != true { 31 | t.Error("[TestConfigFilter_PreFilter] test legal pod fail") 32 | } 33 | } 34 | 35 | func TestConfigFilter_CheckNodeStatus(t *testing.T) { 36 | nodes := make([]*apiobject.Node, 0) 37 | node1 := testutils.CreateNode("test-node1", apiobject.Ready, "100m", "256Mi", "200m", "512Mi", "192.168.1.13") 38 | nodes = append(nodes, node1) 39 | 40 | node2 := testutils.CreateNode("test-node2", apiobject.NetworkUnavailable, "100m", "256Mi", "200m", "512Mi", "192.168.1.13") 41 | nodes = append(nodes, node2) 42 | 43 | node3 := testutils.CreateNode("test-node3", apiobject.Ready, "100m", "256Mi", "200m", "512Mi", "") 44 | nodes = append(nodes, node3) 45 | 46 | filter := NewConfigFilter() 47 | 48 | result := filter.CheckNodeStatus(nodes) 49 | if len(result) != 1 { 50 | t.Error("[TestConfigFilter_CheckNodeStatus] test fail") 51 | } 52 | } 53 | 54 | func TestConfigFilter_CheckNodeSelector(t *testing.T) { 55 | nodes := make([]*apiobject.Node, 0) 56 | node1 := testutils.CreateNode("test-node1", apiobject.Ready, "100m", "256Mi", "200m", "512Mi", "192.168.1.13") 57 | nodes = append(nodes, node1) 58 | 59 | node2 := testutils.CreateNode("test-node2", apiobject.Ready, "100m", "256Mi", "200m", "512Mi", "192.168.1.14") 60 | node2.Data.Labels = map[string]string{ 61 | "disktype": "ssd", 62 | } 
63 | nodes = append(nodes, node2) 64 | 65 | filter := NewConfigFilter() 66 | 67 | pod := testutils.CreatePod() 68 | 69 | result := filter.CheckNodeSelector(pod, nodes) 70 | 71 | if len(result) != 1 { 72 | t.Error("[TestConfigFilter_CheckNodeSelector] test fail") 73 | } 74 | } 75 | 76 | func TestConfigFilter_GetResourceRequest(t *testing.T) { 77 | pod := testutils.CreatePod() 78 | filter := NewConfigFilter() 79 | 80 | cpu, memory := filter.GetResourceRequest(pod) 81 | 82 | log.Info("[TestConfigFilter_GetResourceRequest] cpu: ", cpu, " memory: ", memory) 83 | } 84 | 85 | func TestConfigFilter_CheckResource(t *testing.T) { 86 | nodes := make([]*apiobject.Node, 0) 87 | node1 := testutils.CreateNode("test-node1", apiobject.Ready, "100m", "256Mi", "200m", "512Mi", "192.168.119.132") 88 | nodes = append(nodes, node1) 89 | 90 | node2 := testutils.CreateNode("test-node2", apiobject.Ready, "150m", "256Mi", "300m", "345Mi", "192.168.119.128") 91 | nodes = append(nodes, node2) 92 | 93 | node3 := testutils.CreateNode("test-node3", apiobject.Ready, "10m", "10Mi", "20m", "20Mi", "192.168.119.134") 94 | nodes = append(nodes, node3) 95 | 96 | filter := NewConfigFilter() 97 | pod := testutils.CreatePod() 98 | 99 | cpuRequest, memoryRequest := filter.GetResourceRequest(pod) 100 | 101 | result := filter.CheckResource(cpuRequest, memoryRequest, nodes) 102 | if len(result) != 2 { 103 | t.Error("[TestConfigFilter_CheckResource] test fail") 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /pkg/kubescheduler/filter/templatefilter.go: -------------------------------------------------------------------------------- 1 | package filter 2 | 3 | import "minik8s/pkg/apiobject" 4 | 5 | // TemplateFilter is a interface for filter 6 | type TemplateFilter interface { 7 | // Filter give a list a node and a pod, return a list of node 8 | Filter(pod *apiobject.Pod, nodes []*apiobject.Node) []*apiobject.Node 9 | // PreFilter runs a set of functions 
against a pod. If any of the functions returns an error, the pod is rejected. 10 | PreFilter(pod *apiobject.Pod) bool 11 | } 12 | -------------------------------------------------------------------------------- /pkg/kubescheduler/policy/lrscheduler.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "minik8s/pkg/apiobject" 6 | "minik8s/pkg/kubescheduler/filter" 7 | ) 8 | 9 | // LeastRequestScheduler support select node that has least scheduled pods 10 | type LeastRequestScheduler struct { 11 | Filter filter.TemplateFilter 12 | frequency map[string]int 13 | } 14 | 15 | func NewLeastRequestScheduler(filter filter.TemplateFilter) *LeastRequestScheduler { 16 | frq := make(map[string]int) 17 | return &LeastRequestScheduler{ 18 | frequency: frq, 19 | Filter: filter, 20 | } 21 | } 22 | 23 | func (s *LeastRequestScheduler) Schedule(pod *apiobject.Pod, nodes []*apiobject.Node) []*apiobject.Node { 24 | // first precheck the pod 25 | ret := s.Filter.PreFilter(pod) 26 | if !ret { 27 | return nil 28 | } 29 | 30 | // then filter the nodes 31 | nodes = s.Filter.Filter(pod, nodes) 32 | 33 | // sort the node by their score 34 | length := len(nodes) 35 | scores := make([]float64, length) 36 | for i, node := range nodes { 37 | scores[i] = s.Score(node) 38 | log.Info("[Schedule] node ", node.Data.Name, " score: ", scores[i]) 39 | } 40 | 41 | // sort the node by their score, from low to high 42 | for i := 0; i < length; i++ { 43 | for j := i + 1; j < length; j++ { 44 | if scores[i] > scores[j] { 45 | scores[i], scores[j] = scores[j], scores[i] 46 | nodes[i], nodes[j] = nodes[j], nodes[i] 47 | } 48 | } 49 | } 50 | if length > 0 { 51 | s.frequency[nodes[0].Data.Name] += 1 52 | } 53 | return nodes 54 | } 55 | 56 | func (s *LeastRequestScheduler) Score(node *apiobject.Node) float64 { 57 | score, ok := s.frequency[node.Data.Name] 58 | if !ok { 59 | score = 0 60 | } 61 | return 
float64(score) 62 | } 63 | -------------------------------------------------------------------------------- /pkg/kubescheduler/policy/lrscheduler_test.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "minik8s/pkg/apiobject" 6 | "minik8s/pkg/kubescheduler/filter" 7 | "minik8s/pkg/kubescheduler/testutils" 8 | "testing" 9 | ) 10 | 11 | func TestResourceScheduler_Schedule(t *testing.T) { 12 | pod := testutils.CreatePod() 13 | 14 | // create nodes 15 | nodes := make([]*apiobject.Node, 0) 16 | 17 | node1 := testutils.CreateNode("test-node1", apiobject.Ready, "100m", "256Mi", "200m", "512Mi", "192.168.119.132") 18 | node1.Data.Labels = map[string]string{ 19 | "disktype": "ssd", 20 | } 21 | nodes = append(nodes, node1) 22 | 23 | node2 := testutils.CreateNode("test-node2", apiobject.Ready, "150m", "256Mi", "300m", "345Mi", "192.168.119.128") 24 | node2.Data.Labels = map[string]string{ 25 | "disktype": "ssd", 26 | } 27 | nodes = append(nodes, node2) 28 | 29 | node3 := testutils.CreateNode("test-node3", apiobject.Ready, "150m", "256Mi", "300m", "345Mi", "192.168.119.128") 30 | node3.Data.Labels = map[string]string{ 31 | "disktype": "ssd", 32 | } 33 | nodes = append(nodes, node3) 34 | 35 | // create scheduler 36 | concreteFilter := filter.NewConfigFilter() 37 | var f filter.TemplateFilter 38 | f = concreteFilter 39 | scheduler := NewLeastRequestScheduler(f) 40 | // schedule pod for three times 41 | for i := 0; i < 3; i++ { 42 | log.Info("[TestResourceScheduler_Schedule] schedule pod for ", i, " time[s]") 43 | selectedNode := scheduler.Schedule(pod, nodes) 44 | for _, n := range selectedNode { 45 | log.Info("[TestResourceScheduler_Schedule] the selected node is: ", n.Data.Name) 46 | } 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- /pkg/kubescheduler/policy/resourcescheduler.go: 
-------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "minik8s/pkg/apiobject" 5 | "minik8s/pkg/kubescheduler/filter" 6 | "minik8s/utils/resourceutils" 7 | "sort" 8 | 9 | log "github.com/sirupsen/logrus" 10 | ) 11 | 12 | type ResourceScheduler struct { 13 | Filter filter.TemplateFilter 14 | PodQueue map[string]*apiobject.Pod 15 | } 16 | 17 | func NewResourceScheduler(filter filter.TemplateFilter) *ResourceScheduler { 18 | newQueue := make(map[string]*apiobject.Pod) 19 | return &ResourceScheduler{ 20 | Filter: filter, 21 | PodQueue: newQueue, 22 | } 23 | } 24 | 25 | func (s ResourceScheduler) Schedule(pod *apiobject.Pod, nodes []*apiobject.Node) []*apiobject.Node { 26 | // first precheck the pod 27 | ret := s.Filter.PreFilter(pod) 28 | if !ret { 29 | return nil 30 | } 31 | 32 | // then filter the nodes 33 | nodes = s.Filter.Filter(pod, nodes) 34 | 35 | // sort the node by their score 36 | length := len(nodes) 37 | scores := make([]float64, length) 38 | for i, node := range nodes { 39 | scores[i] = s.Score(node) 40 | log.Info("[Schedule] node ", node.Data.Name, " score: ", scores[i]) 41 | } 42 | sort.Slice(nodes, func(i, j int) bool { 43 | return scores[i] > scores[j] 44 | }) 45 | return nodes 46 | } 47 | 48 | // Score is according the node's capacity 49 | func (s ResourceScheduler) Score(node *apiobject.Node) float64 { 50 | totalScore := 0.0 51 | if node.Status.Allocatable == nil { 52 | return totalScore 53 | } 54 | 55 | cpu, ok := node.Status.Allocatable["cpu"] 56 | cpuCap, capok := node.Status.Capability["cpu"] 57 | if ok && capok { 58 | cpuAvailable, _ := resourceutils.ParseQuantity(cpu) 59 | cpuCap, _ := resourceutils.ParseQuantity(cpuCap) 60 | totalScore += cpuAvailable / cpuCap 61 | } 62 | 63 | memory, ok := node.Status.Allocatable["memory"] 64 | memoryCap, capok := node.Status.Capability["memory"] 65 | if ok && capok { 66 | memoryAvailable, _ := resourceutils.ParseQuantity(memory) 67 | 
memoryCap, _ := resourceutils.ParseQuantity(memoryCap) 68 | totalScore += memoryAvailable / memoryCap 69 | } 70 | 71 | return totalScore 72 | } 73 | -------------------------------------------------------------------------------- /pkg/kubescheduler/policy/resourcescheduler_test.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | log "github.com/sirupsen/logrus" 7 | "minik8s/pkg/apiobject" 8 | "minik8s/pkg/kubescheduler/filter" 9 | "minik8s/pkg/kubescheduler/testutils" 10 | "testing" 11 | ) 12 | 13 | func TestConfigScheduler_Schedule(t *testing.T) { 14 | pod := testutils.CreatePod() 15 | 16 | // print the pod 17 | jsonBytes, _ := json.MarshalIndent(pod, "", " ") 18 | fmt.Println(string(jsonBytes)) 19 | 20 | nodes := make([]*apiobject.Node, 0) 21 | node1 := testutils.CreateNode("test-node1", apiobject.Ready, "100m", "256Mi", "200m", "512Mi", "192.168.119.132") 22 | node1.Data.Labels = map[string]string{ 23 | "disktype": "ssd", 24 | } 25 | 26 | nodes = append(nodes, node1) 27 | 28 | node2 := testutils.CreateNode("test-node2", apiobject.Ready, "150m", "256Mi", "300m", "345Mi", "192.168.119.128") 29 | node2.Data.Labels = map[string]string{ 30 | "disktype": "ssd", 31 | } 32 | nodes = append(nodes, node2) 33 | 34 | node3 := testutils.CreateNode("test-node3", apiobject.Ready, "10m", "10Mi", "20m", "20Mi", "192.168.119.134") 35 | nodes = append(nodes, node3) 36 | 37 | node4 := testutils.CreateNode("test-node4", apiobject.NetworkUnavailable, "100m", "256Mi", "200m", "512Mi", "192.168.1.13") 38 | nodes = append(nodes, node4) 39 | 40 | for _, node := range nodes { 41 | jsonBytes, _ := json.MarshalIndent(node, "", " ") 42 | fmt.Println(string(jsonBytes)) 43 | } 44 | 45 | concreteFilter := filter.NewConfigFilter() 46 | var f filter.TemplateFilter 47 | f = concreteFilter 48 | scheduler := NewResourceScheduler(f) 49 | selectedNodes := scheduler.Schedule(pod, nodes) 50 | if 
len(selectedNodes) != 2 { 51 | t.Errorf("expected 2 nodes, but got %d", len(selectedNodes)) 52 | } 53 | 54 | for _, node := range selectedNodes { 55 | log.Info("[TestConfigScheduler_Schedule] selected node: ", node.Data.Name) 56 | } 57 | 58 | } 59 | -------------------------------------------------------------------------------- /pkg/kubescheduler/policy/templatescheduler.go: -------------------------------------------------------------------------------- 1 | package policy 2 | 3 | import "minik8s/pkg/apiobject" 4 | 5 | // 需要的函数 6 | // Filter 7 | // - isNodeSuitable 8 | // Bind: Bind binds a pod to a node 9 | // Score: Score extensions implement scoring functions for Scheduler 10 | // priority: gets a priority function for the custom scheduler 11 | // PreFilter: PreFilter runs a set of functions against a pod. If any of the functions returns an error, the pod is rejected. 12 | 13 | type Scheduler interface { 14 | // Schedule schedules a pod on a node 15 | Schedule(pod *apiobject.Pod, nodes []*apiobject.Node) []*apiobject.Node 16 | 17 | // Score extensions implement scoring functions for Scheduler 18 | Score(node *apiobject.Node) float64 19 | 20 | // Bind(pod *apiobject.Pod, node *apiobject.Node) error // Bind binds a pod to a node, now in apiserver 21 | } 22 | -------------------------------------------------------------------------------- /pkg/kubescheduler/run.go: -------------------------------------------------------------------------------- 1 | package kubescheduler 2 | 3 | import ( 4 | "minik8s/config" 5 | "minik8s/pkg/apiobject" 6 | filter2 "minik8s/pkg/kubescheduler/filter" 7 | "minik8s/pkg/kubescheduler/policy" 8 | "minik8s/utils" 9 | "net/http" 10 | "time" 11 | 12 | "github.com/gorilla/websocket" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | type Config struct { 17 | Policy string 18 | } 19 | 20 | // generate the new pointer slice 21 | func toPointerSlice(slice []apiobject.Node) []*apiobject.Node { 22 | result := make([]*apiobject.Node, 
len(slice)) 23 | for i, v := range slice { 24 | // create a new pointer which pointer to the node 25 | node := v 26 | result[i] = &node 27 | } 28 | return result 29 | } 30 | 31 | func toValueSlice(slice []*apiobject.Node) []apiobject.Node { 32 | result := make([]apiobject.Node, len(slice)) 33 | for i, v := range slice { 34 | result[i] = *v 35 | } 36 | return result 37 | } 38 | 39 | func connect(scheduler policy.Scheduler) error { 40 | // create websocket connection 41 | headers := http.Header{} 42 | headers.Set("X-Source", "scheduler") 43 | 44 | dialer := websocket.Dialer{} 45 | conn, _, err := dialer.Dial("ws://"+config.ApiServerIp+"/api/v1/watch/podList", headers) 46 | if err != nil { 47 | log.Error("[Run] scheduler websocket connect fail") 48 | return err 49 | } 50 | defer conn.Close() 51 | 52 | // create http client for ask api server 53 | httpMethod := "GET" 54 | httpUrl := "http://" + config.ApiServerIp + "/api/v1/nodes" 55 | 56 | // keep reading from websocket 57 | for { 58 | _, message, err := conn.ReadMessage() 59 | 60 | if err != nil { 61 | log.Error("[Run] scheduler websocket read message fail") 62 | return err 63 | } 64 | 65 | if len(message) == 0 { 66 | continue 67 | } 68 | 69 | // parse message 70 | pod := &apiobject.Pod{} 71 | err = pod.UnMarshalJSON(message) 72 | if err != nil { 73 | log.Error("[Run] scheduler websocket unmarshal pod message fail") 74 | conn.WriteMessage(websocket.TextMessage, []byte{}) 75 | } 76 | if pod == nil { 77 | log.Error("[Run] scheduler websocket pod is nil") 78 | conn.WriteMessage(websocket.TextMessage, []byte{}) 79 | } 80 | 81 | // check whether pod is need to be scheduled 82 | if pod.Status.Phase != apiobject.Pending { 83 | log.Error("[Run] scheduler websocket pod is nil or pod is not pending, the pod phase is: ", pod.Status.Phase) 84 | conn.WriteMessage(websocket.TextMessage, []byte{}) 85 | } 86 | 87 | // get the nodes that pod will be scheduled to 88 | response, err := utils.SendRequest(httpMethod, []byte{}, httpUrl) 
89 | if err != nil { 90 | log.Error("[Run] scheduler http request fail, the error message is: ", err) 91 | conn.WriteMessage(websocket.TextMessage, []byte{}) 92 | } 93 | 94 | // get the nodes 95 | node := &apiobject.Node{} 96 | nodes, err := node.UnMarshalJSONList([]byte(response)) 97 | if err != nil { 98 | log.Error("[Run] scheduler unmarshal nodes fail, the error message is: ", err) 99 | conn.WriteMessage(websocket.TextMessage, []byte{}) 100 | } 101 | 102 | // schedule pod 103 | nodeList := toPointerSlice(nodes) 104 | nodeForCandidates := scheduler.Schedule(pod, nodeList) 105 | nodeCandidates := toValueSlice(nodeForCandidates) 106 | 107 | // marshal the nodes that pod will be scheduled to and send to api server 108 | jsonBytes, err := apiobject.MarshalJSONList(nodeCandidates) 109 | if err != nil { 110 | log.Error("[Run] scheduler marshal nodes fail, the error message is: ", err) 111 | } else { 112 | log.Info("[Run] the node candidate count is: ", len(nodeCandidates)) 113 | if len(nodeCandidates) > 0 { 114 | log.Info("[Run] the node candidate is: ", nodeCandidates[0].Data.Name) 115 | } 116 | } 117 | conn.WriteMessage(websocket.TextMessage, jsonBytes) 118 | } 119 | } 120 | 121 | func Run(c Config) { 122 | // init scheduler and filter 123 | policyName := c.Policy 124 | var filter filter2.TemplateFilter 125 | concreteFilter := filter2.ConfigFilter{Name: "ConfigFilter"} 126 | filter = concreteFilter 127 | var scheduler policy.Scheduler 128 | 129 | if policyName == "default" || policyName == "frequency" { 130 | concreteScheduler := policy.NewLeastRequestScheduler(filter) 131 | scheduler = concreteScheduler 132 | } else if policyName == "resource" { 133 | concreteScheduler := policy.NewResourceScheduler(filter) 134 | scheduler = concreteScheduler 135 | } 136 | 137 | for { 138 | err := connect(scheduler) 139 | if err != nil { 140 | log.Error("[Run] scheduler connect fail, the error message is: ", err) 141 | } 142 | time.Sleep(5 * time.Second) 143 | } 144 | } 145 | 
-------------------------------------------------------------------------------- /pkg/kubescheduler/testutils/builder.go: -------------------------------------------------------------------------------- 1 | package testutils 2 | 3 | import "minik8s/pkg/apiobject" 4 | 5 | func CreatePod() *apiobject.Pod { 6 | return &apiobject.Pod{ 7 | APIVersion: "v1", 8 | Data: apiobject.MetaData{ 9 | Name: "test-pod", 10 | Namespace: "default", 11 | }, 12 | Spec: apiobject.PodSpec{ 13 | NodeSelector: map[string]string{ 14 | "disktype": "ssd", 15 | }, 16 | Containers: []apiobject.Container{ 17 | { 18 | Name: "test-container", 19 | Image: "nginx", 20 | Resources: apiobject.Resources{ 21 | Limits: apiobject.Limit{ 22 | Cpu: "200m", 23 | Memory: "512Mi", 24 | }, 25 | Requests: apiobject.Request{ 26 | Cpu: "100m", 27 | Memory: "256Mi", 28 | }, 29 | }, 30 | }, 31 | }, 32 | }, 33 | Status: apiobject.PodStatus{ 34 | Phase: apiobject.Pending, 35 | HostIp: "", 36 | PodIp: "", 37 | }, 38 | } 39 | } 40 | 41 | func CreateIllegalPod() *apiobject.Pod { 42 | return &apiobject.Pod{ 43 | APIVersion: "v1", 44 | Data: apiobject.MetaData{ 45 | Name: "test-pod", 46 | Namespace: "default", 47 | }, 48 | Spec: apiobject.PodSpec{ 49 | NodeSelector: map[string]string{ 50 | "disktype": "ssd", 51 | }, 52 | Containers: []apiobject.Container{ 53 | { 54 | Name: "test-container", 55 | Image: "nginx", 56 | Resources: apiobject.Resources{ 57 | Limits: apiobject.Limit{ 58 | Cpu: "200m", 59 | Memory: "512Mi", 60 | }, 61 | Requests: apiobject.Request{ 62 | Cpu: "100m", 63 | Memory: "256Mi", 64 | }, 65 | }, 66 | }, 67 | }, 68 | }, 69 | Status: apiobject.PodStatus{ 70 | Phase: apiobject.Scheduled, 71 | HostIp: "", 72 | PodIp: "", 73 | }, 74 | } 75 | } 76 | 77 | // create node for test use 78 | func CreateNode(name string, status apiobject.NodeStatusTag, cpuMin string, memoryMin string, cpuMax string, memoryMax string, ip string) *apiobject.Node { 79 | return &apiobject.Node{ 80 | APIVersion: "v1", 81 | Data: 
apiobject.MetaData{ 82 | Name: name, 83 | Namespace: "default", 84 | }, 85 | Spec: apiobject.NodeSpec{ 86 | Unschedulable: false, 87 | PodCIDR: "10.100.10.14/24", 88 | }, 89 | Status: apiobject.NodeStatus{ 90 | Capability: map[string]string{ 91 | "cpu": cpuMax, 92 | "memory": memoryMax, 93 | }, 94 | Allocatable: map[string]string{ 95 | "cpu": cpuMin, 96 | "memory": memoryMin, 97 | }, 98 | Conditions: []apiobject.Condition{ 99 | { 100 | Status: status, 101 | }, 102 | }, 103 | Addresses: []apiobject.Address{ 104 | { 105 | Type: "InternalIP", 106 | Address: ip, 107 | }, 108 | }, 109 | }, 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /pkg/serverless/activator/deploy_test.go: -------------------------------------------------------------------------------- 1 | package activator 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | ) 7 | 8 | func generateImage(name string) string { 9 | return serverIp + ":5000/" + name + ":latest" 10 | } 11 | 12 | func TestGenerateReplicaSet(t *testing.T) { 13 | replica := GenerateReplicaSet("test", "serverless", generateImage("test"), 0) 14 | if replica.Data.Name != "test" { 15 | t.Errorf("GenerateReplicaSet failed, expected %s, got %s", "test", replica.Data.Name) 16 | } 17 | if replica.Data.Namespace != "serverless" { 18 | t.Errorf("GenerateReplicaSet failed, expected %s, got %s", "serverless", replica.Data.Namespace) 19 | } 20 | if replica.Spec.Replicas != 0 { 21 | t.Errorf("GenerateReplicaSet failed, expected %d, got %d", 0, replica.Spec.Replicas) 22 | } 23 | 24 | // print the replicaSet 25 | replicaJson, err := json.MarshalIndent(replica, "", " ") 26 | if err != nil { 27 | t.Errorf("GenerateReplicaSet failed, error marshalling replicas: %s", err) 28 | } 29 | 30 | t.Logf("replicaSet: %s", replicaJson) 31 | } 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /pkg/serverless/autoscaler/metric.go: 
-------------------------------------------------------------------------------- 1 | package autoscaler 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "minik8s/pkg/apiobject" 6 | "minik8s/config" 7 | "minik8s/utils" 8 | "time" 9 | ) 10 | 11 | // query the pod ips of the replicaSet 12 | func QueryPodIps() (map[string][]string, error) { 13 | // find all the pods of the replicaSet 14 | podUrl := "http://" + config.ApiServerIp + "/api/v1/namespaces/serverless/pods" 15 | response, err := utils.SendRequest("GET", nil, podUrl) 16 | if err != nil { 17 | log.Error("[QueryPodIps] error getting pods: ", err) 18 | return nil, err 19 | } 20 | 21 | result := make(map[string][]string) 22 | podTool := &apiobject.Pod{} 23 | podList, error := podTool.UnMarshalJsonList([]byte(response)) 24 | if error != nil { 25 | log.Error("[QueryPodIps] error unmarshalling pods: ", error) 26 | return nil, error 27 | } 28 | 29 | for _, pod := range podList { 30 | pos, ok := result[pod.Data.Name] 31 | if !ok { 32 | pos = make([]string, 0) 33 | } 34 | pos = append(pos, pod.Status.PodIp) 35 | result[pod.Data.Name] = pos 36 | } 37 | return result, nil 38 | } 39 | 40 | 41 | // PeriodicMetric check the invoke frequency periodically, delete the function if it is not invoked for a long time 42 | func PeriodicMetric(timeInterval int) { 43 | for { 44 | // get all replicas 45 | replicaUrl := "http://" + config.ApiServerIp + "/api/v1/namespaces/serverless/replicas" 46 | response, err := utils.SendRequest("GET", nil, replicaUrl) 47 | if err != nil { 48 | log.Error("[PeriodicMetric] error getting replicas: ", err) 49 | continue 50 | } 51 | 52 | var replicaTool = &apiobject.ReplicationController{} 53 | replicaList, err := replicaTool.UnMarshalJSONList([]byte(response)) 54 | if err != nil { 55 | log.Error("[PeriodicMetric] error unmarshalling replicas: ", err) 56 | continue 57 | } 58 | 59 | // get all replicaSet's pod ips 60 | if err != nil { 61 | log.Error("[PeriodicMetric] error querying pod ips: ", 
err) 62 | continue 63 | } 64 | 65 | // update the replicas information 66 | for _, replica := range replicaList { 67 | // get the according record in map 68 | RecordMutex.RLock() 69 | record := GetRecord(replica.Data.Name) 70 | RecordMutex.RUnlock() 71 | if record == nil { 72 | record = &Record{ 73 | Name: replica.Data.Name, 74 | Replicas: replica.Status.Replicas, 75 | PodIps: make(map[string]int32), 76 | CallCount: 0, 77 | } 78 | RecordMutex.Lock() 79 | SetRecord(replica.Data.Name, record) 80 | RecordMutex.Unlock() 81 | } else { 82 | // if the call times is 0, scale to zero 83 | // scale according to the call times 84 | replica.Status.Scale = record.CallCount 85 | 86 | // update the replicaset 87 | if replica.Status.Scale != replica.Status.Replicas { 88 | replicaUrl := "http://" + config.ApiServerIp + "/api/v1/namespaces/serverless/replicas/" + replica.Data.Name + "/update" 89 | replicaJson, err := replica.MarshalJSON() 90 | if err != nil { 91 | log.Error("[PeriodicMetric] error marshalling replicas: ", err) 92 | continue 93 | } 94 | _, err = utils.SendRequest("POST", replicaJson, replicaUrl) 95 | log.Info("[PeriodicMetric] scale replicas to ", replica.Status.Scale, " for ", replica.Data.Name) 96 | if err != nil { 97 | log.Error("[PeriodicMetric] error updating replicas: ", err) 98 | continue 99 | } 100 | } 101 | 102 | // the replica count in expection 103 | record.Replicas = record.CallCount 104 | record.CallCount = 0 105 | 106 | RecordMutex.Lock() 107 | SetRecord(replica.Data.Name, record) 108 | RecordMutex.Unlock() 109 | } 110 | } 111 | 112 | time.Sleep(time.Duration(timeInterval) * time.Second) 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /pkg/serverless/autoscaler/record.go: -------------------------------------------------------------------------------- 1 | package autoscaler 2 | 3 | import "sync" 4 | 5 | type Record struct { 6 | // Name is the name of the function 7 | Name string `json:"name"` 8 | // 
the current replica number of the function 9 | Replicas int32 `json:"replicas"` 10 | // the podIps that the function has deployed on 11 | PodIps map[string]int32 `json:"podIps"` 12 | // the call count of the function 13 | CallCount int32 `json:"callCount"` 14 | } 15 | 16 | 17 | 18 | var ( 19 | RecordMap = make(map[string]*Record) 20 | RecordMutex sync.RWMutex // protect the access of RecordMap 21 | ) 22 | 23 | 24 | func GetRecord(name string) *Record { 25 | return RecordMap[name] 26 | } 27 | 28 | func SetRecord(name string, record *Record) { 29 | RecordMap[name] = record 30 | } 31 | 32 | func DeleteRecord(name string) { 33 | delete(RecordMap, name) 34 | } 35 | 36 | 37 | func UpdateRecord(name string) { 38 | record := GetRecord(name) 39 | if record == nil { 40 | return 41 | } 42 | record.CallCount++ 43 | SetRecord(name, record) 44 | } 45 | 46 | 47 | -------------------------------------------------------------------------------- /pkg/serverless/eventfilter/workflowwatcher.go: -------------------------------------------------------------------------------- 1 | package eventfilter 2 | 3 | import ( 4 | "fmt" 5 | "minik8s/config" 6 | "minik8s/pkg/apiobject" 7 | "minik8s/pkg/serverless/workflow" 8 | "net/http" 9 | "time" 10 | 11 | "github.com/gorilla/websocket" 12 | log "github.com/sirupsen/logrus" 13 | "github.com/tidwall/gjson" 14 | ) 15 | 16 | func WorkFlowSync(target string) { 17 | for { 18 | err := workflowConnect(target) 19 | if err != nil { 20 | log.Error("[WorkFlowSync] WebSocket connect fail: ", err) 21 | } 22 | time.Sleep(5 * time.Second) // wait 5 seconds to reconnect 23 | } 24 | } 25 | 26 | 27 | func workflowConnect(target string) error { 28 | // establish WebSocket connection 29 | url := fmt.Sprintf("ws://%s/api/v1/watch/%s", config.ApiServerIp, target) 30 | headers := http.Header{} 31 | headers.Set("X-Source", "workflows") 32 | conn, _, err := websocket.DefaultDialer.Dial(url, headers) 33 | if err != nil { 34 | fmt.Println("WebSocket connect fail", err) 35 
| return err 36 | } else { 37 | fmt.Println("WebSocket connect ") 38 | } 39 | defer conn.Close() 40 | 41 | // continue to receive messages and process 42 | for { 43 | _, message, err := conn.ReadMessage() 44 | if err != nil { 45 | fmt.Println("read from websocket fail: ", err) 46 | return err 47 | } 48 | if len(message) == 0 { 49 | continue 50 | } 51 | fmt.Printf("[client %s] %s\n", target, message) 52 | 53 | workFlow := gjson.Get(string(message), "workflow") 54 | if !workFlow.Exists() { 55 | conn.WriteMessage(websocket.TextMessage, []byte("execute: " + "the workFlow is not exist")) 56 | } 57 | workFlowStr := workFlow.String() 58 | log.Info("[WorkFlowSync] workFlow: ", workFlowStr) 59 | 60 | params := gjson.Get(string(message), "params") 61 | if !params.Exists() { 62 | conn.WriteMessage(websocket.TextMessage, []byte("execute: " + "the params is not exist")) 63 | } 64 | paramsStr := params.String() 65 | 66 | go WorkFlowTriggerHandler([]byte(workFlowStr), []byte(paramsStr), conn) 67 | // WorkFlowTriggerHandler([]byte(workFlowStr), []byte(paramsStr), conn) 68 | } 69 | } 70 | 71 | func WorkFlowTriggerHandler(workFlow []byte, paramsStr []byte, conn *websocket.Conn) { 72 | // parse the workFlow 73 | currentWorkFlow := &apiobject.WorkFlow{} 74 | err := currentWorkFlow.UnMarshalJSON(workFlow) 75 | if err != nil { 76 | log.Error("[WorkFlowTriggerHandler] unmarshal workFlow error: ", err) 77 | conn.WriteMessage(websocket.TextMessage, []byte("execute: " + "unmarshal workFlow error")) 78 | } 79 | result, err := workflow.ExecuteWorkFlow(currentWorkFlow, paramsStr) 80 | if err != nil { 81 | log.Error("[WorkFlowTriggerHandler] execute workFlow error: ", err) 82 | conn.WriteMessage(websocket.TextMessage, []byte("execute: " + err.Error())) 83 | } 84 | conn.WriteMessage(websocket.TextMessage, []byte("execute: " + string(result))) 85 | } 86 | 87 | -------------------------------------------------------------------------------- /pkg/serverless/function/image.go: 
-------------------------------------------------------------------------------- 1 | package function 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "io" 6 | "os" 7 | "os/exec" 8 | "strings" 9 | ) 10 | 11 | const serverIp = "localhost" 12 | 13 | // CreateImage to create image for function 14 | func CreateImage(path string, name string) error { 15 | // 1. create the image 16 | // 1.1 copy the target file to the func.py 17 | srcFile, err := os.Open(path) 18 | if err != nil { 19 | log.Error("[CreateImage] open src file error: ", err) 20 | return err 21 | } 22 | defer srcFile.Close() 23 | 24 | dstFile, err := os.OpenFile("/home/mini-k8s/pkg/serverless/imagedata/func.py", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) 25 | if err != nil { 26 | log.Error("[CreateImage] open dst file error: ", err) 27 | return err 28 | } 29 | defer dstFile.Close() 30 | 31 | _, err = io.Copy(dstFile, srcFile) 32 | if err != nil { 33 | log.Error("[CreateImage] copy file error: ", err) 34 | return err 35 | } 36 | 37 | // 1.2 create the image 38 | cmd := exec.Command("docker", "build", "-t", name, "/home/mini-k8s/pkg/serverless/imagedata/") 39 | err = cmd.Run() 40 | if err != nil { 41 | log.Error("[CreateImage] create image error: ", err) 42 | return err 43 | } 44 | 45 | cmd = exec.Command("docker", "tag", name, serverIp+":5000/"+name+":latest") 46 | err = cmd.Run() 47 | if err != nil { 48 | log.Error("[CreateImage] tag image error: ", err) 49 | return err 50 | } 51 | 52 | // 2. save the image to the registry 53 | err = SaveImage(name) 54 | if err != nil { 55 | log.Error("[CreateImage] save image error: ", err) 56 | return err 57 | } 58 | 59 | return nil 60 | } 61 | 62 | // save the image to the registry 63 | func SaveImage(name string) error { 64 | // 1. tag the image 65 | imageName := serverIp + ":5000/" + name + ":latest" 66 | 67 | // 2. 
push the image into the registry 68 | cmd := exec.Command("docker", "push", imageName) 69 | err := cmd.Run() 70 | if err != nil { 71 | log.Error("[SaveImage] push image error: ", err) 72 | return err 73 | } 74 | 75 | return nil 76 | } 77 | 78 | 79 | // find the image 80 | func FindImage(name string) bool { 81 | cmd := exec.Command("docker", "images", name) 82 | 83 | // check the output 84 | output, err := cmd.CombinedOutput() 85 | if err != nil { 86 | log.Error("[FindImage] get output error: ", err) 87 | return false 88 | } 89 | 90 | result := strings.TrimSpace(string(output)) 91 | log.Info("[FindImage] the result is: ", result) 92 | 93 | if strings.Contains(result, name) { 94 | return true 95 | } else { 96 | return false 97 | } 98 | } 99 | 100 | 101 | // DeleteImage to delete image for function 102 | func DeleteImage(name string) error { 103 | // if the image not exist, just ignore 104 | imageName := serverIp + ":5000/" + name + ":latest" 105 | if FindImage(imageName) { 106 | cmd := exec.Command("docker", "rmi", imageName) 107 | err := cmd.Run() 108 | if err != nil { 109 | log.Error("[DeleteImage] delete first image error: ", err) 110 | return err 111 | } 112 | } 113 | 114 | if FindImage(name) { 115 | cmd := exec.Command("docker", "rmi", name + ":latest") 116 | err := cmd.Run() 117 | if err != nil { 118 | log.Error("[DeleteImage] delete second image error: ", err) 119 | return err 120 | } 121 | } 122 | 123 | log.Info("[DeleteImage] delete image finished") 124 | return nil 125 | } 126 | 127 | // RunImage to run image for function 128 | func RunImage(name string) error { 129 | // 1. 
run the image 130 | cmd := exec.Command("docker", "run", "-d", "--name", name, "localhost:5000/"+name+":latest") 131 | err := cmd.Run() 132 | if err != nil { 133 | log.Error("[RunImage] run image error: ", err) 134 | return err 135 | } 136 | return nil 137 | } 138 | -------------------------------------------------------------------------------- /pkg/serverless/function/image_test.go: -------------------------------------------------------------------------------- 1 | package function 2 | 3 | import ( 4 | "os/exec" 5 | "strings" 6 | "testing" 7 | ) 8 | 9 | func TestCreateImage(t *testing.T) { 10 | err := CreateImage("/home/mini-k8s/example/serverless/singlefunc.py", "test") 11 | if err != nil { 12 | t.Errorf("CreateImage failed, error: %s", err) 13 | } 14 | } 15 | 16 | func TestDeleteImage(t *testing.T) { 17 | err := DeleteImage("test") 18 | if err != nil { 19 | t.Errorf("DeleteImage failed, error: %s", err) 20 | } 21 | 22 | // search the image 23 | cmd := exec.Command("docker", "images") 24 | out, err := cmd.Output() 25 | if err != nil { 26 | t.Errorf("DeleteImage failed, error: %s", err) 27 | } 28 | 29 | outputStr := string(out) 30 | imageName := "localhost:5000/test:latest" 31 | if strings.Contains(outputStr, imageName) { 32 | t.Errorf("DeleteImage failed, image %s still exists", imageName) 33 | } 34 | } 35 | 36 | 37 | -------------------------------------------------------------------------------- /pkg/serverless/function/registry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # define the serverIp 4 | serverIp="192.168.1.13" 5 | 6 | # if not have registry, then pull from docker hub 7 | if ! docker images | grep -q "registry"; then 8 | docker pull registry 9 | fi 10 | 11 | # if the registry container is not running, then start it 12 | if ! 
docker ps | grep -q "registry"; then
    docker run -d -p 5000:5000 --restart=always --name registry registry
fi

# if the current ip not equal to the server ip, then mark the server ip as trusted
if [ "$(hostname -I | awk '{print $1}')" != "$serverIp" ]; then
    # check whether the file exist
    if [ ! -f /etc/docker/daemon.json ]; then
        touch /etc/docker/daemon.json
    fi

    # allow plain-http access to the registry host
    echo '{
    "insecure-registries": ["'"$serverIp:5000"'"]
}' > /etc/docker/daemon.json

    systemctl restart docker
fi
--------------------------------------------------------------------------------
/pkg/serverless/imagedata/Dockerfile:
--------------------------------------------------------------------------------
# use basic image
# refer from zhihu, it is the fastest way to build image
# FROM ubuntu:20.04
# RUN apt-get update && apt-get install -y python3 python3-pip
FROM python:3.9-slim

# set workdir first so the copy below lands in /app
WORKDIR /app

EXPOSE 8081

# copy file into workdir
# BUGFIX: the context was previously copied twice — once into / (before
# WORKDIR was set) and once more via a lowercase `copy . .`; a single COPY
# after WORKDIR is sufficient and keeps the image smaller.
COPY . .

# install dependencies from requirements.txt
RUN pip3 install -i https://pypi.tuna.tsinghua.edu.cn/simple --default-timeout=60 --no-cache-dir -r ./requirements.txt

# set env
ENV PATH="/usr/local/bin:${PATH}"

# run server.py, start the function server
CMD ["python3", "server.py"]
-------------------------------------------------------------------------------- /pkg/serverless/imagedata/__pycache__/func.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IPADSIntern-MiniK8s/MiniK8s/610a8a0a09d3988fecdc60e0cdf69d3030cb9d82/pkg/serverless/imagedata/__pycache__/func.cpython-38.pyc
--------------------------------------------------------------------------------
/pkg/serverless/imagedata/func.py:
--------------------------------------------------------------------------------
# default placeholder function; CreateImage overwrites this file with the
# user-supplied function body before building the image
def run(x, y):
    z = x + y
    x = x - y
    y = y - x
    print(z)
    return x, y, z
--------------------------------------------------------------------------------
/pkg/serverless/imagedata/requirements.txt:
--------------------------------------------------------------------------------
Flask==2.0.2
--------------------------------------------------------------------------------
/pkg/serverless/imagedata/server.py:
--------------------------------------------------------------------------------
from flask import Flask, request, Response
import func

app = Flask(__name__)


@app.route('/', methods=['POST'])
def handle_request():
    # `params` is a dict
    params = request.json
    headers = {'Content-Type': 'text/plain'}
    try:
        result = func.run(**params)
        response = Response(str(result), headers=headers, status=200)
        return response
    except TypeError as e:
        # mismatched kwargs: deliberately answer with an empty 200 body
        # NOTE(review): this silently masks wrong parameters — confirm intended
        response = Response("", headers=headers, status=200)
        return response
    except Exception as e:
        response = Response(str(e), headers=headers, 
status=500) 21 | return str(e) 22 | 23 | if __name__ == '__main__': 24 | app.run(host="0.0.0.0", port=8081, debug=True) 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /pkg/serverless/run.go: -------------------------------------------------------------------------------- 1 | package serverless 2 | 3 | import ( 4 | "minik8s/utils" 5 | "minik8s/pkg/serverless/autoscaler" 6 | "minik8s/pkg/serverless/eventfilter" 7 | ) 8 | 9 | func Run() { 10 | go autoscaler.PeriodicMetric(30) 11 | go eventfilter.FunctionSync("functions") 12 | go eventfilter.WorkFlowSync("workflowexecutors") 13 | utils.WaitForever() 14 | } 15 | -------------------------------------------------------------------------------- /utils/client.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "minik8s/config" 6 | "minik8s/pkg/apiobject" 7 | "time" 8 | 9 | "github.com/gorilla/websocket" 10 | log "github.com/sirupsen/logrus" 11 | "github.com/tidwall/gjson" 12 | ) 13 | 14 | type SyncFunc interface { 15 | GetType() config.ObjType 16 | HandleCreate(message []byte) 17 | HandleDelete(message []byte) 18 | HandleUpdate(message []byte) 19 | } 20 | 21 | func Sync(syncFunc SyncFunc) { 22 | 23 | url := fmt.Sprintf("ws://%s/api/v1/watch/%s", config.ApiServerIp, syncFunc.GetType()) 24 | for { 25 | err := connect(url, syncFunc) 26 | if err != nil { 27 | fmt.Println("WebSocket连接错误:", err) 28 | } 29 | 30 | fmt.Println("连接中断,等待重新连接...") 31 | time.Sleep(5 * time.Second) // 等待5秒后进行重连 32 | } 33 | 34 | } 35 | 36 | func connect(url string, syncFunc SyncFunc) error { 37 | // 建立WebSocket连接 38 | conn, _, err := websocket.DefaultDialer.Dial(url, nil) 39 | if err != nil { 40 | fmt.Println("WebSocket连接失败:", err) 41 | return err 42 | } else { 43 | fmt.Println("WebSocket连接成功") 44 | } 45 | 46 | defer conn.Close() 47 | 48 | // 不断地接收消息并处理 49 | for { 50 | _, message, err := conn.ReadMessage() 51 | if 
err != nil { 52 | fmt.Println("读取消息失败:", err) 53 | return err 54 | } 55 | if len(message) == 0 { 56 | continue 57 | } 58 | // fmt.Printf("[client %s] %s\n", syncFunc.GetType(), message) 59 | 60 | op := gjson.Get(string(message), "metadata.resourcesVersion") 61 | switch op.String() { 62 | case "create": 63 | { 64 | go syncFunc.HandleCreate(message) 65 | } 66 | case "delete": 67 | { 68 | go syncFunc.HandleDelete(message) 69 | } 70 | case "update": 71 | { 72 | go syncFunc.HandleUpdate(message) 73 | } 74 | } 75 | 76 | } 77 | } 78 | 79 | func CreateObject(obj apiobject.Object, ty config.ObjType, ns string) { 80 | if ns == "" { 81 | ns = "default" 82 | } 83 | res, _ := obj.MarshalJSON() 84 | // log.Info("[create obj]", string(res)) 85 | //POST /api/v1/namespaces/{namespace}/{resource}" 86 | url := fmt.Sprintf("http://%s/api/v1/namespaces/%s/%s", config.ApiServerIp, ns, ty) 87 | if info, err := SendRequest("POST", res, url); err != nil { 88 | log.Error("create object ", info) 89 | } 90 | } 91 | 92 | func UpdateObject(obj apiobject.Object, ty config.ObjType, ns string, name string) { 93 | if ns == "" { 94 | ns = "default" 95 | } 96 | res, _ := obj.MarshalJSON() 97 | // log.Info("[update obj]", string(res)) 98 | //POST /api/v1/namespaces/{namespace}/{resource}/{name}/update" 99 | url := fmt.Sprintf("http://%s/api/v1/namespaces/%s/%s/%s/update", config.ApiServerIp, ns, ty, name) 100 | if info, err := SendRequest("POST", res, url); err != nil { 101 | log.Error("update object ", info) 102 | } 103 | } 104 | 105 | func UpdateObjectStatus(obj apiobject.Object, ty config.ObjType, ns string, name string) { 106 | if ns == "" { 107 | ns = "default" 108 | } 109 | res, _ := obj.MarshalJSON() 110 | // log.Info("[update obj status]", string(res)) 111 | //POST /api/v1/namespaces/{namespace}/{resource}/{name}/status" 112 | url := fmt.Sprintf("http://%s/api/v1/namespaces/%s/%s/%s/status", config.ApiServerIp, ns, ty, name) 113 | if info, err := SendRequest("POST", res, url); err != nil { 114 
| log.Error("update object ", info) 115 | } 116 | } 117 | 118 | func DeleteObject(ty config.ObjType, ns string, name string) { 119 | if ns == "" { 120 | ns = "default" 121 | } 122 | // log.Info("[delete obj]", name) 123 | //DELETE /api/v1/namespaces/{namespace}/{resource}" 124 | url := fmt.Sprintf("http://%s/api/v1/namespaces/%s/%s/%s", config.ApiServerIp, ns, ty, name) 125 | if info, err := SendRequest("DELETE", nil, url); err != nil { 126 | log.Error("delete object ", info) 127 | } 128 | } 129 | 130 | func GetObject(ty config.ObjType, ns string, name string) string { 131 | if ns == "" { 132 | ns = "default" 133 | } 134 | // log.Info("[get obj]", name) 135 | //GET /api/v1/pods 136 | var url string 137 | if ns != "nil" { 138 | if name == "" { 139 | url = fmt.Sprintf("http://%s/api/v1/namespaces/%s/%s", config.ApiServerIp, ns, ty) 140 | } else { 141 | url = fmt.Sprintf("http://%s/api/v1/namespaces/%s/%s/%s", config.ApiServerIp, ns, ty, name) 142 | } 143 | } else { 144 | if name == "" { 145 | url = fmt.Sprintf("http://%s/api/v1/%s", config.ApiServerIp, ty) 146 | } else { 147 | url = fmt.Sprintf("http://%s/api/v1/%s/%s", config.ApiServerIp, ty, name) 148 | } 149 | } 150 | 151 | var str []byte 152 | if info, err := SendRequest("GET", str, url); err != nil { 153 | log.Error("[get obj]", info) 154 | return "" 155 | } else { 156 | return info 157 | } 158 | } 159 | 160 | 161 | -------------------------------------------------------------------------------- /utils/http.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "net/http" 7 | 8 | log "github.com/sirupsen/logrus" 9 | ) 10 | 11 | func SendJsonObject(method string, jsonObject []byte, url string) bool { 12 | request, err := http.NewRequest(method, url, bytes.NewBuffer(jsonObject)) 13 | 14 | if err != nil { 15 | log.Error(err) 16 | return false 17 | } 18 | 19 | request.Header.Set("content-type", "application/json") 20 | 
//request.Header.Set("accept", "application/json, text/plain, */*") 21 | 22 | client := &http.Client{} 23 | resp, err := client.Do(request) 24 | if err != nil { 25 | //log.Fatal("client.Do err:") 26 | log.Error(err) 27 | return false 28 | } 29 | body := &bytes.Buffer{} 30 | _, err = body.ReadFrom(resp.Body) 31 | if err != nil { 32 | log.Error(err) 33 | return false 34 | } 35 | resp.Body.Close() 36 | //fmt.Println(resp.StatusCode) 37 | if resp.StatusCode != http.StatusOK { 38 | fmt.Println(body) 39 | } 40 | return true 41 | } 42 | 43 | func SendRequest(method string, str []byte, url string) (string, error) { 44 | request, err := http.NewRequest(method, url, bytes.NewBuffer(str)) 45 | if err != nil { 46 | return "", err 47 | } 48 | request.Header.Set("content-type", "application/json") 49 | 50 | client := &http.Client{} 51 | resp, err := client.Do(request) 52 | 53 | body := &bytes.Buffer{} 54 | if err != nil { 55 | log.Error(err) 56 | } else { 57 | _, err := body.ReadFrom(resp.Body) 58 | if err != nil { 59 | log.Error(err) 60 | } 61 | resp.Body.Close() 62 | //fmt.Println(resp.StatusCode) 63 | } 64 | return body.String(), err 65 | 66 | } 67 | 68 | func SendRequestWithHb(method string, str []byte, url string, source string) (string, error) { 69 | request, err := http.NewRequest(method, url, bytes.NewBuffer(str)) 70 | if err != nil { 71 | return "", err 72 | } 73 | request.Header.Set("content-type", "application/json") 74 | request.Header.Set("source", source) 75 | 76 | client := &http.Client{} 77 | resp, err := client.Do(request) 78 | 79 | body := &bytes.Buffer{} 80 | if err != nil { 81 | log.Error(err) 82 | } else { 83 | _, err := body.ReadFrom(resp.Body) 84 | if err != nil { 85 | log.Error(err) 86 | } 87 | resp.Body.Close() 88 | //fmt.Println(resp.StatusCode) 89 | } 90 | return body.String(), err 91 | 92 | } 93 | -------------------------------------------------------------------------------- /utils/rand.go: 
-------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "time" 7 | ) 8 | 9 | var rng = struct { 10 | sync.Mutex 11 | rand *rand.Rand 12 | }{ 13 | rand: rand.New(rand.NewSource(time.Now().UnixNano())), 14 | } 15 | 16 | // Int returns a non-negative pseudo-random int. 17 | func Int() int { 18 | rng.Lock() 19 | defer rng.Unlock() 20 | return rng.rand.Int() 21 | } 22 | 23 | // Intn generates an integer in range [0,max). 24 | // By design this should panic if input is invalid, <= 0. 25 | func Intn(max int) int { 26 | rng.Lock() 27 | defer rng.Unlock() 28 | return rng.rand.Intn(max) 29 | } 30 | 31 | // IntnRange generates an integer in range [min,max). 32 | // By design this should panic if input is invalid, <= 0. 33 | func IntnRange(min, max int) int { 34 | rng.Lock() 35 | defer rng.Unlock() 36 | return rng.rand.Intn(max-min) + min 37 | } 38 | 39 | // IntnRange generates an int64 integer in range [min,max). 40 | // By design this should panic if input is invalid, <= 0. 41 | func Int63nRange(min, max int64) int64 { 42 | rng.Lock() 43 | defer rng.Unlock() 44 | return rng.rand.Int63n(max-min) + min 45 | } 46 | 47 | // Seed seeds the rng with the provided seed. 48 | func Seed(seed int64) { 49 | rng.Lock() 50 | defer rng.Unlock() 51 | 52 | rng.rand = rand.New(rand.NewSource(seed)) 53 | } 54 | 55 | // Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) 56 | // from the default Source. 57 | func Perm(n int) []int { 58 | rng.Lock() 59 | defer rng.Unlock() 60 | return rng.rand.Perm(n) 61 | } 62 | 63 | const ( 64 | // We omit vowels from the set of available characters to reduce the chances 65 | // of "bad words" being formed. 66 | alphanums = "bcdfghjklmnpqrstvwxz2456789" 67 | // No. of bits required to index into alphanums string. 68 | alphanumsIdxBits = 5 69 | // Mask used to extract last alphanumsIdxBits of an int. 
70 | alphanumsIdxMask = 1<>= alphanumsIdxBits 97 | remaining-- 98 | } 99 | return string(b) 100 | } 101 | -------------------------------------------------------------------------------- /utils/resourceutils/unit.go: -------------------------------------------------------------------------------- 1 | package resourceutils 2 | 3 | 4 | import ( 5 | "fmt" 6 | log "github.com/sirupsen/logrus" 7 | "regexp" 8 | "strconv" 9 | ) 10 | 11 | 12 | // ParseQuantity parses the quantity string to float64 13 | func ParseQuantity(str string) (float64, error) { 14 | var quantity float64 15 | var err error 16 | 17 | // use the regex to separate the quantity and unit 18 | // for example, 1m -> 1, m 19 | re := regexp.MustCompile(`^([\d\.]+)([a-zA-Z]*)$`) 20 | matches := re.FindStringSubmatch(str) 21 | 22 | if len(matches) == 3 { 23 | // parse the quantity part to float64 24 | quantity, err = strconv.ParseFloat(matches[1], 64) 25 | if err != nil { 26 | return 0, fmt.Errorf("failed to parse quantity: %s", err.Error()) 27 | } 28 | 29 | // convert the unit part to standard unit, for example, m -> 1/100 30 | log.Info("[ParseQuantity] matches[2]: ", matches[2]) 31 | switch matches[2] { 32 | case "m": 33 | quantity /= 1000 34 | case "Ki": 35 | quantity *= 1024 36 | case "Mi": 37 | quantity *= 1024 * 1024 38 | case "M": 39 | quantity *= 1000 * 1000 40 | case "K": 41 | quantity *= 1000 42 | default: 43 | log.Info("[ParseQuantity] invalid unit: ", matches[2]) 44 | } 45 | 46 | return quantity, nil 47 | } 48 | 49 | return 0, fmt.Errorf("invalid quantity string: %s", str) 50 | } 51 | 52 | 53 | func PackQuantity(quantity float64, unit string) string { 54 | switch unit { 55 | case "m": 56 | quantity *= 1000 57 | case "Ki": 58 | quantity /= 1024 59 | case "Mi": 60 | quantity /= 1024 * 1024 61 | case "M": 62 | quantity /= 1000 * 1000 63 | case "K": 64 | quantity /= 1000 65 | default: 66 | log.Info("[PackQuantity] invalid unit: ", unit) 67 | } 68 | return fmt.Sprintf("%f%s", quantity, unit) 69 | } 70 | 
71 | 72 | func GetUnit(quantity string) string { 73 | re := regexp.MustCompile(`^([\d\.]+)([a-zA-Z]*)$`) 74 | matches := re.FindStringSubmatch(quantity) 75 | if len(matches) == 3 { 76 | return matches[2] 77 | } 78 | return "" 79 | } 80 | 81 | -------------------------------------------------------------------------------- /utils/resourceutils/unit_test.go: -------------------------------------------------------------------------------- 1 | package resourceutils 2 | 3 | import "testing" 4 | func TestUnit(t *testing.T) { 5 | // test getUnit 6 | str := "100m" 7 | unit := GetUnit(str) 8 | if unit != "m" { 9 | t.Errorf("getUnit error, expect m, get %s", unit) 10 | } 11 | 12 | str = "100Mi" 13 | unit = GetUnit(str) 14 | if unit != "Mi" { 15 | t.Errorf("getUnit error, expect Mi, get %s", unit) 16 | } 17 | 18 | } 19 | 20 | 21 | func TestPackQuantity(t *testing.T) { 22 | // test PackQuantity 23 | quantity := 0.1 24 | unit := "m" 25 | str := PackQuantity(quantity, unit) 26 | t.Logf("str: %s", str) 27 | 28 | quantity = 100.0 29 | unit = "Mi" 30 | str = PackQuantity(quantity, unit) 31 | t.Logf("str: %s", str) 32 | } -------------------------------------------------------------------------------- /utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | /* ========== Resource Function ========== */ 4 | 5 | func IsLabelEqual(a map[string]string, b map[string]string) bool { 6 | for k, v := range a { 7 | if b[k] != v { 8 | return false 9 | } 10 | } 11 | return true 12 | } 13 | 14 | /* ========== Time Function ========== */ 15 | 16 | func WaitForever() { 17 | <-make(chan struct{}) 18 | } 19 | 20 | /* ========== Rand Function ========== */ 21 | 22 | func GenerateName(name string, n int) string { 23 | 24 | return name + "-" + String(n) 25 | 26 | } 27 | --------------------------------------------------------------------------------