├── .devcontainer
└── devcontainer.json
├── .github
└── workflows
│ ├── build-go-binary.yml
│ ├── build-go-modules.yml
│ ├── don't_mess_with_my_docs
│ ├── greetings.yml
│ ├── label.yml
│ ├── main.yml
│ └── stale.yml
├── .gitignore
├── API_DOCS.md
├── CONTRIBUTING.md
├── Dockerfile
├── INSTALL.md
├── LICENSE
├── README.md
├── api
└── api.go
├── apply
└── apply.go
├── go.mod
├── go.sum
├── golang.jpeg
├── install
└── install.go
├── server.go
└── yamls
├── crb.yaml
├── one-to-rule-them-all.yaml
├── pod.yaml
└── sa.yaml
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "image": "mcr.microsoft.com/devcontainers/universal:2",
3 | "features": {
4 | "ghcr.io/mpriscella/features/kind:1": {}
5 | },
6 | "postCreateCommand": "kind create cluster"
7 | }
8 |
--------------------------------------------------------------------------------
/.github/workflows/build-go-binary.yml:
--------------------------------------------------------------------------------
1 | name: Building go binary
2 | on:
3 |   push:
4 |   pull_request:
5 | 
6 | jobs:
7 |   build-binary:
8 |     runs-on: ubuntu-latest
9 |     steps:
10 |       - uses: actions/checkout@v3
11 | 
12 |       - name: Set up Go
13 |         uses: actions/setup-go@v3
14 |         with:
15 |           go-version: '1.18'  # quoted: bare 1.18 is a YAML float (e.g. 1.20 -> 1.2)
16 | 
17 |       - name: Build
18 |         run: go build -v ./... && echo Go Binary built
19 | 
20 | ## Need to add the Test command here, when we have unit TCs
--------------------------------------------------------------------------------
/.github/workflows/build-go-modules.yml:
--------------------------------------------------------------------------------
1 | name: Go mod tidy
2 | on:
3 |   push:
4 |   pull_request:
5 | 
6 | jobs:
7 |   build-modules:
8 |     runs-on: ubuntu-latest
9 |     env:
10 |       GO111MODULE: "on"  # quoted: bare `on` is a YAML 1.1 boolean and coerces to true
11 |     steps:
12 |       - uses: actions/checkout@v2
13 |       - uses: actions/setup-go@v2
14 |         with:
15 |           go-version: '1.19'  # quoted: avoids YAML float coercion (1.20 -> 1.2)
16 | 
17 |       - name: tidy
18 |         uses: paulvollmer/action-golang-gosum@v1.0.0
19 |         env:
20 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
21 |       - run: go mod tidy
22 | 
--------------------------------------------------------------------------------
/.github/workflows/don't_mess_with_my_docs:
--------------------------------------------------------------------------------
1 | name: Only I can do changes to the Docs
2 | 
3 | on:
4 |   pull_request:
5 |     paths:
6 |       - '**/*.md'
7 | jobs:
8 |   restrict_md_changes:
9 |     runs-on: ubuntu-latest
10 | 
11 |     steps:
12 |       # Checkout is required: without it there is no git repository in the
13 |       # workspace and the `git log` below fails on every run.
14 |       - uses: actions/checkout@v3
15 |         with:
16 |           fetch-depth: 0
17 |       - name: Check commit author
18 |         id: check_author
19 |         run: |
20 |           # Get the author of the latest commit
21 |           # NOTE(review): on pull_request events HEAD is the merge commit;
22 |           # github.event.pull_request.user.login may be a more reliable
23 |           # source of the PR author — confirm which is intended.
24 |           AUTHOR=$(git log -1 --pretty=format:'%an')
25 | 
26 |           # List of allowed authors (replace with your own)
27 |           ALLOWED_AUTHORS="kitarp29"
28 | 
29 |           # Check if the author is allowed
30 |           if [[ ! $ALLOWED_AUTHORS =~ (^| )$AUTHOR($| ) ]]; then
31 |             echo "Unauthorized commit by $AUTHOR. Only specific accounts are allowed."
32 |             echo "If you see a problem in the Docs, please raise an Issue"
33 |             exit 1
34 |           fi
35 | 
--------------------------------------------------------------------------------
/.github/workflows/greetings.yml:
--------------------------------------------------------------------------------
1 | name: Greetings
2 |
3 | on: [pull_request_target, issues]
4 |
5 | jobs:
6 | greeting:
7 | runs-on: ubuntu-latest
8 | permissions:
9 | issues: write
10 | pull-requests: write
11 | steps:
12 | - uses: actions/first-interaction@v1
13 | with:
14 | repo-token: ${{ secrets.GITHUB_TOKEN }}
15 | issue-message: "Hi! Thanks for creating the issue :) Looking forward to resolve this with you."
16 | pr-message: "Wow!! It's BIG 😉(That's what she said). Thanks for helping this project "
17 |
--------------------------------------------------------------------------------
/.github/workflows/label.yml:
--------------------------------------------------------------------------------
1 | # This workflow will triage pull requests and apply a label based on the
2 | # paths that are modified in the pull request.
3 | #
4 | # To use this workflow, you will need to set up a .github/labeler.yml
5 | # file with configuration. For more information, see:
6 | # https://github.com/actions/labeler
7 |
8 | name: Labeler
9 | on: [pull_request]
10 |
11 | jobs:
12 | label:
13 |
14 | runs-on: ubuntu-latest
15 | permissions:
16 | contents: read
17 | pull-requests: write
18 |
19 | steps:
20 | - uses: actions/labeler@v4
21 | with:
22 | repo-token: "${{ secrets.GITHUB_TOKEN }}"
23 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: Kube-ez CI
2 | on:
3 |   push:
4 |     paths:
5 |       # Path filters are repo-root-relative; a leading '/' never matches.
6 |       - 'api/**'
7 |       - 'apply/**'
8 |       - 'install/**'
9 |       - 'kube-ez-chart/**'
10 |       - 'yamls/**'
11 |       - 'Dockerfile'
12 |       - '.github/**'
13 |       - 'server.go'
14 |   pull_request:
15 |     paths:
16 |       - '**.go'
17 |       - 'Dockerfile'
18 |       - '.github/**'
19 |       - '**.yaml'
20 |   workflow_dispatch:
21 | 
22 | env:
23 |   # Use docker.io for Docker Hub if empty
24 |   REGISTRY: ghcr.io
25 |   DOCKER_IMAGE_NAME: kube-ez
26 |   DOCKERFILE_PATH: ./Dockerfile
27 | 
28 | jobs:
29 | 
30 |   build-and-push:
31 |     runs-on: ubuntu-latest
32 | 
33 |     steps:
34 |       - name: Checkout code
35 |         uses: actions/checkout@v2
36 | 
37 |       - name: Login to Github Container Registry
38 |         uses: docker/login-action@v1
39 |         with:
40 |           registry: ghcr.io
41 |           username: ${{ github.actor }}
42 |           password: ${{ secrets.GITHUB_TOKEN }}
43 | 
44 |       - name: Build and push Docker image
45 |         uses: docker/build-push-action@v2
46 |         with:
47 |           context: .
48 |           file: ${{ env.DOCKERFILE_PATH }}
49 |           push: true
50 |           tags: |
51 |             ghcr.io/${{ github.repository_owner }}/${{ env.DOCKER_IMAGE_NAME }}:${{ github.sha }}
52 | 
53 |   Test-on-cluster:
54 |     runs-on: ubuntu-latest
55 |     needs: [build-and-push]
56 |     steps:
57 |       - name: Testing on a k8s Kind Cluster
58 |         uses: helm/kind-action@v1.4.0
59 |       - run: |
60 |           kubectl cluster-info
61 |           kubectl get nodes
62 | 
63 |       - name: Preparing cluster for kube-ez
64 |         # I know this is Weird that it works, but running the forward command twice does work!
65 |         run: |
66 |           kubectl apply -f https://raw.githubusercontent.com/kitarp29/kube-ez/main/yamls/sa.yaml
67 |           kubectl apply -f https://raw.githubusercontent.com/kitarp29/kube-ez/main/yamls/crb.yaml
68 |           kubectl run kube-ez --image=ghcr.io/${{ github.repository_owner }}/${{ env.DOCKER_IMAGE_NAME }}:${{ github.sha }} --port=8000
69 |           sleep 20
70 |           kubectl get po
71 |           kubectl port-forward kube-ez 8000:8000 &>/dev/null &
72 |           sleep 5
73 |           kubectl port-forward kube-ez 8000:8000 &>/dev/null &
74 |       - run: |
75 |           curl -i http://localhost:8000/
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | # This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time.
2 | #
3 | # You can adjust the behavior by modifying this file.
4 | # For more information, see:
5 | # https://github.com/actions/stale
6 | name: Mark stale issues and pull requests
7 |
8 | on:
9 | schedule:
10 | - cron: '36 11 * * *'
11 |
12 | jobs:
13 | stale:
14 |
15 | runs-on: ubuntu-latest
16 | permissions:
17 | issues: write
18 | pull-requests: write
19 |
20 | steps:
21 | - uses: actions/stale@v5
22 | with:
23 | repo-token: ${{ secrets.GITHUB_TOKEN }}
24 | stale-issue-message: 'Stale issue message'
25 | stale-pr-message: 'Stale pull request message'
26 | stale-issue-label: 'no-issue-activity'
27 | stale-pr-label: 'no-pr-activity'
28 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | *.[56789ao]
3 | *.a[56789o]
4 | *.so
5 | *.pyc
6 | ._*
7 | .nfs.*
8 | [56789a].out
9 | *~
10 | *.orig
11 | *.rej
12 | *.exe
13 | .*.swp
14 | core
15 | *.cgo*.go
16 | *.cgo*.c
17 | _cgo_*
18 | _obj
19 | _test
20 | _testmain.go
21 | /VERSION.cache
22 | /bin/
23 | /build.out
24 | /doc/articles/wiki/*.bin
25 | /goinstall.log
26 | /last-change
27 | /misc/cgo/life/run.out
28 | /misc/cgo/stdio/run.out
29 | /misc/cgo/testso/main
30 | /pkg/
31 | /src/*.*/
32 | /src/cmd/cgo/zdefaultcc.go
33 | /src/cmd/dist/dist
34 | /src/cmd/go/internal/cfg/zdefaultcc.go
35 | /src/cmd/go/internal/cfg/zosarch.go
36 | /src/cmd/internal/objabi/zbootstrap.go
37 | /src/go/build/zcgo.go
38 | /src/go/doc/headscan
39 | /src/runtime/internal/sys/zversion.go
40 | /src/unicode/maketables
41 | /test.out
42 | /test/garbage/*.out
43 | /test/pass.out
44 | /test/run.out
45 | /test/times.out
46 | k8-api
--------------------------------------------------------------------------------
/API_DOCS.md:
--------------------------------------------------------------------------------
1 | # **API Documentation**
2 |
3 | This document is a collection of API endpoints for this repository. It will explain the endpoints and how to use them. The endpoints are grouped into sections based on the path. The method, parameters, and response are described below.
4 |
5 | ### **Postman collection** : [Here](https://www.getpostman.com/collections/b14cdaad336ab81340b5)
6 |
7 |
8 |
9 | ## Kubernetes Management Routes:
10 |
11 | - **Home**
12 | ```
13 | Method: GET
14 | Endpoint: /
15 | Parameters: None
16 | Response:
17 | - httpStatusOk: 200
18 | - message: Yes! I am alive!
19 | - type: string
20 | ```
21 | - **Pods**
22 | ```
23 | Method: GET
24 | Endpoint: /pods
25 | Parameters:
26 | - namespace:
27 | - containerDetails:
28 | Response:
29 | - httpStatusOk: 200
30 | - message: List of pods
31 | - type: array
32 | ```
33 | - **Namespace**
34 | ```
35 | Method: GET
36 | Endpoint: /namespace
37 | Parameters: None
38 | Response:
39 | - httpStatusOk: 200
40 | - message: List of namespaces
41 | - type: array
42 | ```
43 | - **Deployments**
44 | ```
45 | Method: GET
46 | Endpoint: /deployments
47 | Parameters:
48 | - namespace:
49 | Response:
50 | - httpStatusOk: 200
51 | - message: List of deployments
52 | - type: array
53 | ```
54 | - **ConfigMaps**
55 | ```
56 | Method: GET
57 | Endpoint: /configmaps
58 | Parameters:
59 | - namespace:
60 | Response:
61 | - httpStatusOk: 200
62 | - message: List of configmaps
63 | - type: array
64 | ```
65 | - **Services**
66 | ```
67 | Method: GET
68 | Endpoint: /services
69 | Parameters:
70 | - namespace:
71 | Response:
72 | - httpStatusOk: 200
73 | - message: List of services
74 | - type: array
75 | ```
76 | - **Events**
77 | ```
78 | Method: GET
79 | Endpoint: /events
80 | Parameters:
81 | - namespace:
82 | Response:
83 | - httpStatusOk: 200
84 | - message: List of events
85 | - type: array
86 | ```
87 | - **Secrets**
88 | ```
89 | Method: GET
90 | Endpoint: /secrets
91 | Parameters:
92 | - namespace:
93 | Response:
94 | - httpStatusOk: 200
95 | - message: List of secrets
96 | - type: array
97 | ```
98 | - **ReplicationControllers**
99 | ```
100 | Method: GET
101 | Endpoint: /replicationcontrollers
102 | Parameters:
103 | - namespace:
104 | Response:
105 | - httpStatusOk: 200
106 | - message: List of replicationcontrollers
107 | - type: array
108 | ```
109 | - **DaemonSets**
110 | ```
111 | Method: GET
112 | Endpoint: /daemonsets
113 | Parameters:
114 | - namespace:
115 | Response:
116 | - httpStatusOk: 200
117 | - message: List of daemonsets
118 | - type: array
119 | ```
120 | - **Pod Logs**
121 | ```
122 | Method: GET
123 | Endpoint: /podlogs
124 | Parameters:
125 | - namespace:
126 | - pod:
127 | Response:
128 | - httpStatusOk: 200
129 | - message: Pod logs
130 | - type: string
131 | ```
132 | - **Create Namespace**
133 | ```
134 | Method: POST
135 | Endpoint: /createNamespace
136 | Parameters:
137 | - namespace:
138 | Response:
139 | - httpStatusOk: 200
140 | - message: Namespace created
141 | - type: string
142 | ```
143 | - **Delete Namespace**
144 | ```
145 | Method: DELETE
146 | Endpoint: /deleteNamespace
147 | Parameters:
148 | - namespace:
149 | Response:
150 | - httpStatusOk: 200
151 | - message: Namespace deleted
152 | - type: string
153 | ```
154 | - **Delete Deployment**
155 | ```
156 | Method: DELETE
157 | Endpoint: /deleteDeployment
158 | Parameters:
159 | - namespace:
160 | - deployment:
161 | Response:
162 | - httpStatusOk: 200
163 | - message: Deployment deleted
164 | - type: string
165 | ```
166 | - **Delete Service**
167 | ```
168 | Method: DELETE
169 | Endpoint: /deleteService
170 | Parameters:
171 | - namespace:
172 | - service:
173 | Response:
174 | - httpStatusOk: 200
175 | - message: Service deleted
176 | - type: string
177 | ```
178 | - **Delete ConfigMap**
179 | ```
180 | Method: DELETE
181 | Endpoint: /deleteConfigMap
182 | Parameters:
183 | - namespace:
184 | - configmap:
185 | Response:
186 | - httpStatusOk: 200
187 | - message: ConfigMap deleted
188 | - type: string
189 | ```
190 | - **Delete Secret**
191 | ```
192 | Method: DELETE
193 | Endpoint: /deleteSecret
194 | Parameters:
195 | - namespace:
196 | - secret:
197 | Response:
198 | - httpStatusOk: 200
199 | - message: Secret deleted
200 | - type: string
201 | ```
202 | - **Delete ReplicationController**
203 | ```
204 | Method: DELETE
205 | Endpoint: /deleteReplicationController
206 | Parameters:
207 | - namespace:
208 | - replicationcontroller:
209 | Response:
210 | - httpStatusOk: 200
211 | - message: ReplicationController deleted
212 | - type: string
213 | ```
214 | - **Delete DaemonSet**
215 | ```
216 | Method: DELETE
217 | Endpoint: /deleteDaemonSet
218 | Parameters:
219 | - namespace:
220 | - daemonset:
221 | Response:
222 | - httpStatusOk: 200
223 | - message: DaemonSet deleted
224 | - type: string
225 | ```
226 | - **Delete Pod**
227 | ```
228 | Method: DELETE
229 | Endpoint: /deletePod
230 | Parameters:
231 | - namespace:
232 | - pod:
233 | Response:
234 | - httpStatusOk: 200
235 | - message: Pod deleted
236 | - type: string
237 | ```
238 | - **Delete Event**
239 | ```
240 | Method: DELETE
241 | Endpoint: /deleteEvent
242 | Parameters:
243 | - namespace:
244 | - event:
245 | Response:
246 | - httpStatusOk: 200
247 | - message: Event deleted
248 | - type: string
249 | ```
250 |
251 |
252 |
253 | ## Apply YAML/JSON Files
254 |
255 | - **Apply**
256 |
257 | > The Container will not have the YAML file preloaded on the container. You can download them using ```wget```
258 |
259 | Method: POST
260 | Endpoint: /apply
261 | Parameters:
262 | - filepath:
263 | Response:
264 | - httpStatusOk: 200
265 | - message: YAML/JSON file applied
266 | - type: string
267 |
268 |
269 | ## Help Routes
270 |
271 | - **Helm Repo Add**
272 | ```
273 | Method: POST
274 | Endpoint: /helmRepoAdd
275 | Parameters:
276 | - url:
277 | - repoName:
278 | Response:
279 | - httpStatusOk: 200
280 | - message: Repo added
281 | - type: string
282 | ```
283 | - **Helm Repo Update**
284 | ```
285 | Method: GET
286 | Endpoint: /helmRepoUpdate
287 | Parameters: None
288 | Response:
289 | - httpStatusOk: 200
290 | - message: Repo updated
291 | - type: string
292 | ```
293 | - **Helm Install**
294 | ```
295 | Method: POST
296 | Endpoint: /helmInstall
297 | Parameters:
298 | - chartName:
299 | - chartVersion:
300 | - namespace:
301 | - values:
302 | Response:
303 | - httpStatusOk: 200
304 | - message: Helm installed
305 | - type: string
306 | ```
307 | - **Helm Delete**
308 | ```
309 | Method: DELETE
310 | Endpoint: /helmDelete
311 | Parameters:
312 | - name :
313 | - namespace:
314 | Response:
315 | - httpStatusOk: 200
316 | - message: Helm deleted
317 | - type: string
318 | ```
319 |
320 |
321 | 🚧 **More Routes under Construction**👷
322 |
323 | Thanks for your patience! 🥰
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # **Contributing**
2 |
3 | Thanks for contributing to the project!🥰
4 | I am glad you are here. Hoping that you liked the project.
5 |
6 | I have set up Codespaces to install and run a Kubernetes cluster for you. You could directly dive into contributing without worrying about the local setup!
7 | [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/kitarp29/kube-ez)
8 |
9 | ## 🔥**Purpose of this file:**
10 | - I will be stating the rules for contributing to the project.
11 | - Most important is that you should be able to run the project locally.
12 | - And I will try to make it as easy as possible for you.
13 | - Also this is for future *Me*, who will definitely forget how and why I did it, what I did 😅
14 |
15 | ## 🚨**Rules for contributing**
16 | 1. **Always** raise an issue before making the pull request.
17 | 2. Try to wait for the issue to get assigned to you.
18 | 3. Make **signed** and **small** commits in your Pull Request.
19 | 4. **Fork** the Repository and then work.
20 | 5. Comment your code as much as possible.
21 | 6. Try to write modular code, I also tried to avoid *Spaghetti*🍝 code.
22 |
23 | ## 💁**How to contribute**
24 | 1. **Always** raise an Issue before making the pull request.
25 | 2. Make **signed** and **small** commits in your pull request.
26 | 3. State the work you did in the issue and in the pull request.
27 | 4. **Fork** the Repository and then work.
28 |
29 | ## 💬**How to run the project locally**
30 | 1. Run the following command:
31 | ```
32 | $ git clone https://github.com/kitarp29/kube-ez.git
33 | $ cd kube-ez
34 | ```
35 | 2. Make sure you have **Golang** installed on your system.
36 |
37 | ```
38 | go version
39 | ```
40 | > This project when built used ```go version go1.18.3 linux/amd64```
41 | 3. If ```go.mod``` file is not present, then run the following command:
42 | ```
43 | go mod init kube-ez
44 | ```
45 | 4. Now, run the following command:
46 | ```
47 | go mod tidy
48 | ```
49 | > This command will pull all the latest packages from the internet.
50 |
51 | 5. Steps to run the project are mentioned in the [INSTALL.md](https://github.com/kitarp29/kube-ez/blob/main/INSTALL.md)
52 |
53 | ## 🐋 **Docker Image** [Link](https://hub.docker.com/repository/docker/kitarp29/k8s-api)
54 | It's a basic container based on the latest release of **Golang**. The tag *2.0* works well.
55 |
56 | ## 📂 **File Structure**
57 | 1. **api**:
58 | - **api.go**:
59 | This file contains the main logic of the project. It has all the functions that interact with the *client-go* library. It also has the ```main()``` function that starts with the server. It will help us run the project even outside the cluster.
60 | 2. **install**:
61 | - **install.go**:
62 | This file contains the logic of the **install** command. It will apply the changes to the cluster. It handles all the requests related to **Helm** charts. It helps us add/upgrade/delete the charts.
63 | 3. **apply**:
64 | - **apply.go**:
65 | This file contains the logic of the **apply** command. It will apply the changes to the cluster. It helps apply any YAML /JSON File to our cluster.
66 | 4. **yamls**:
67 | - **sa.yaml**: YAML to apply desired ServiceAccount for the project.
68 |    - **crb.yaml**: YAML to apply the desired ClusterRoleBinding for the project.
69 | - **pod.yaml**: YAML to apply the desired Pod for the project.
70 | 5. **server.go**
71 | - This file contains the logic of the **server** command. It will start the server. It will start the server and listen on the port ```8000```. It has all the routes for the project.
72 | 6. **Dockerfile**
73 | 7. Markdown files
74 | 8. License file
75 |
76 |
77 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build stage: compile a static binary (CGO disabled, symbols stripped)
2 | FROM golang:alpine AS build
3 | 
4 | WORKDIR /k8-api
5 | COPY . .
6 | RUN go mod download && \
7 | GO111MODULE=on CGO_ENABLED=0 go build -ldflags "-s -w"
8 | 
9 | # Deploy stage: minimal runtime image containing only the compiled binary
10 | FROM alpine
11 | COPY --from=build /k8-api/k8-api .
12 | 
13 | ENTRYPOINT ["./k8-api"]
14 | # NOTE(review): no CMD — ENTRYPOINT starts the server directly; confirm no default args are needed
15 | EXPOSE 8000
--------------------------------------------------------------------------------
/INSTALL.md:
--------------------------------------------------------------------------------
1 | # 1. **Outside** the Cluster
2 | This is the easiest way to run this project. Provided you have the **kubeconfig** file in your local.
3 |
4 | The code looks for the file at the path ```"HOME"/.kube/config```
5 |
6 | All you have to do after this is:
7 | - Clone the repo
8 | - Open a terminal in the dir
9 | - Run this:
10 | ```
11 | go mod tidy && go run k8-api
12 | ```
13 | - API will be running at localhost:8000 now.
14 |
15 | ### **The project is up and running🔥!**
16 |
17 |
18 |
19 | # 2. **Inside** the Cluster
20 |
21 | Run this command ✨:
22 |
23 | ```
24 | kubectl apply -f https://raw.githubusercontent.com/kitarp29/kube-ez/main/yamls/one-to-rule-them-all.yaml && sleep 5 && kubectl port-forward kube-ez 8000:8000
25 | ```
26 | > API will be running at localhost:8000 now
27 |
28 | > P.S.: I am using the *sleep* command to give the pod some time to start. You can change it if you want.
29 |
30 | ---OR---
31 |
32 | If you want to do a custom installation. Follow the steps below:
33 |
34 | - Service Account:
35 |
36 | We need to make a custom service account to be able to interact with the cluster. We will use this service account in our pod on which we will run the API.
37 | Command to make a custom service account:
38 |
39 | ```
40 | kubectl apply -f - <<EOF
41 | apiVersion: v1
42 | kind: ServiceAccount
43 | metadata:
44 |   name: kube-ez
45 | EOF
46 | ```
47 |
48 | > We can also use the *default* service account. But it is not recommended.
49 |
50 | - Cluster Role:
51 |
52 | We need to make a custom cluster role to be able to interact with the cluster. We will use this cluster role to bind to our **Service Account**. The role should have permission to all the resources in order for the project to run smoothly.
53 | I would advise you **not** to make any role and use the *cluster-admin* role directly. Still, if you want to create a custom role, you can do so. [Refer Here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)
54 |
55 |
56 | - Cluster Role Binding:
57 |
58 | We will bind the **Service Account** to the **Cluster Role** we just created. To do so the commands needed are:
59 |
60 | ```
61 | kubectl apply -f - <<EOF
62 | apiVersion: rbac.authorization.k8s.io/v1
63 | kind: ClusterRoleBinding
64 | metadata:
65 |   name: kube-ez-crb
66 | subjects:
67 | - kind: ServiceAccount
68 |   name: kube-ez
69 |   namespace: default
70 | roleRef:
71 |   kind: ClusterRole
72 |   name: cluster-admin
73 |   apiGroup: rbac.authorization.k8s.io
74 | EOF
75 | ```
76 |
77 | - Deploying the Pod
78 |
79 | This is it! Now that we have the service account and the cluster role binding, we can deploy the pod. We can use this command to deploy the pod:
80 |
81 | ```
82 | kubectl apply -f - <<EOF
83 | apiVersion: v1
84 | kind: Pod
85 | metadata:
86 |   name: kube-ez
87 | spec:
88 |   serviceAccount: kube-ez
89 |   containers:
90 |   - name: kube-ez
91 |     image: kitarp29/k8s-api:9.0
92 |     ports:
93 |     - containerPort: 8000
94 | EOF
95 | ```
96 | - Port-forward:
97 |
98 | Now that we have the pod deployed, we can use the *port-forward* command to access the pod. Expose a port of the pod to the local machine.
99 | To do so, we can use the following command:
100 |
101 | ```
102 | kubectl port-forward kube-ez 8000:8000
103 | ```
104 | > API will be running at localhost:8000 now.
105 | ### **The project is up and running🔥!**
106 |
107 |
108 | Now that the Project is up and running for you.
109 | You can learn how to use the API from the [API Docs](https://github.com/kitarp29/kube-ez/blob/main/API_DOCS.md).
110 |
111 | There are multiple endpoints in the API. You can find all the endpoints in the [API Docs](https://github.com/kitarp29/kube-ez/blob/main/API_DOCS.md)
112 |
113 | Moreover you can find the **Postman Collections** [Here](https://www.getpostman.com/collections/b14cdaad336ab81340b5) 📮
114 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Pratik Singh
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # **Kube-ez**
2 | [](https://github.com/kitarp29/kube-ez/graphs/contributors) [](https://github.com/kitarp29/kube-ez/issues/)
3 | [](https://hub.docker.com/repository/docker/kitarp29/k8s-api)
4 | [](https://hub.docker.com/repository/docker/kitarp29/k8s-api)
5 | 
6 |
7 |
8 |
9 | # **Introduction** 👋🏻
10 |
11 | It is built on **Golang** and utilizes the **client-go** library to interact with Kubernetes Cluster.
12 | It is a plug-and-play solution and can be used to create a kube-ez server. In three easy steps, you will have a simple API to interact with your cluster.
13 | The aim is to build a simple API server that can be used to interact with any Kubernetes cluster.
14 |
15 | In my industrial experience, I have realized that **Kubernetes** is a very powerful tool but, only used by a handful of developers in the organization. The problem is not grasping the concept of the cluster. My last Product Manager was much more versed in AWS than I am.
16 | I feel the reason is that there is no easier way to interact with it.
17 | This project will provide a bunch of API endpoints to perform various functions on the cluster. For now, I will have the Postman collections and API docs to achieve it, plan is to build a Dashboard on the API later.
18 |
19 | **Docker Image: [kitarp29/k8s-api](https://hub.docker.com/repository/docker/kitarp29/k8s-api)**
20 | > Use the Docker image with tag 2.0 or above to run the kube-ez server.
21 |
22 |
23 | # **Getting started** ▶️
24 |
25 | To start using the project you need a Kubernetes Cluster and should have the right access to apply changes to the cluster.
26 | There are two ways to run this project.
27 | 1. **Outside the Cluster**
28 | 2. **Inside the Cluster**
29 |
30 | Steps to run the project are mentioned in the [INSTALL.md](https://github.com/kitarp29/kube-ez/blob/main/INSTALL.md)
31 |
32 |
33 |
34 |
35 | # **Project Features** 🤯
36 | - Get details about any resource in the cluster.
37 | - It *detects* if you are trying to run the project inside or outside of a cluster.
38 | - Create new resources in the cluster.
39 | - Delete resources in the cluster.
40 | - Run CLI commands using the API.
41 | - Manage Helm Charts.
42 | - You can add, install, delete and update HELM charts.
43 | - Get live events from the cluster.
44 | - It is a REST API to interact with the cluster.
45 | - It has a health check endpoint as well.
46 | - More coming soon... 🚧
47 |
48 |
49 |
50 | # **API Docs** 📖
51 |
52 | There are multiple endpoints in the API. You can find all the endpoints in the [API Docs](https://github.com/kitarp29/kube-ez/blob/main/API_DOCS.md)
53 |
54 | Moreover you can find the **Postman Collections** [Here](https://www.getpostman.com/collections/b14cdaad336ab81340b5) 📮
55 |
56 |
57 |
58 | # **Contributors Guide** 🥰
59 |
60 | Thanks for considering contributing to the project. If you have any questions, please feel free to contact me at [Twitter](https://twitter.com/kitarp29).
61 | The Contributors Guide is available [Here](https://github.com/kitarp29/kube-ez/blob/main/CONTRIBUTING.md) 📖
62 |
63 |
64 |
65 | # **License** 🍻
66 |
67 | This project is licensed under the **MIT license**. Feel free to use it and if you want to contribute, please feel free to fork the project and make a pull request. Thanks!
68 |
69 |
70 |
71 | # **FAQ** 🤔
72 |
73 | - **Is this a Unique Product?**
74 |
75 | No, this is not a unique product. There are similar implementations made by other developers.
76 |
77 | - **Purpose of this project?**
78 |
79 | It's a pet project to learn *Kubernetes* and *Golang*. I wanted to build this to better understand these two technologies. I also explored *Docker*
80 |
81 |
82 |
83 | ### Thanks for your interest in my API :)
84 |
85 |
--------------------------------------------------------------------------------
/api/api.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "encoding/json"
7 | "fmt"
8 | "io"
9 | "log"
10 | "os"
11 |
12 | "github.com/sirupsen/logrus"
13 |
14 | v1 "k8s.io/api/core/v1"
15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
16 | "k8s.io/client-go/kubernetes"
17 | "k8s.io/client-go/rest"
18 | "k8s.io/client-go/tools/clientcmd"
19 | )
20 |
21 | // setting a Global variable for the clientset so that I can resuse throughout the code
22 | var Kconfig *kubernetes.Clientset
23 |
24 | // These are all the Structs that are used in the API later in this code
25 | type Pod struct {
26 | Name string
27 | Status string
28 | CreatedAt string
29 | UniqueID string
30 | NodeName string
31 | IP string
32 | ContainersCount int
33 | ContainersInfo []Container
34 | Labels map[string]string
35 | }
36 |
37 | type Container struct {
38 | Name string
39 | Image string
40 | ImagePullPolicy string
41 | Container int
42 | Port []v1.ContainerPort
43 | }
44 |
45 | type Deployment struct {
46 | Name string
47 | Status string
48 | CreatedAt string
49 | UniqueID string
50 | Labels map[string]string
51 | }
52 |
53 | type Configmap struct {
54 | Name string
55 | }
56 |
57 | type Service struct {
58 | Name string
59 | Ports string
60 | }
61 |
62 | type Secret struct {
63 | Name string
64 | SecretMap map[string]string
65 | Type string
66 | CreatedAt string
67 | UniqueID string
68 | }
69 |
70 | type Replicationcontroller struct {
71 | Name string
72 | CreatedAt string
73 | UniqueID string
74 | Labels map[string]string
75 | }
76 |
77 | type Daemonset struct {
78 | Name string
79 | CreatedAt string
80 | UniqueID string
81 | Labels map[string]string
82 | }
83 |
84 | type Namespace struct {
85 | Name string
86 | CreatedAt string
87 | UniqueID string
88 | }
89 |
90 | type Event struct {
91 | Name string
92 | Type string
93 | ObjectName string
94 | CreatedAt string
95 | UniqueID string
96 | }
97 |
98 | //This function is used to interact with the Kubernetes Cluster to get the clienset
99 | // It has two options:
100 | // 1. Current it's setup to be used inside a cluster
101 | // 2. We can configure it to be used outside the cluster
102 |
103 | func Main() {
104 | logrus.Info("Shared Informer app started")
105 | 
106 | // This checks if you have a Kubernetes config file in your home directory. If not it will try to create in in-cluster config and use that.
107 | var config *rest.Config
108 | var err error
109 | config, err = clientcmd.BuildConfigFromFlags("", os.Getenv("HOME")+"/.kube/config")
110 | if err != nil {
111 | // If the Kubeconfig file is not available, use the in-cluster config
112 | logrus.Info("Using in-cluster configuration. Since couldn't find a kubeconfig file.")
113 | config, err = rest.InClusterConfig()
114 | if err != nil {
115 | fmt.Printf("Error loading in-cluster configuration: %s\n", err)
116 | // Both config sources (kubeconfig file and in-cluster) failed at this point.
117 | // So, at this point we tried to connect with local config file. Also tried to connect to one inside a cluster.
118 | logrus.Error(err.Error())
119 | }
120 | }
121 | 
122 | clientset, err := kubernetes.NewForConfig(config)
123 | if err != nil {
124 | logrus.Error(err.Error())
125 | }
126 | // NOTE(review): if both config loads failed above, config is nil here and the clientset may be unusable — confirm whether a hard exit is intended instead.
127 | 
128 | Kconfig = clientset
129 | }
130 |
131 | // This function is used to get the list of all the pods in the cluster with container details
132 | func Pods(AgentNamespace string, ContainerDetails bool, log *logrus.Entry) string {
133 | // for Pods
134 | clientset := Kconfig
135 |
136 | if AgentNamespace == "" {
137 | log.Info("Namespace is empty")
138 | log.Info("Namespace = default")
139 | AgentNamespace = "default"
140 | }
141 |
142 | var podInfo []Pod
143 | var containerInfo []Container
144 | pods, err := clientset.CoreV1().Pods(AgentNamespace).List(context.Background(), metav1.ListOptions{})
145 | if err != nil {
146 | defer func() {
147 | if r := recover(); r != nil {
148 | log.Error("Recovered in Pods(): ", r)
149 | }
150 | }()
151 | log.Panic("Unable to find pods. Error: " + err.Error())
152 | } else {
153 | for i := 0; i < len(pods.Items); i++ {
154 |
155 | podInfo = append(podInfo,
156 | Pod{
157 | Name: pods.Items[i].Name,
158 | Status: string(pods.Items[i].Status.Phase),
159 | CreatedAt: pods.Items[i].CreationTimestamp.String(),
160 | UniqueID: string(pods.Items[i].GetUID()),
161 | NodeName: string(pods.Items[i].Spec.NodeName),
162 | IP: string(pods.Items[i].Status.PodIP),
163 | ContainersCount: len(pods.Items[i].Spec.Containers),
164 | Labels: pods.Items[i].Labels,
165 | })
166 | if ContainerDetails {
167 |
168 | for j := 0; j < len(pods.Items[i].Spec.Containers); j++ {
169 |
170 | containerInfo = append(containerInfo,
171 | Container{
172 | Name: pods.Items[i].Spec.Containers[j].Name,
173 | Container: j,
174 | Image: pods.Items[i].Spec.Containers[j].Image,
175 | ImagePullPolicy: string(pods.Items[i].Spec.Containers[j].ImagePullPolicy),
176 | Port: pods.Items[i].Spec.Containers[j].Ports,
177 | })
178 | }
179 | }
180 | podInfo[i].ContainersInfo = containerInfo
181 | }
182 |
183 | pods_json, err := json.Marshal(podInfo)
184 | if err != nil {
185 | log.Error(err.Error())
186 | }
187 |
188 | return string(pods_json)
189 | }
190 | log.Error("Error in getting pods")
191 | return "Error"
192 | }
193 |
194 | // This function is used to get the list of all the logs in a pod.
195 | func PodLogs(AgentNamespace string, PodName string, log *logrus.Entry) string {
196 | clientset := Kconfig
197 | req := clientset.CoreV1().Pods(AgentNamespace).GetLogs(PodName, &(v1.PodLogOptions{}))
198 | podLogs, err := req.Stream(context.Background())
199 | if err != nil {
200 | log.Error(err.Error())
201 | return "error in opening stream"
202 | }
203 | defer podLogs.Close()
204 |
205 | buf := new(bytes.Buffer)
206 | _, err = io.Copy(buf, podLogs)
207 | if err != nil {
208 | log.Error(err.Error())
209 | return "error in copy information from podLogs to buf"
210 | }
211 | str := buf.String()
212 |
213 | return str
214 | }
215 |
216 | // This function is used to get the list of all the deployments in the cluster
217 | func Deployments(AgentNamespace string, log *logrus.Entry) string {
218 | clientset := Kconfig
219 | if AgentNamespace == "" {
220 | log.Info("Namespace is empty")
221 | log.Info("Namespace = default")
222 | AgentNamespace = "default"
223 | }
224 |
225 | //fmt.Printf("DEPLOYMENTS \n")
226 | var deploymentInfo []Deployment
227 | deployments, err := clientset.AppsV1().Deployments(AgentNamespace).List(context.Background(), metav1.ListOptions{})
228 | if err != nil {
229 | defer func() {
230 | if r := recover(); r != nil {
231 | log.Error("Recovered in Deployments(): ", r)
232 | }
233 | }()
234 | log.Panic("Unable to find Deployments. Error: " + err.Error())
235 | } else {
236 |
237 | for i := 0; i < len(deployments.Items); i++ {
238 | //fmt.Println((deployments.Items[i].Status.Conditions))
239 |
240 | deploymentInfo = append(deploymentInfo,
241 | Deployment{
242 | Name: deployments.Items[i].Name,
243 | Status: string(deployments.Items[i].Status.Conditions[0].Type),
244 | CreatedAt: deployments.Items[i].CreationTimestamp.String(),
245 | UniqueID: string(deployments.Items[i].UID),
246 | Labels: deployments.Items[i].Labels,
247 | })
248 | }
249 |
250 | deployment_json, err := json.Marshal(deploymentInfo)
251 | if err != nil {
252 | log.Error(err.Error())
253 | log.Fatal(err)
254 | }
255 |
256 | return string(deployment_json)
257 | }
258 | log.Error("Error in getting deployments")
259 | return "Error"
260 | }
261 |
262 | // This function is used to get the list of all the Configmaps in the cluster
263 | func Configmaps(AgentNamespace string, log *logrus.Entry) string {
264 | clientset := Kconfig
265 |
266 | if AgentNamespace == "" {
267 | log.Info("Namespace is empty")
268 | log.Info("Namespace = default")
269 | AgentNamespace = "default"
270 | }
271 |
272 | var configmapsInfo []Configmap
273 | configmaps, err := clientset.CoreV1().ConfigMaps(AgentNamespace).List(context.Background(), metav1.ListOptions{})
274 | if err != nil {
275 | defer func() {
276 | if r := recover(); r != nil {
277 | log.Error("Recovered in Configmaps(): ", r)
278 | }
279 | }()
280 | log.Panic("Unable to find Configmaps. Error: " + err.Error())
281 | } else {
282 | for i := 0; i < len(configmaps.Items); i++ {
283 | configmapsInfo = append(configmapsInfo, Configmap{configmaps.Items[i].Name})
284 | }
285 |
286 | configmap_json, err := json.Marshal(configmapsInfo)
287 | if err != nil {
288 | log.Print(err.Error())
289 | log.Fatal(err)
290 | }
291 |
292 | return string(configmap_json)
293 | }
294 | log.Error("Error in getting configmaps")
295 | return "Error"
296 | }
297 |
298 | // This function is used to get the list of all the Services in the cluster
299 | func Services(AgentNamespace string, log *logrus.Entry) string {
300 | clientset := Kconfig
301 |
302 | if AgentNamespace == "" {
303 | log.Info("Namespace is empty")
304 | log.Info("Namespace = default")
305 | AgentNamespace = "default"
306 | }
307 | var servicesInfo []Service
308 |
309 | services, err := clientset.CoreV1().Services(AgentNamespace).List(context.Background(), metav1.ListOptions{})
310 | if err != nil {
311 | defer func() {
312 | if r := recover(); r != nil {
313 | log.Error("Recovered in Services()", r)
314 | }
315 | }()
316 | log.Panic("Unable to find Services. Error: " + err.Error())
317 | } else {
318 | for i := 0; i < len(services.Items); i++ {
319 | servicesInfo = append(servicesInfo, Service{Name: services.Items[i].Name, Ports: services.Items[i].Spec.Ports[0].TargetPort.String()})
320 | }
321 | service_json, err := json.Marshal(servicesInfo)
322 | if err != nil {
323 | log.Error(err.Error())
324 | log.Fatal(err)
325 | }
326 | //fmt.Println(string(pods_json))
327 | return string(service_json)
328 | }
329 | log.Error("Error in getting services")
330 | return "Error"
331 | }
332 |
333 | // This function is used to get the list of all the events in the cluster
334 | func Events(AgentNamespace string, log *logrus.Entry) string {
335 | clientset := Kconfig
336 |
337 | var eventsInfo []Event
338 | if AgentNamespace == "" {
339 | log.Info("Namespace is empty")
340 | log.Info("Namespace = default")
341 | AgentNamespace = "default"
342 | }
343 | events, err := clientset.CoreV1().Events(AgentNamespace).List(context.Background(), metav1.ListOptions{})
344 | if err != nil {
345 | defer func() {
346 | if r := recover(); r != nil {
347 | log.Error("Recovered in Events()", r)
348 | }
349 | }()
350 | log.Panic("Unable to find events. Error: " + err.Error())
351 | } else {
352 | for i := 0; i < len(events.Items); i++ {
353 | eventsInfo = append(eventsInfo,
354 | Event{
355 | Name: events.Items[i].Name,
356 | ObjectName: (events.Items[i].InvolvedObject.Name),
357 | CreatedAt: events.Items[i].LastTimestamp.String(),
358 | UniqueID: string(events.Items[i].UID),
359 | Type: events.Items[i].Type,
360 | })
361 | }
362 | event_json, err := json.Marshal(eventsInfo)
363 | if err != nil {
364 | defer func() {
365 | if r := recover(); r != nil {
366 | log.Error("Recovered in Events()", r)
367 | }
368 | }()
369 | log.Panic("Issue in marshaling JSON in Events. Error: " + err.Error())
370 | }
371 | //fmt.Println(string(pods_json))
372 | return string(event_json)
373 | }
374 | log.Error("Error in getting events")
375 | return "Error"
376 | }
377 |
378 | // This function is used to get the list of all the secrets in the cluster
379 | func Secrets(AgentNamespace string, log *logrus.Entry) string {
380 | clientset := Kconfig
381 | if AgentNamespace == "" {
382 | log.Info("Namespace is empty")
383 | log.Info("Namespace = default")
384 | AgentNamespace = "default"
385 | }
386 | secrets, err := clientset.CoreV1().Secrets(AgentNamespace).List(context.Background(), metav1.ListOptions{})
387 | if err != nil {
388 | defer func() {
389 | if r := recover(); r != nil {
390 | log.Error("Recovered in Secrets()", r)
391 | }
392 | }()
393 | log.Panic("Unable to find secrets. Error: " + err.Error())
394 | } else {
395 | var secretInfo []Secret
396 | for i := 0; i < len(secrets.Items); i++ {
397 | secretInfo = append(secretInfo,
398 | Secret{
399 | Name: secrets.Items[i].Name,
400 | Type: string(secrets.Items[i].Type),
401 | CreatedAt: secrets.Items[i].CreationTimestamp.String(),
402 | UniqueID: string(secrets.Items[i].UID),
403 | })
404 | tmp := make(map[string]string)
405 | for key, value := range secrets.Items[i].Data {
406 | tmp[key] = string(value)
407 | }
408 | secretInfo[i].SecretMap = tmp
409 | }
410 | secret_json, err := json.Marshal(secretInfo)
411 | if err != nil {
412 | defer func() {
413 | if r := recover(); r != nil {
414 | log.Error("Recovered in Secrets()", r)
415 | }
416 | }()
417 | log.Panic("Error in Marshalling JSON in Secrets. Error: " + err.Error())
418 | }
419 | //fmt.Println(string(secret_json))
420 | return string(secret_json)
421 | }
422 | log.Error("Error in getting secrets")
423 | return "Error"
424 | }
425 |
426 | // This function is used to get the list of all the ReplicaController in the cluster
427 | func ReplicationController(AgentNamespace string, log *logrus.Entry) string {
428 | clientset := Kconfig
429 | if AgentNamespace == "" {
430 | log.Info("Namespace is empty")
431 | log.Info("Namespace = default")
432 | AgentNamespace = "default"
433 | }
434 | replicationcontrollers, err := clientset.CoreV1().ReplicationControllers(AgentNamespace).List(context.Background(), metav1.ListOptions{})
435 | if err != nil {
436 | defer func() {
437 | if r := recover(); r != nil {
438 | log.Error("Recovered in ReplicaController()", r)
439 | }
440 | }()
441 | log.Panic("Unable to find ReplicaControllers. Error: " + err.Error())
442 | } else {
443 | var replicationcontrollerInfo []Replicationcontroller
444 | for i := 0; i < len(replicationcontrollers.Items); i++ {
445 | replicationcontrollerInfo = append(replicationcontrollerInfo,
446 | Replicationcontroller{
447 | Name: replicationcontrollers.Items[i].Name,
448 | CreatedAt: replicationcontrollers.Items[i].CreationTimestamp.String(),
449 | UniqueID: string(replicationcontrollers.Items[i].UID),
450 | Labels: (replicationcontrollers.Items[i].Labels),
451 | })
452 | }
453 | replicationcontroller_json, err := json.Marshal(replicationcontrollerInfo)
454 | if err != nil {
455 | defer func() {
456 | if r := recover(); r != nil {
457 | log.Error("Recovered in ReplicaController()", r)
458 | }
459 | }()
460 | log.Panic("Error in Marshalling JSON in ReplicaController. Error: " + err.Error())
461 | }
462 | //fmt.Println(string(replicationcontroller_json))
463 | return string(replicationcontroller_json)
464 | }
465 | log.Error("Error in getting replicationcontrollers")
466 | return "Error"
467 | }
468 |
469 | // This function is used to get the list of all the Daemonsets in the cluster
470 | func DaemonSet(AgentNamespace string, log *logrus.Entry) string {
471 | clientset := Kconfig
472 | if AgentNamespace == "" {
473 | log.Info("Namespace is empty")
474 | log.Info("Namespace = default")
475 | AgentNamespace = "default"
476 | }
477 | daemonsets, err := clientset.ExtensionsV1beta1().DaemonSets(AgentNamespace).List(context.Background(), metav1.ListOptions{})
478 | if err != nil {
479 | log.Error(err.Error())
480 | } else {
481 | var daemonsetInfo []Daemonset
482 | for i := 0; i < len(daemonsets.Items); i++ {
483 | daemonsetInfo = append(daemonsetInfo,
484 | Daemonset{
485 | Name: daemonsets.Items[i].Name,
486 | CreatedAt: daemonsets.Items[i].CreationTimestamp.String(),
487 | UniqueID: string(daemonsets.Items[i].UID),
488 | Labels: (daemonsets.Items[i].Labels),
489 | })
490 | }
491 | daemonset_json, err := json.Marshal(daemonsetInfo)
492 | if err != nil {
493 | log.Error(err.Error())
494 | }
495 | //fmt.Println(string(daemonset_json))
496 | return string(daemonset_json)
497 | }
498 | log.Error("Error in getting daemonsets")
499 | return "Error"
500 | }
501 |
502 | // This function is used to get the list of all the Namespaces in the cluster
503 | func NameSpace(log *logrus.Entry) string {
504 | clientset := Kconfig
505 | namespaces, err := clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
506 | if err != nil {
507 | defer func() {
508 | if r := recover(); r != nil {
509 | log.Error("Recovered in NameSpace()", r)
510 | }
511 | }()
512 | log.Panic("Unable to find namespaces. Error: " + err.Error())
513 | } else {
514 | var namespaceInfo []Namespace
515 | for i := 0; i < len(namespaces.Items); i++ {
516 | namespaceInfo = append(namespaceInfo,
517 | Namespace{
518 | Name: namespaces.Items[i].Name,
519 | CreatedAt: namespaces.Items[i].CreationTimestamp.String(),
520 | UniqueID: string(namespaces.Items[i].UID),
521 | })
522 | }
523 | namespace_json, err := json.Marshal(namespaceInfo)
524 | if err != nil {
525 | log.Error(err.Error())
526 | log.Fatal(err)
527 | }
528 | return string(namespace_json)
529 | }
530 | log.Error("Error in getting namespaces")
531 | return "Error"
532 | }
533 |
534 | // This function creates Namespace in the cluster
535 | func CreateNamespace(namespace string, log *logrus.Entry) string {
536 | log.Info("Namespace=" + namespace)
537 | clientset := Kconfig
538 | ns := &v1.Namespace{
539 | ObjectMeta: metav1.ObjectMeta{
540 | Name: namespace,
541 | Labels: map[string]string{
542 | "name": namespace,
543 | },
544 | },
545 | }
546 | _, err := clientset.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
547 | if err != nil {
548 | log.Error(err.Error())
549 | return err.Error()
550 | }
551 | log.Info("Namespace" + namespace + "successfully")
552 | return "Namespace: " + namespace + " Created!"
553 | }
554 |
555 | // This function deletes Namespace in the cluster
556 | func DeleteNamespace(namespace string, log *logrus.Entry) string {
557 | clientset := Kconfig
558 | err := clientset.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{})
559 | if err != nil {
560 | log.Error(err.Error())
561 | return err.Error()
562 | }
563 | log.Info("Namespace: " + namespace + " Deleted!")
564 | return "Namespace: " + namespace + " Deleted!"
565 | }
566 |
567 | // This function Deletes the Deployments
568 | func DeleteDeployment(namespace string, deployment string, log *logrus.Entry) string {
569 | clientset := Kconfig
570 | err := clientset.AppsV1().Deployments(namespace).Delete(context.Background(), deployment, metav1.DeleteOptions{})
571 | if err != nil {
572 | log.Error(err.Error())
573 | return err.Error()
574 | }
575 | log.Info("Deployment: " + deployment + " Deleted!")
576 | return "Deployment: " + deployment + " Deleted!"
577 | }
578 |
579 | // This function Deletes the services
580 | func DeleteService(namespace string, service string, log *logrus.Entry) string {
581 | clientset := Kconfig
582 | err := clientset.CoreV1().Services(namespace).Delete(context.Background(), service, metav1.DeleteOptions{})
583 | if err != nil {
584 | log.Error(err.Error())
585 | return err.Error()
586 | }
587 | log.Info("Service: " + service + " Deleted!")
588 | return "Service: " + service + " Deleted!"
589 | }
590 |
591 | // This function Deletes the ConfigMap
592 | func DeleteConfigMap(namespace string, configmap string, log *logrus.Entry) string {
593 | clientset := Kconfig
594 | err := clientset.CoreV1().ConfigMaps(namespace).Delete(context.Background(), configmap, metav1.DeleteOptions{})
595 | if err != nil {
596 | log.Error(err.Error())
597 | return err.Error()
598 | }
599 | log.Info("ConfigMap: " + configmap + " Deleted!")
600 | return "ConfigMap: " + configmap + " Deleted!"
601 | }
602 |
603 | // This function Deletes the Secrets
604 | func DeleteSecret(namespace string, secret string, log *logrus.Entry) string {
605 | clientset := Kconfig
606 | err := clientset.CoreV1().Secrets(namespace).Delete(context.Background(), secret, metav1.DeleteOptions{})
607 | if err != nil {
608 | log.Error(err.Error())
609 | return err.Error()
610 | }
611 | log.Info("Secret: " + secret + " Deleted!")
612 | return "Secret: " + secret + " Deleted!"
613 | }
614 |
615 | // This function Deletes the ReplicationController
616 | func DeleteReplicationController(namespace string, replicationcontroller string, log *logrus.Entry) string {
617 | clientset := Kconfig
618 | err := clientset.CoreV1().ReplicationControllers(namespace).Delete(context.Background(), replicationcontroller, metav1.DeleteOptions{})
619 | if err != nil {
620 | log.Error(err.Error())
621 | return err.Error()
622 | }
623 | log.Info("ReplicationController: " + replicationcontroller + " Deleted!")
624 | return "ReplicationController: " + replicationcontroller + " Deleted!"
625 | }
626 |
627 | // This function Deletes the DaemonSet
628 | func DeleteDaemonSet(namespace string, daemonset string, log *logrus.Entry) string {
629 | clientset := Kconfig
630 | err := clientset.ExtensionsV1beta1().DaemonSets(namespace).Delete(context.Background(), daemonset, metav1.DeleteOptions{})
631 | if err != nil {
632 | log.Error(err.Error())
633 | return err.Error()
634 | }
635 | log.Info("DaemonSet: " + daemonset + " Deleted!")
636 | return "DaemonSet: " + daemonset + " Deleted!"
637 | }
638 |
639 | // This function Deletes the Pod
640 | func DeletePod(namespace string, pod string) string {
641 | clientset := Kconfig
642 | err := clientset.CoreV1().Pods(namespace).Delete(context.Background(), pod, metav1.DeleteOptions{})
643 | if err != nil {
644 | log.Print(err.Error())
645 | return err.Error()
646 | }
647 | return "Pod: " + pod + " Deleted!"
648 | }
649 |
650 | // This function Deletes the Event
651 | func DeleteEvent(namespace string, event string, log *logrus.Entry) string {
652 | clientset := Kconfig
653 | err := clientset.CoreV1().Events(namespace).Delete(context.Background(), event, metav1.DeleteOptions{})
654 | if err != nil {
655 | log.Error(err.Error())
656 | return err.Error()
657 | }
658 | log.Info("Event: " + event + " Deleted!")
659 | return "Event: " + event + " Deleted!"
660 | }
661 |
662 | // This function Deletes EVERYTHING in the namespace. My lil nuke!! MUWAHAHAHA
663 | func DeleteAll(namespace string, log *logrus.Entry) string {
664 | clientset := Kconfig
665 | deployments, err := clientset.AppsV1().Deployments(namespace).List(context.Background(), metav1.ListOptions{})
666 | if err != nil {
667 | log.Error(err.Error())
668 | return err.Error()
669 | }
670 | for i := 0; i < len(deployments.Items); i++ {
671 | err := clientset.AppsV1().Deployments(namespace).Delete(context.Background(), deployments.Items[i].Name, metav1.DeleteOptions{})
672 | if err != nil {
673 | log.Error(err.Error())
674 | return err.Error()
675 | }
676 | }
677 | services, err := clientset.CoreV1().Services(namespace).List(context.Background(), metav1.ListOptions{})
678 | if err != nil {
679 | log.Error(err.Error())
680 | return err.Error()
681 | }
682 | for i := 0; i < len(services.Items); i++ {
683 | err := clientset.CoreV1().Services(namespace).Delete(context.Background(), services.Items[i].Name, metav1.DeleteOptions{})
684 | if err != nil {
685 | log.Error(err.Error())
686 | return err.Error()
687 | }
688 | }
689 | configmaps, err := clientset.CoreV1().ConfigMaps(namespace).List(context.Background(), metav1.ListOptions{})
690 | if err != nil {
691 | log.Error(err.Error())
692 | return err.Error()
693 | }
694 | for i := 0; i < len(configmaps.Items); i++ {
695 | err := clientset.CoreV1().ConfigMaps(namespace).Delete(context.Background(), configmaps.Items[i].Name, metav1.DeleteOptions{})
696 | if err != nil {
697 | log.Error(err.Error())
698 | return err.Error()
699 | }
700 | }
701 | secrets, err := clientset.CoreV1().Secrets(namespace).List(context.Background(), metav1.ListOptions{})
702 | if err != nil {
703 | log.Error(err.Error())
704 | return err.Error()
705 | }
706 | for i := 0; i < len(secrets.Items); i++ {
707 | err := clientset.CoreV1().Secrets(namespace).Delete(context.Background(), secrets.Items[i].Name, metav1.DeleteOptions{})
708 | if err != nil {
709 | log.Error(err.Error())
710 | return err.Error()
711 | }
712 | }
713 | replicationcontrollers, err := clientset.CoreV1().ReplicationControllers(namespace).List(context.Background(), metav1.ListOptions{})
714 | if err != nil {
715 | log.Print(err.Error())
716 | return err.Error()
717 | }
718 | for i := 0; i < len(replicationcontrollers.Items); i++ {
719 | err := clientset.CoreV1().ReplicationControllers(namespace).Delete(context.Background(), replicationcontrollers.Items[i].Name, metav1.DeleteOptions{})
720 | if err != nil {
721 | log.Error(err.Error())
722 | return err.Error()
723 | }
724 | }
725 | daemonsets, err := clientset.ExtensionsV1beta1().DaemonSets(namespace).List(context.Background(), metav1.ListOptions{})
726 | if err != nil {
727 | log.Error(err.Error())
728 | return err.Error()
729 | }
730 | for i := 0; i < len(daemonsets.Items); i++ {
731 | err := clientset.ExtensionsV1beta1().DaemonSets(namespace).Delete(context.Background(), daemonsets.Items[i].Name, metav1.DeleteOptions{})
732 | if err != nil {
733 | log.Error(err.Error())
734 | return err.Error()
735 | }
736 | }
737 | pods, err := clientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
738 | if err != nil {
739 | log.Error(err.Error())
740 | return err.Error()
741 | }
742 | for i := 0; i < len(pods.Items); i++ {
743 | err := clientset.CoreV1().Pods(namespace).Delete(context.Background(), pods.Items[i].Name, metav1.DeleteOptions{})
744 | if err != nil {
745 | log.Error(err.Error())
746 | return err.Error()
747 | }
748 | }
749 | events, err := clientset.CoreV1().Events(namespace).List(context.Background(), metav1.ListOptions{})
750 | if err != nil {
751 | log.Error(err.Error())
752 | return err.Error()
753 | }
754 | for i := 0; i < len(events.Items); i++ {
755 | err := clientset.CoreV1().Events(namespace).Delete(context.Background(), events.Items[i].Name, metav1.DeleteOptions{})
756 | if err != nil {
757 | log.Error(err.Error())
758 | return err.Error()
759 | }
760 | }
761 | log.Info("Everything in " + namespace + " Deleted!")
762 | return "All Deleted!"
763 | }
764 |
--------------------------------------------------------------------------------
/apply/apply.go:
--------------------------------------------------------------------------------
1 | package apply
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "io"
7 | "io/ioutil"
8 | "os"
9 |
10 | "github.com/sirupsen/logrus"
11 | "k8s.io/apimachinery/pkg/api/meta"
12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
13 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
14 | "k8s.io/apimachinery/pkg/runtime"
15 | "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
16 | yamlutil "k8s.io/apimachinery/pkg/util/yaml"
17 | "k8s.io/client-go/dynamic"
18 | "k8s.io/client-go/kubernetes"
19 | "k8s.io/client-go/restmapper"
20 | "k8s.io/client-go/tools/clientcmd"
21 | )
22 |
23 | func Main(filename string, log *logrus.Entry) string {
24 |
25 | b, err := ioutil.ReadFile(filename)
26 | if err != nil {
27 | log.Error(err.Error())
28 | return (err.Error())
29 | }
30 | log.Info("%q \n", string(b))
31 |
32 | kubeconfig := os.Getenv("KUBECONFIG")
33 |
34 | config, _ := clientcmd.BuildConfigFromFlags("", kubeconfig)
35 |
36 | c, err := kubernetes.NewForConfig(config)
37 | if err != nil {
38 | log.Error(err.Error())
39 | return (err.Error())
40 | }
41 |
42 | dd, err := dynamic.NewForConfig(config)
43 | if err != nil {
44 | log.Error(err.Error())
45 | return (err.Error())
46 | }
47 |
48 | decoder := yamlutil.NewYAMLOrJSONDecoder(bytes.NewReader(b), 100)
49 | for {
50 | var rawObj runtime.RawExtension
51 | if err = decoder.Decode(&rawObj); err != nil {
52 | log.Error(err.Error())
53 | break
54 | }
55 |
56 | obj, gvk, err := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme).Decode(rawObj.Raw, nil, nil)
57 | if err != nil {
58 | log.Error(err.Error())
59 | return (err.Error())
60 | }
61 |
62 | unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
63 | if err != nil {
64 | log.Error(err.Error())
65 | return (err.Error())
66 | }
67 |
68 | unstructuredObj := &unstructured.Unstructured{Object: unstructuredMap}
69 |
70 | gr, err := restmapper.GetAPIGroupResources(c.Discovery())
71 | if err != nil {
72 | log.Error(err.Error())
73 | return (err.Error())
74 | }
75 |
76 | mapper := restmapper.NewDiscoveryRESTMapper(gr)
77 | mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
78 | if err != nil {
79 | log.Error(err.Error())
80 | return (err.Error())
81 | }
82 |
83 | var dri dynamic.ResourceInterface
84 | if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
85 | if unstructuredObj.GetNamespace() == "" {
86 | unstructuredObj.SetNamespace("default")
87 | }
88 | dri = dd.Resource(mapping.Resource).Namespace(unstructuredObj.GetNamespace())
89 | } else {
90 | dri = dd.Resource(mapping.Resource)
91 | }
92 |
93 | if _, err := dri.Create(context.Background(), unstructuredObj, metav1.CreateOptions{}); err != nil {
94 | log.Error(err.Error())
95 | return (err.Error())
96 | }
97 | }
98 | if err != io.EOF {
99 | log.Error(err.Error())
100 | return (err.Error())
101 | }
102 | return filename + " Applied!"
103 | }
104 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module k8-api
2 |
3 | go 1.18
4 |
5 | require (
6 | github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
7 | github.com/gofrs/flock v0.8.1
8 | github.com/labstack/echo v3.3.10+incompatible
9 | github.com/pkg/errors v0.9.1
10 | github.com/sirupsen/logrus v1.9.3
11 | github.com/unrolled/secure v1.13.0
12 | gopkg.in/yaml.v2 v2.4.0
13 | helm.sh/helm/v3 v3.11.1
14 | k8s.io/api v0.26.0
15 | k8s.io/apimachinery v0.26.0
16 | k8s.io/client-go v0.26.0
17 | )
18 |
19 | require (
20 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
21 | github.com/BurntSushi/toml v1.2.1 // indirect
22 | github.com/MakeNowJust/heredoc v1.0.0 // indirect
23 | github.com/Masterminds/goutils v1.1.1 // indirect
24 | github.com/Masterminds/semver/v3 v3.2.0 // indirect
25 | github.com/Masterminds/sprig/v3 v3.2.3 // indirect
26 | github.com/Masterminds/squirrel v1.5.3 // indirect
27 | github.com/Microsoft/hcsshim v0.9.10 // indirect
28 | github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
29 | github.com/beorn7/perks v1.0.1 // indirect
30 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
31 | github.com/chai2010/gettext-go v1.0.2 // indirect
32 | github.com/containerd/containerd v1.6.26 // indirect
33 | github.com/containerd/log v0.1.0 // indirect
34 | github.com/cyphar/filepath-securejoin v0.2.3 // indirect
35 | github.com/davecgh/go-spew v1.1.1 // indirect
36 | github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
37 | github.com/docker/cli v20.10.21+incompatible // indirect
38 | github.com/docker/distribution v2.8.1+incompatible // indirect
39 | github.com/docker/docker v24.0.9+incompatible // indirect
40 | github.com/docker/docker-credential-helpers v0.7.0 // indirect
41 | github.com/docker/go-connections v0.4.0 // indirect
42 | github.com/docker/go-metrics v0.0.1 // indirect
43 | github.com/docker/go-units v0.4.0 // indirect
44 | github.com/emicklei/go-restful/v3 v3.10.1 // indirect
45 | github.com/evanphx/json-patch v5.6.0+incompatible // indirect
46 | github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
47 | github.com/fatih/color v1.13.0 // indirect
48 | github.com/go-errors/errors v1.0.1 // indirect
49 | github.com/go-gorp/gorp/v3 v3.0.2 // indirect
50 | github.com/go-logr/logr v1.2.3 // indirect
51 | github.com/go-openapi/jsonpointer v0.19.5 // indirect
52 | github.com/go-openapi/jsonreference v0.20.0 // indirect
53 | github.com/go-openapi/swag v0.19.14 // indirect
54 | github.com/gobwas/glob v0.2.3 // indirect
55 | github.com/gogo/protobuf v1.3.2 // indirect
56 | github.com/golang/protobuf v1.5.3 // indirect
57 | github.com/google/btree v1.0.1 // indirect
58 | github.com/google/gnostic v0.5.7-v3refs // indirect
59 | github.com/google/go-cmp v0.5.9 // indirect
60 | github.com/google/gofuzz v1.2.0 // indirect
61 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
62 | github.com/google/uuid v1.3.0 // indirect
63 | github.com/gorilla/mux v1.8.0 // indirect
64 | github.com/gosuri/uitable v0.0.4 // indirect
65 | github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
66 | github.com/huandu/xstrings v1.3.3 // indirect
67 | github.com/imdario/mergo v0.3.12 // indirect
68 | github.com/inconshreveable/mousetrap v1.0.1 // indirect
69 | github.com/jmoiron/sqlx v1.3.5 // indirect
70 | github.com/josharian/intern v1.0.0 // indirect
71 | github.com/json-iterator/go v1.1.12 // indirect
72 | github.com/klauspost/compress v1.15.9 // indirect
73 | github.com/labstack/gommon v0.4.0 // indirect
74 | github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
75 | github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
76 | github.com/lib/pq v1.10.7 // indirect
77 | github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
78 | github.com/mailru/easyjson v0.7.6 // indirect
79 | github.com/mattn/go-colorable v0.1.12 // indirect
80 | github.com/mattn/go-isatty v0.0.14 // indirect
81 | github.com/mattn/go-runewidth v0.0.9 // indirect
82 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
83 | github.com/mitchellh/copystructure v1.2.0 // indirect
84 | github.com/mitchellh/go-wordwrap v1.0.0 // indirect
85 | github.com/mitchellh/reflectwalk v1.0.2 // indirect
86 | github.com/moby/locker v1.0.1 // indirect
87 | github.com/moby/spdystream v0.2.0 // indirect
88 | github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
89 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
90 | github.com/modern-go/reflect2 v1.0.2 // indirect
91 | github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
92 | github.com/morikuni/aec v1.0.0 // indirect
93 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
94 | github.com/opencontainers/go-digest v1.0.0 // indirect
95 | github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
96 | github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
97 | github.com/prometheus/client_golang v1.14.0 // indirect
98 | github.com/prometheus/client_model v0.3.0 // indirect
99 | github.com/prometheus/common v0.37.0 // indirect
100 | github.com/prometheus/procfs v0.8.0 // indirect
101 | github.com/rubenv/sql-migrate v1.2.0 // indirect
102 | github.com/russross/blackfriday/v2 v2.1.0 // indirect
103 | github.com/shopspring/decimal v1.2.0 // indirect
104 | github.com/spf13/cast v1.4.1 // indirect
105 | github.com/spf13/cobra v1.6.1 // indirect
106 | github.com/spf13/pflag v1.0.5 // indirect
107 | github.com/stretchr/objx v0.5.0 // indirect
108 | github.com/valyala/bytebufferpool v1.0.0 // indirect
109 | github.com/valyala/fasttemplate v1.2.1 // indirect
110 | github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
111 | github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
112 | github.com/xeipuuv/gojsonschema v1.2.0 // indirect
113 | github.com/xlab/treeprint v1.1.0 // indirect
114 | go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
115 | golang.org/x/crypto v0.14.0 // indirect
116 | golang.org/x/net v0.17.0 // indirect
117 | golang.org/x/oauth2 v0.10.0 // indirect
118 | golang.org/x/sync v0.3.0 // indirect
119 | golang.org/x/sys v0.13.0 // indirect
120 | golang.org/x/term v0.13.0 // indirect
121 | golang.org/x/text v0.13.0 // indirect
122 | golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
123 | google.golang.org/appengine v1.6.7 // indirect
124 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
125 | google.golang.org/grpc v1.58.3 // indirect
126 | google.golang.org/protobuf v1.31.0 // indirect
127 | gopkg.in/inf.v0 v0.9.1 // indirect
128 | gopkg.in/yaml.v3 v3.0.1 // indirect
129 | k8s.io/apiextensions-apiserver v0.26.0 // indirect
130 | k8s.io/apiserver v0.26.0 // indirect
131 | k8s.io/cli-runtime v0.26.0 // indirect
132 | k8s.io/component-base v0.26.0 // indirect
133 | k8s.io/klog/v2 v2.80.1 // indirect
134 | k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
135 | k8s.io/kubectl v0.26.0 // indirect
136 | k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect
137 | oras.land/oras-go v1.2.2 // indirect
138 | sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
139 | sigs.k8s.io/kustomize/api v0.12.1 // indirect
140 | sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect
141 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
142 | sigs.k8s.io/yaml v1.3.0 // indirect
143 | )
144 |
--------------------------------------------------------------------------------
/golang.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kitarp29/kube-ez/ff29e34ec03a570d5d6ca7f8cb6f2fb567967a0b/golang.jpeg
--------------------------------------------------------------------------------
/install/install.go:
--------------------------------------------------------------------------------
1 | package install
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io/ioutil"
7 | "log"
8 | "os"
9 | "path/filepath"
10 | "strings"
11 | "sync"
12 | "time"
13 |
14 | "gopkg.in/yaml.v2"
15 |
16 | "github.com/gofrs/flock"
17 | "github.com/pkg/errors"
18 | "github.com/sirupsen/logrus"
19 |
20 | "helm.sh/helm/v3/pkg/action"
21 | "helm.sh/helm/v3/pkg/chart"
22 | "helm.sh/helm/v3/pkg/chart/loader"
23 | "helm.sh/helm/v3/pkg/cli"
24 | "helm.sh/helm/v3/pkg/cli/values"
25 | "helm.sh/helm/v3/pkg/downloader"
26 | "helm.sh/helm/v3/pkg/getter"
27 | "helm.sh/helm/v3/pkg/repo"
28 | )
29 |
// settings is the shared Helm CLI environment (repository config/cache
// paths, target namespace, kubeconfig access), initialised once from the
// process environment when the package is loaded.
var settings *cli.EnvSettings = cli.New()
31 |
32 | // RepoAdd adds repo with given name and url
33 | func RepoAdd(name, url string, log *logrus.Entry) string {
34 | repoFile := settings.RepositoryConfig
35 |
36 | //Ensure the file directory exists as it is required for file locking
37 | err := os.MkdirAll(filepath.Dir(repoFile), os.ModePerm)
38 | if err != nil && !os.IsExist(err) {
39 | log.Error(err.Error())
40 | }
41 |
42 | // Acquire a file lock for process synchronization
43 | fileLock := flock.New(strings.Replace(repoFile, filepath.Ext(repoFile), ".lock", 1))
44 | lockCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
45 | defer cancel()
46 |
47 | locked, err := fileLock.TryLockContext(lockCtx, time.Second)
48 | if err == nil && locked {
49 | defer func() {
50 | if err := fileLock.Unlock(); err != nil {
51 | log.Error(err.Error())
52 | }
53 | }()
54 | }
55 | if err != nil {
56 | log.Error(err.Error())
57 | }
58 |
59 | b, err := ioutil.ReadFile(repoFile)
60 | if err != nil && !os.IsNotExist(err) {
61 | log.Error(err.Error())
62 | }
63 |
64 | var f repo.File
65 | if err := yaml.Unmarshal(b, &f); err != nil {
66 | log.Error(err.Error())
67 | }
68 |
69 | if f.Has(name) {
70 | log.Info("repository name (%s) already exists\n", name)
71 | return "repository name already exists"
72 | }
73 |
74 | c := repo.Entry{
75 | Name: name,
76 | URL: url,
77 | }
78 |
79 | r, err := repo.NewChartRepository(&c, getter.All(settings))
80 | if err != nil {
81 | log.Error(err.Error())
82 | }
83 |
84 | if _, err := r.DownloadIndexFile(); err != nil {
85 | err := errors.Wrapf(err, "looks like %q is not a valid chart repository or cannot be reached", url)
86 | log.Error(err.Error())
87 | }
88 |
89 | f.Update(&c)
90 |
91 | if err := f.WriteFile(repoFile, 0644); err != nil {
92 | log.Error(err.Error())
93 | }
94 | log.Info("%q has been added to your repositories\n", name)
95 | return "Repo added"
96 | }
97 |
98 | // RepoUpdate updates charts for all helm repos
99 | func RepoUpdate(log *logrus.Entry) string {
100 | repoFile := settings.RepositoryConfig
101 |
102 | f, err := repo.LoadFile(repoFile)
103 | if os.IsNotExist(errors.Cause(err)) || len(f.Repositories) == 0 {
104 | defer func() {
105 | if r := recover(); r != nil {
106 | log.Error("no repositories found. You must add one before updating)", r)
107 | }
108 | }()
109 | log.Panic("no repositories found. You must add one before updating. Error: " + err.Error())
110 | return "no repositories found. You must add one before updating"
111 | }
112 | var repos []*repo.ChartRepository
113 | for _, cfg := range f.Repositories {
114 | r, err := repo.NewChartRepository(cfg, getter.All(settings))
115 | if err != nil {
116 | defer func() {
117 | if r := recover(); r != nil {
118 | log.Error("Recovered in RepoUpdate()", r)
119 | }
120 | }()
121 | log.Panic("Error: " + err.Error())
122 | return err.Error()
123 | }
124 | repos = append(repos, r)
125 | }
126 |
127 | log.Info("Hang tight while we grab the latest from your chart repositories...\n")
128 | var wg sync.WaitGroup
129 | for _, re := range repos {
130 | wg.Add(1)
131 | go func(re *repo.ChartRepository) {
132 | defer wg.Done()
133 | if _, err := re.DownloadIndexFile(); err != nil {
134 | log.Error("...Unable to get an update from the %q chart repository (%s):\n\t%s\n", re.Config.Name, re.Config.URL, err)
135 | log.Error(err.Error())
136 | } else {
137 | log.Info("...Successfully got an update from the %q chart repository\n", re.Config.Name)
138 | }
139 | }(re)
140 | }
141 | wg.Wait()
142 | log.Info("Update Complete. ⎈ Happy Helming!⎈\n")
143 | return "Update Complete. ⎈ Happy Helming!⎈"
144 | }
145 |
// InstallChart locates chart "repo/chart" via Helm's chart-path options,
// checks its dependencies, and installs it as release "name" into the given
// namespace with the chart's default values. Returns a short status string;
// the rendered manifest is logged on success.
//
// NOTE(review): callers must pass arguments in the exact order
// (name, repo, chart, namespace) — easy to get wrong at call sites.
func InstallChart(name, repo, chart, namespace string, log *logrus.Entry) string {
	// Helm reads the target namespace from the environment via settings.
	os.Setenv("HELM_NAMESPACE", namespace)
	actionConfig := new(action.Configuration)
	if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), os.Getenv("HELM_DRIVER"), debug); err != nil {
		log.Error(err.Error())
		return err.Error()
	}
	client := action.NewInstall(actionConfig)

	// "Devel" installs accept any version, including pre-releases.
	if client.Version == "" && client.Devel {
		client.Version = ">0.0.0-0"
	}
	//name, chart, err := client.NameAndChart(args)
	client.ReleaseName = name
	cp, err := client.ChartPathOptions.LocateChart(fmt.Sprintf("%s/%s", repo, chart), settings)
	if err != nil {
		log.Error(err.Error())
		return err.Error()
	}

	debug("CHART PATH: %s\n", cp)

	p := getter.All(settings)
	// No user-supplied values are merged; the chart installs with defaults.
	valueOpts := &values.Options{}
	vals, err := valueOpts.MergeValues(p)
	if err != nil {
		log.Error(err.Error())
		return err.Error()
	}

	// Add args
	// if err := strvals.ParseInto(args["set"], vals); err != nil {
	// log.Fatal(errors.Wrap(err, "failed parsing --set data"))
	// }

	// Check chart dependencies to make sure all are present in /charts
	chartRequested, err := loader.Load(cp)
	if err != nil {
		log.Error(err.Error())
		return err.Error()
	}

	// Only "application" (or untyped) charts may be installed.
	validInstallableChart, err := isChartInstallable(chartRequested)
	if !validInstallableChart {
		log.Error(err.Error())
		return err.Error()
	}

	if req := chartRequested.Metadata.Dependencies; req != nil {
		// If CheckDependencies returns an error, we have unfulfilled dependencies.
		// As of Helm 2.4.0, this is treated as a stopping condition:
		// https://github.com/helm/helm/issues/2209
		if err := action.CheckDependencies(chartRequested, req); err != nil {
			if client.DependencyUpdate {
				man := &downloader.Manager{
					Out: os.Stdout,
					ChartPath: cp,
					Keyring: client.ChartPathOptions.Keyring,
					SkipUpdate: false,
					Getters: p,
					RepositoryConfig: settings.RepositoryConfig,
					RepositoryCache: settings.RepositoryCache,
				}
				// NOTE(review): a failed dependency update is only logged;
				// the install still proceeds and may fail later in Run.
				if err := man.Update(); err != nil {
					log.Error(err.Error())
				}
			} else {
				// NOTE(review): unfulfilled dependencies are only logged
				// here; the install still proceeds.
				log.Error(err.Error())
			}
		}
	}

	client.Namespace = settings.Namespace()
	release, err := client.Run(chartRequested, vals)
	if err != nil {
		log.Error(err.Error())
		return err.Error()
	}
	log.Info(release.Manifest)
	return "Chart installed"
}
228 |
229 | func isChartInstallable(ch *chart.Chart) (bool, error) {
230 | switch ch.Metadata.Type {
231 | case "", "application":
232 | return true, nil
233 | }
234 | return false, errors.Errorf("%s charts are not installable", ch.Metadata.Type)
235 | }
236 |
// debug is a Helm action.DebugLog adapter that writes formatted messages,
// prefixed with "[debug]", through the standard-library logger.
func debug(format string, v ...interface{}) {
	prefixed := fmt.Sprintf("[debug] %s\n", format)
	if err := log.Output(2, fmt.Sprintf(prefixed, v...)); err != nil {
		log.Print(err.Error())
	}
}
244 |
245 | func DeleteChart(name, namespace string, log *logrus.Entry) string {
246 | os.Setenv("HELM_NAMESPACE", namespace)
247 | actionConfig := new(action.Configuration)
248 | if err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), os.Getenv("HELM_DRIVER"), debug); err != nil {
249 | log.Error(err.Error())
250 | return err.Error()
251 | }
252 | client := action.NewUninstall(actionConfig)
253 | res, err := client.Run(name)
254 | if err != nil {
255 | log.Error(err.Error())
256 | return err.Error()
257 | }
258 | return res.Info
259 | }
260 |
--------------------------------------------------------------------------------
/server.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | api "k8-api/api"
7 | apply "k8-api/apply"
8 | "k8-api/install"
9 | "net/http"
10 | "runtime"
11 | "time"
12 |
13 | "github.com/distribution/distribution/v3/uuid"
14 | "github.com/labstack/echo"
15 | "github.com/labstack/echo/middleware"
16 | "github.com/sirupsen/logrus"
17 | "github.com/unrolled/secure"
18 | )
19 |
// timeoutMiddleware is echo middleware that measures how long the handler
// took and, when a successful request exceeded six seconds, replaces the
// result with a 408 Request Timeout error.
//
// NOTE(review): this does not cancel or bound the handler — it always runs
// to completion, and by the time the 408 is returned a response body may
// already have been written. True cancellation would need a context
// deadline; confirm this post-hoc behaviour is intended before relying on it.
func timeoutMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		start := time.Now()
		err := next(c)
		duration := time.Since(start)

		// Handler errors take precedence over the duration check.
		if err != nil {
			return err
		}

		if duration > 6*time.Second {
			return echo.NewHTTPError(http.StatusRequestTimeout, "Request timed out")
		}

		return nil
	}
}
37 |
38 | func retryMax(next echo.HandlerFunc) echo.HandlerFunc {
39 | return func(c echo.Context) error {
40 | for i := 0; i < 5; i++ {
41 | err := next(c)
42 | if err == nil {
43 | return nil
44 | }
45 | time.Sleep(1 * time.Second)
46 | }
47 | return errors.New("Tried Multiple times, but failed. Time to restart the server!")
48 | }
49 | }
50 |
51 | func main() {
52 |
53 | e := echo.New()
54 |
55 | // Setting up Logging
56 | log := logrus.New()
57 |
58 | //making the logs in JSON format
59 | log.SetReportCaller(true)
60 | log.Formatter = &logrus.JSONFormatter{
61 | CallerPrettyfier: func(f *runtime.Frame) (string, string) {
62 | return fmt.Sprintf("%s()", f.Function), fmt.Sprintf("%s:%d", f.File, f.Line)
63 | },
64 | }
65 |
66 | // Securing the API, customise as per your usage
67 | // Add more options as per your need from Here: https://github.com/unrolled/secure#available-options
68 | secureMiddleware := secure.New(secure.Options{
69 | SSLRedirect: false,
70 | // SSLHost : "localhost" Remove this if you are not using on localhost
71 | })
72 |
73 | // Middleware to secure the API
74 | e.Use(echo.WrapMiddleware(secureMiddleware.Handler))
75 |
76 | // Middleware to add UUID to each request, helps us to track the request in case of any error
77 | e.Use(func(next echo.HandlerFunc) echo.HandlerFunc {
78 | return func(c echo.Context) error {
79 | c.Set("uuid", "kube-ez-"+uuid.Generate().String()[:8])
80 | cc := c
81 | return next(cc)
82 | }
83 | })
84 |
85 | // Middleware to set the order of the log that is genererated
86 | e.Use(middleware.LoggerWithConfig(middleware.LoggerConfig{
87 | Format: `{"level":"INFO","time":"${time_rfc3339_nano}","id":"${id}","remote_ip":"${remote_ip}",` +
88 | `"host":"${host}","method":"${method}","uri":"${uri}","user_agent":"${user_agent}",` +
89 | `"status":${status},"error":"${error}","latency":${latency},"latency_human":"${latency_human}"` +
90 | `,"bytes_in":${bytes_in},"bytes_out":${bytes_out}}` + "\n",
91 | CustomTimeFormat: "2006-01-02 15:04:05",
92 | }))
93 |
94 | // These two middlewares are used to handle the timeout and retry the request
95 | e.Use(timeoutMiddleware, retryMax)
96 | // Calling the Main fucntion that connects with the kubernetes cluster
97 | api.Main()
98 |
99 | //Middlewae to handle CORS
100 | e.Use(middleware.CORSWithConfig(middleware.CORSConfig{
101 | AllowOrigins: []string{"*"},
102 | AllowMethods: []string{echo.GET, echo.HEAD, echo.PUT, echo.PATCH, echo.POST, echo.DELETE},
103 | }))
104 |
105 | // All the routes are described this point forward
106 | e.GET("/", func(c echo.Context) error {
107 | return c.String(http.StatusOK, "Yes! I am alive!\n")
108 | })
109 |
110 | e.GET("/pods", func(c echo.Context) error {
111 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
112 | l.Info("Get pods intitiated")
113 | namespace := c.QueryParam("namespace")
114 | containerDetails := c.QueryParam("containerDetails") == "True" || c.QueryParam("containerDetails") == "true"
115 | return c.String(http.StatusOK, api.Pods(namespace, containerDetails, l))
116 | })
117 |
118 | e.GET("/namespace", func(c echo.Context) error {
119 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
120 | l.Info("Get Namespace intitiated")
121 | return c.String(http.StatusOK, api.NameSpace(l))
122 | })
123 |
124 | e.GET("/deployments", func(c echo.Context) error {
125 | namespace := c.QueryParam("namespace")
126 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
127 | l.Info("Get Deployments intitiated")
128 | return c.String(http.StatusOK, api.Deployments(namespace, l))
129 | })
130 |
131 | e.GET("/configmaps", func(c echo.Context) error {
132 | namespace := c.QueryParam("namespace")
133 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
134 | l.Info("Get Configmaps intitiated")
135 | return c.String(http.StatusOK, api.Configmaps(namespace, l))
136 | })
137 |
138 | e.GET("/services", func(c echo.Context) error {
139 | namespace := c.QueryParam("namespace")
140 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
141 | l.Info("Get Services intitiated")
142 | return c.String(http.StatusOK, api.Services(namespace, l))
143 | })
144 |
145 | e.GET("/events", func(c echo.Context) error {
146 | namespace := c.QueryParam("namespace")
147 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
148 | l.Info("Get Events intitiated")
149 | return c.String(http.StatusOK, api.Events(namespace, l))
150 | })
151 |
152 | e.GET("/secrets", func(c echo.Context) error {
153 | namespace := c.QueryParam("namespace")
154 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
155 | l.Info("Get Secrets intitiated")
156 | return c.String(http.StatusOK, api.Secrets(namespace, l))
157 | })
158 |
159 | e.GET("/replicationController", func(c echo.Context) error {
160 | namespace := c.QueryParam("namespace")
161 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
162 | l.Info("Get RepilicationControllers intitiated")
163 | return c.String(http.StatusOK, api.ReplicationController(namespace, l))
164 | })
165 |
166 | e.GET("/daemonset", func(c echo.Context) error {
167 | namespace := c.QueryParam("namespace")
168 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
169 | l.Info("Get Daemaonsets intitiated")
170 | return c.String(http.StatusOK, api.DaemonSet(namespace, l))
171 | })
172 |
173 | e.GET("/podLogs", func(c echo.Context) error {
174 | namespace := c.QueryParam("namespace")
175 | pod := c.QueryParam("pod")
176 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
177 | l.Info("Get Pod's Logs intitiated")
178 | return c.String(http.StatusOK, api.PodLogs(namespace, pod, l))
179 | })
180 |
181 | e.GET("/helmRepoUpdate", func(c echo.Context) error {
182 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
183 | l.Info("Get Helm Repo updates intitiated")
184 | return c.String(http.StatusOK, install.RepoUpdate(l))
185 | })
186 |
187 | e.POST("/helmRepoAdd", func(c echo.Context) error {
188 | url := c.QueryParam("url")
189 | repoName := c.QueryParam("repoName")
190 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
191 | l.Info("Adding Helm Repo intitiated")
192 | return c.String(http.StatusOK, install.RepoAdd(repoName, url, l))
193 | })
194 |
195 | e.POST("/helmInstall", func(c echo.Context) error {
196 | namespace := c.QueryParam("namespace")
197 | chartName := c.QueryParam("chartName")
198 | name := c.QueryParam("name")
199 | repo := c.QueryParam("repo")
200 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
201 | l.Info("Adding Helm Install intitiated")
202 | return c.String(http.StatusOK, install.InstallChart(namespace, chartName, name, repo, l))
203 | })
204 |
205 | e.POST("/createNamespace", func(c echo.Context) error {
206 | namespace := c.FormValue("namespace")
207 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
208 | l.Info("Creating Namespace intitiated")
209 | return c.String(http.StatusOK, api.CreateNamespace(namespace, l))
210 | })
211 |
212 | e.POST("/applyFile", func(c echo.Context) error {
213 | filepath := c.FormValue("filepath")
214 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
215 | l.Info("Intiating File appliying")
216 | return c.String(http.StatusOK, apply.Main(filepath, l))
217 | })
218 |
219 | e.DELETE("/deleteHelm", func(c echo.Context) error {
220 | namespace := c.FormValue("namespace")
221 | name := c.FormValue("name")
222 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
223 | l.Info("Delete Helm intitiated")
224 | return c.String(http.StatusOK, install.DeleteChart(name, namespace, l))
225 | })
226 |
227 | e.DELETE("/deleteNamespace", func(c echo.Context) error {
228 | namespace := c.FormValue("namespace")
229 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
230 | l.Info("Deleting Namespace intitiated")
231 | return c.String(http.StatusOK, api.DeleteNamespace(namespace, l))
232 | })
233 |
234 | e.DELETE("/deleteDeployment", func(c echo.Context) error {
235 | namespace := c.FormValue("namespace")
236 | deployment := c.FormValue("deployment")
237 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
238 | l.Info("Delete Deployment intitiated")
239 | return c.String(http.StatusOK, api.DeleteDeployment(namespace, deployment, l))
240 | })
241 |
242 | e.DELETE("/deleteService", func(c echo.Context) error {
243 | namespace := c.FormValue("namespace")
244 | service := c.FormValue("service")
245 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
246 | l.Info("Delete Service intitiated")
247 | return c.String(http.StatusOK, api.DeleteService(namespace, service, l))
248 | })
249 |
250 | e.DELETE("/deleteConfigMap", func(c echo.Context) error {
251 | namespace := c.FormValue("namespace")
252 | configMap := c.FormValue("configMap")
253 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
254 | l.Info("Delete Configmap intitiated")
255 | return c.String(http.StatusOK, api.DeleteConfigMap(namespace, configMap, l))
256 | })
257 |
258 | e.DELETE("/deleteSecret", func(c echo.Context) error {
259 | namespace := c.FormValue("namespace")
260 | secret := c.FormValue("secret")
261 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
262 | l.Info("Delete Secret intitiated")
263 | return c.String(http.StatusOK, api.DeleteSecret(namespace, secret, l))
264 | })
265 |
266 | e.DELETE("/deleteReplicationController", func(c echo.Context) error {
267 | namespace := c.FormValue("namespace")
268 | replicationController := c.FormValue("replicationController")
269 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
270 | l.Info("Delete ReplicationControlller intitiated")
271 | return c.String(http.StatusOK, api.DeleteReplicationController(namespace, replicationController, l))
272 | })
273 |
274 | e.DELETE("/deleteDaemonSet", func(c echo.Context) error {
275 | namespace := c.FormValue("namespace")
276 | daemonSet := c.FormValue("daemonSet")
277 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
278 | l.Info("Delete Daemonset intitiated")
279 | return c.String(http.StatusOK, api.DeleteDaemonSet(namespace, daemonSet, l))
280 | })
281 |
282 | e.DELETE("/deletePod", func(c echo.Context) error {
283 | namespace := c.FormValue("namespace")
284 | pod := c.FormValue("pod")
285 | return c.String(http.StatusOK, api.DeletePod(namespace, pod))
286 | })
287 |
288 | e.DELETE("/deleteEvent", func(c echo.Context) error {
289 | namespace := c.FormValue("namespace")
290 | event := c.FormValue("event")
291 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
292 | l.Info("Delete Event intitiated")
293 | return c.String(http.StatusOK, api.DeleteEvent(namespace, event, l))
294 | })
295 |
296 | e.DELETE("/deleteAll", func(c echo.Context) error {
297 | namespace := c.FormValue("namespace")
298 | l := log.WithFields(logrus.Fields{"uuid": c.Get("uuid")})
299 | l.Info("Delete All intitiated")
300 | return c.String(http.StatusOK, api.DeleteAll(namespace, l))
301 | })
302 |
303 | // Run Server
304 | e.Logger.Fatal(e.Start(":8000"))
305 | }
306 |
--------------------------------------------------------------------------------
/yamls/crb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: kube-ez
5 | subjects:
6 | - kind: ServiceAccount
7 | name: kube-ez # name of your service account
8 | namespace: default # this is the namespace your service account is in
9 | roleRef: # referring to your ClusterRole
10 | kind: ClusterRole
11 | name: cluster-admin # or the custom role you created in the last step
12 | apiGroup: rbac.authorization.k8s.io
--------------------------------------------------------------------------------
/yamls/one-to-rule-them-all.yaml:
--------------------------------------------------------------------------------
1 | # One YAML to rule them all...One YAML to find them...One YAML to bring them all and in the darkness bind them
2 | ---
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 | name: kube-ez # Or Your custom Name
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: ClusterRoleBinding
10 | metadata:
11 | name: kube-ez
12 | subjects:
13 | - kind: ServiceAccount
14 | name: kube-ez # name of your service account
15 | namespace: default # this is the namespace your service account is in
16 | roleRef: # referring to your ClusterRole
17 | kind: ClusterRole
18 | name: cluster-admin # or the custom role you created in the last step
19 | apiGroup: rbac.authorization.k8s.io
20 | ---
21 | apiVersion: apps/v1
22 | kind: Deployment
23 | metadata:
24 | name: kube-ez
25 | spec:
26 | replicas: 1
27 | selector:
28 | matchLabels:
29 | app: kube-ez
30 | template:
31 | metadata:
32 | labels:
33 | app: kube-ez
34 | spec:
      serviceAccountName: kube-ez # "serviceAccount" is deprecated; "serviceAccountName" is the supported field
36 | containers:
37 | - name: kube-ez
38 | image: kitarp29/k8s-api:10
39 | imagePullPolicy: Always
40 | ports:
41 | - containerPort: 8000
42 | securityContext:
43 | allowPrivilegeEscalation: false
44 | livenessProbe:
45 | initialDelaySeconds: 60
46 | periodSeconds: 300
47 | successThreshold: 1
48 | failureThreshold: 3
49 | httpGet:
50 | path: /
51 | port: 8000
52 | restartPolicy: Always
--------------------------------------------------------------------------------
/yamls/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: kube-ez
5 | spec:
  serviceAccountName: kube-ez # "serviceAccount" is deprecated; "serviceAccountName" is the supported field
7 | containers:
8 | - name: kube-ez
9 | image: kitarp29/k8s-api:latest
10 | ports:
11 | - containerPort: 8000
12 | securityContext:
13 | allowPrivilegeEscalation: false
14 | livenessProbe:
15 | initialDelaySeconds: 60
16 | periodSeconds: 300
17 | successThreshold: 1
18 | failureThreshold: 3
19 | httpGet:
20 | path: /
21 | port: 8000
--------------------------------------------------------------------------------
/yamls/sa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: kube-ez #Or Your custom Name
--------------------------------------------------------------------------------