├── docker
│   └── base
│       └── Dockerfile
├── controller
│   ├── Dockerfile
│   └── main.go
├── infra
│   └── kubernetes
│       ├── nginx-example
│       │   ├── svc.yaml
│       │   ├── ingress.yaml
│       │   └── deployment.yaml
│       └── ingress-controller
│           ├── default-backend.yaml
│           └── nginx-ingress-controller.yaml
└── kubernetes
    └── third-party-resource.yaml

/docker/base/Dockerfile:
--------------------------------------------------------------------------------
FROM alpine
RUN apk update && apk add openssl ca-certificates

--------------------------------------------------------------------------------
/controller/Dockerfile:
--------------------------------------------------------------------------------
FROM alpine
COPY controller controller
ENTRYPOINT ["/controller"]

--------------------------------------------------------------------------------
/infra/kubernetes/nginx-example/svc.yaml:
--------------------------------------------------------------------------------
kind: Service
apiVersion: v1
metadata:
  name: nginx-example
spec:
  selector:
    app: nginx-example
  ports:
  - port: 80

--------------------------------------------------------------------------------
/kubernetes/third-party-resource.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: ThirdPartyResource
metadata:
  name: kubereview-deployment.kamalmarhubi.com
description: "An instance of an application for review"
versions:
- name: v1

--------------------------------------------------------------------------------
/infra/kubernetes/nginx-example/ingress.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-example
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  rules:
  - host: nginx-example.k8s.kamal.cloud
    http:
      paths:
      - backend:
          serviceName: nginx-example
          servicePort: 80

--------------------------------------------------------------------------------
/infra/kubernetes/nginx-example/deployment.yaml:
--------------------------------------------------------------------------------
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-example
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx-example
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80

--------------------------------------------------------------------------------
/infra/kubernetes/ingress-controller/default-backend.yaml:
--------------------------------------------------------------------------------
# From https://github.com/kubernetes/ingress/blob/e8b61b40d9d00e33ef7fdd0671b822f33f373da6/examples/deployment/nginx/default-backend.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    k8s-app: default-http-backend
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: gcr.io/google_containers/defaultbackend:1.0
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
  labels:
    k8s-app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: default-http-backend

--------------------------------------------------------------------------------
/infra/kubernetes/ingress-controller/nginx-ingress-controller.yaml:
--------------------------------------------------------------------------------
# Ingress controller service.
# From https://github.com/kubernetes/ingress/blob/e8b61b40d9d00e33ef7fdd0671b822f33f373da6/examples/static-ip/nginx/static-ip-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-ingress-lb
  annotations:
    # This annotation preserves the client source IP by sending external
    # traffic only to node-local endpoints; left commented out for now.
    # service.beta.kubernetes.io/external-traffic: OnlyLocal
  labels:
    app: nginx-ingress-lb
spec:
  type: LoadBalancer
  # Static IP in my project. Must be a regional IP, not global.
  loadBalancerIP: 173.255.114.90
  ports:
  - port: 80
    name: http
    targetPort: 80
  - port: 443
    name: https
    targetPort: 443
  selector:
    # Selects nginx-ingress-controller pods
    k8s-app: nginx-ingress-controller
---
# Ingress controller deployment.
# From https://github.com/kubernetes/ingress/blob/e8b61b40d9d00e33ef7fdd0671b822f33f373da6/examples/static-ip/nginx/nginx-ingress-controller.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-controller
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-controller
    spec:
      # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
      # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
      # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
      # like with kubeadm
      # hostNetwork: true
      terminationGracePeriodSeconds: 60
      containers:
      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.3
        name: nginx-ingress-controller
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 1
        ports:
        - containerPort: 80
          hostPort: 80
        - containerPort: 443
          hostPort: 443
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - /nginx-ingress-controller
        - --default-backend-service=kube-system/default-http-backend
        - --publish-service=$(POD_NAMESPACE)/nginx-ingress-lb

--------------------------------------------------------------------------------
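The manifests above set up the shared infrastructure (an nginx ingress controller with a default backend, plus an example app), and kubernetes/third-party-resource.yaml registers a ThirdPartyResource; controller/main.go below registers a TPR itself at startup and then watches for ReviewDeployment instances. As a rough illustration of how such an instance could be created from Go, here is a sketch that assumes it lives in the same main package as the controller (so its types, constants, and imports are in scope); the helper name createReviewDeployment and the repo/PR/ref values are hypothetical, not part of this repo.

// Hypothetical sketch: create a ReviewDeployment instance through the TPR API
// using the REST client returned by NewClient() in controller/main.go.
func createReviewDeployment(client *rest.RESTClient) (*ReviewDeployment, error) {
	rd := &ReviewDeployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: "some-app-pr42", // hypothetical object name
		},
		Spec: ReviewDeploymentSpec{
			Repo:          "kamalmarhubi/some-app", // hypothetical GitHub repo
			PullRequestID: 42,
			Ref:           "abc123",
		},
	}

	// Mirrors the Put() chain in UpdateReviewDeployment, but POSTs a new object.
	var result ReviewDeployment
	err := client.Post().
		Resource(ReviewDeploymentResourcePath).
		Namespace(api.NamespaceDefault).
		Body(rd).
		Do().
		Into(&result)
	if err != nil {
		return nil, err
	}
	return &result, nil
}

With the TPR registered as review-deployment.kubereview.kamalmarhubi.com (as the controller does), the equivalent manifest would use apiVersion kubereview.kamalmarhubi.com/v1 and kind ReviewDeployment.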
/controller/main.go:
--------------------------------------------------------------------------------
package main

import (
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/selection"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/watch"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api"
	apiv1 "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/apis/extensions/v1beta1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"

	// Load GCP auth provider.
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)

const (
	KubeReviewDomain                    = "kubereview.kamalmarhubi.com"
	ReviewDeploymentResourceDescription = "An instance of an application for review"
	ReviewDeploymentResourceGroup       = KubeReviewDomain
	ReviewDeploymentLabel               = KubeReviewDomain + "/" + "reviewdeployment"

	ReviewDeploymentResourcePath         = "reviewdeployments"
	ReviewDeploymentResourceName         = "review-deployment." + KubeReviewDomain
	ReviewDeploymentResourceVersion      = "v1"
	ReviewDeploymentResourceKind         = "ReviewDeploymentResource"
	ReviewDeploymentResourceGroupVersion = ReviewDeploymentResourceGroup + "/" + ReviewDeploymentResourceVersion
)

var kubeconfig = flag.String("kubeconfig", "", "Path to a kube config. Only required if out-of-cluster.")
var port = flag.Int("port", 80, "Port to listen on")

var requirement *labels.Requirement

func init() {
	req, err := labels.NewRequirement("harr", selection.DoesNotExist, nil)
	if err != nil {
		log.Fatalf("Bad requirement: %v", err)
	}
	requirement = req
}

const (
	BuilderImage          = "gcr.io/absolute-realm-611/kubereview-builder"
	DownloaderImage       = "gcr.io/absolute-realm-611/kubereview-downloader"
	CloudProject          = "absolute-realm-611"
	ServiceAccount        = "builder@absolute-realm-611.iam.gserviceaccount.com"
	ServiceAccountKeyFile = "/etc/kubereview/builder-key.json"
)

func main() {
	flag.Parse()

	// Create the client config. Use kubeconfig if given, otherwise assume in-cluster.
	config, err := buildConfig(*kubeconfig)
	if err != nil {
		log.Fatalf("Could not build client config: %v", err)
	}

	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	createThirdPartyResourceIfMissing(clientset)

	reviewDeploymentClient, reviewDeploymentScheme, err := NewClient(config)
	if err != nil {
		log.Fatalf("Could not create ReviewDeployment client: %v", err)
	}

	// Start a watcher on instances of our TPR.
	watcher := Watcher{
		clientset,
		reviewDeploymentClient,
		reviewDeploymentScheme,
	}

	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	go watcher.Run(ctx)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var reviewDeployments ReviewDeploymentList

		err = reviewDeploymentClient.Get().
			Resource("reviewdeployments").
104 | Namespace(api.NamespaceDefault). 105 | // LabelsSelectorParam(labels.NewSelector().Add(*requirement)). 106 | Do().Into(&reviewDeployments) 107 | 108 | if err != nil { 109 | log.Printf("error getting review deployments: %v", err) 110 | w.WriteHeader(http.StatusInternalServerError) 111 | return 112 | } 113 | fmt.Fprintf(w, "%v", reviewDeployments) 114 | }) 115 | 116 | http.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) { 117 | log.Print(*r) 118 | b := buildJob{ 119 | repo: os.Args[1], 120 | zipURL: os.Args[2], 121 | buildContextDir: os.Args[3], 122 | imageName: os.Args[4], 123 | imageTag: os.Args[5], 124 | } 125 | 126 | toCreate := b.Pod() 127 | pod, e := clientset.CoreV1().Pods(api.NamespaceDefault).Create(toCreate) 128 | 129 | if e != nil { 130 | log.Print(e) 131 | } 132 | 133 | enc := json.NewEncoder(w) 134 | enc.SetIndent("", " ") 135 | enc.Encode(pod) 136 | }) 137 | 138 | log.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", *port), nil)) 139 | } 140 | 141 | func buildConfig(kubeconfig string) (*rest.Config, error) { 142 | if kubeconfig != "" { 143 | return clientcmd.BuildConfigFromFlags("", kubeconfig) 144 | } 145 | return rest.InClusterConfig() 146 | } 147 | 148 | type buildJob struct { 149 | repo, zipURL, buildContextDir, imageName, imageTag string 150 | } 151 | 152 | func (b *buildJob) getBuildImageName() string { 153 | return fmt.Sprintf("gcr.io/%v/%v:%v", CloudProject, b.imageName, b.imageTag) 154 | } 155 | 156 | func (b *buildJob) Pod() *apiv1.Pod { 157 | initContainers, _ := json.Marshal([]apiv1.Container{ 158 | { 159 | Name: "download", 160 | Image: DownloaderImage, 161 | ImagePullPolicy: apiv1.PullAlways, 162 | Args: []string{b.zipURL}, 163 | Env: []apiv1.EnvVar{ 164 | {Name: "WORKSPACE", Value: "/workspace/src"}, 165 | }, 166 | VolumeMounts: []apiv1.VolumeMount{ 167 | {Name: "workspace-volume", MountPath: "/workspace"}, 168 | }, 169 | }, 170 | }) 171 | 172 | return &apiv1.Pod{ 173 | ObjectMeta: metav1.ObjectMeta{ 174 | GenerateName: fmt.Sprintf("kubereview-build-%s-", strings.Replace(b.repo, "/", "--", -1)), 175 | Annotations: map[string]string{ 176 | "pod.beta.kubernetes.io/init-containers": string(initContainers), 177 | }, 178 | }, 179 | Spec: apiv1.PodSpec{ 180 | // TODO ActiveDeadlineSeconds: ActiveDeadlineSeconds, 181 | RestartPolicy: apiv1.RestartPolicyNever, 182 | Containers: []apiv1.Container{ 183 | apiv1.Container{ 184 | Name: "build", 185 | Image: BuilderImage, 186 | ImagePullPolicy: apiv1.PullAlways, 187 | Args: []string{b.imageName, b.imageTag}, 188 | Env: []apiv1.EnvVar{ 189 | {Name: "CLOUD_PROJECT", Value: CloudProject}, 190 | {Name: "SERVICE_ACCOUNT", Value: ServiceAccount}, 191 | {Name: "SERVICE_ACCOUNT_KEY_FILE", Value: ServiceAccountKeyFile}, 192 | {Name: "BUILD_CONTEXT_DIR", Value: b.buildContextDir}, 193 | {Name: "WORKSPACE", Value: "/workspace/src"}, 194 | }, 195 | VolumeMounts: []apiv1.VolumeMount{ 196 | {Name: "workspace-volume", MountPath: "/workspace"}, 197 | {Name: "keyfile", MountPath: "/etc/kubereview", ReadOnly: true}, 198 | }, 199 | }, 200 | }, 201 | Volumes: []apiv1.Volume{ 202 | { 203 | Name: "keyfile", 204 | VolumeSource: apiv1.VolumeSource{ 205 | Secret: &apiv1.SecretVolumeSource{ 206 | SecretName: "builder-key", 207 | }, 208 | }, 209 | }, 210 | { 211 | Name: "workspace-volume", 212 | VolumeSource: apiv1.VolumeSource{ 213 | EmptyDir: &apiv1.EmptyDirVolumeSource{}, 214 | }, 215 | }, 216 | }, 217 | }, 218 | } 219 | } 220 | 221 | func createThirdPartyResourceIfMissing(clientset *kubernetes.Clientset) { 222 | // initialize 
third party resource if it does not exist 223 | tpr, err := clientset.ExtensionsV1beta1(). 224 | ThirdPartyResources(). 225 | Get(ReviewDeploymentResourceName, metav1.GetOptions{}) 226 | 227 | if err == nil { 228 | log.Printf("Third party resource %v already exists", tpr.Name) 229 | } else { 230 | if !errors.IsNotFound(err) { 231 | log.Fatalf("Error checking for third party resource: %v", err) 232 | } else { 233 | tpr := &v1beta1.ThirdPartyResource{ 234 | ObjectMeta: metav1.ObjectMeta{ 235 | Name: ReviewDeploymentResourceName, 236 | }, 237 | Versions: []v1beta1.APIVersion{ 238 | {Name: "v1"}, 239 | }, 240 | Description: ReviewDeploymentResourceDescription, 241 | } 242 | log.Printf("Creating third party resource: %#v", tpr) 243 | 244 | result, err := clientset.ExtensionsV1beta1().ThirdPartyResources().Create(tpr) 245 | if err != nil { 246 | panic(err) 247 | } 248 | log.Printf("Created: %#v", result, tpr) 249 | } 250 | } 251 | } 252 | 253 | // Adapted from 254 | // https://github.com/kubernetes/client-go/blob/76153773eaa3a268131d3d993290a194a1370585/examples/third-party-resources/types.go 255 | type ReviewDeploymentSpec struct { 256 | Repo string `json:"repo"` 257 | PullRequestID int `json:"pullRequest"` 258 | Ref string `json:"ref"` 259 | BuildContextDir *string `json:"buildContextDir,omitempty"` 260 | } 261 | 262 | type ReviewDeploymentStatus struct { 263 | BuildPod string `json:"buildPod,omitempty"` 264 | Image string `json:"image,omitempty"` 265 | Pod string `json:"pod,omitempty"` 266 | Service string `json:"service,omitempty"` 267 | Ingress string `json:"ingress,omitempty"` 268 | } 269 | 270 | type ReviewDeployment struct { 271 | metav1.TypeMeta `json:",inline"` 272 | metav1.ObjectMeta `json:"metadata"` 273 | 274 | Spec ReviewDeploymentSpec `json:"spec"` 275 | Status ReviewDeploymentStatus `json:"status,omitempty"` 276 | } 277 | 278 | type ReviewDeploymentList struct { 279 | metav1.TypeMeta `json:",inline"` 280 | metav1.ListMeta `json:"metadata"` 281 | 282 | Items []ReviewDeployment `json:"items"` 283 | } 284 | 285 | // Required to satisfy Object interface 286 | func (e *ReviewDeployment) GetObjectKind() schema.ObjectKind { 287 | return &e.TypeMeta 288 | } 289 | 290 | // Required to satisfy ObjectMetaAccessor interface 291 | func (e *ReviewDeployment) GetObjectMeta() metav1.Object { 292 | return &e.ObjectMeta 293 | } 294 | 295 | // Required to satisfy Object interface 296 | func (el *ReviewDeploymentList) GetObjectKind() schema.ObjectKind { 297 | return &el.TypeMeta 298 | } 299 | 300 | // Required to satisfy ListMetaAccessor interface 301 | func (el *ReviewDeploymentList) GetListMeta() metav1.List { 302 | return &el.ListMeta 303 | } 304 | 305 | // Adapted from https://github.com/nilebox/kubernetes/blob/7891fbbdf6f399be07f2b19e1114346dab07b7b4/staging/src/k8s.io/client-go/examples/third-party-resources/watcher.go 306 | // Watcher is an example of watching on resource create/update/delete events 307 | type Watcher struct { 308 | clientset *kubernetes.Clientset 309 | reviewDeploymentClient *rest.RESTClient 310 | reviewDeploymentScheme *runtime.Scheme 311 | } 312 | 313 | // Run starts an Example resource watcher 314 | func (w *Watcher) Run(ctx context.Context) error { 315 | fmt.Printf("Watch ReviewDeployment objects\n") 316 | 317 | // Watch Example objects 318 | handler := eventHandler{ 319 | clientset: w.clientset, 320 | rdClient: w.reviewDeploymentClient, 321 | } 322 | _, err := watchReviewDeployments(ctx, w.reviewDeploymentClient, w.reviewDeploymentScheme, &handler) 323 | if err != nil { 
324 | fmt.Printf("Failed to register watch for ReviewDeployment resource: %v\n", err) 325 | return err 326 | } 327 | 328 | <-ctx.Done() 329 | return ctx.Err() 330 | } 331 | 332 | func watchReviewDeployments(ctx context.Context, reviewDeploymentClient cache.Getter, reviewDeploymentScheme *runtime.Scheme, handler cache.ResourceEventHandler) (cache.Controller, error) { 333 | parameterCodec := runtime.NewParameterCodec(reviewDeploymentScheme) 334 | 335 | source := newListWatchFromClient( 336 | reviewDeploymentClient, 337 | ReviewDeploymentResourcePath, 338 | api.NamespaceAll, 339 | fields.Everything(), 340 | parameterCodec) 341 | 342 | store, controller := cache.NewInformer( 343 | source, 344 | 345 | // The object type. 346 | &ReviewDeployment{}, 347 | 348 | // resyncPeriod 349 | // Every resyncPeriod, all resources in the cache will retrigger events. 350 | // Set to 0 to disable the resync. 351 | 0, 352 | 353 | // Your custom resource event handlers. 354 | handler) 355 | 356 | // store can be used to List and Get 357 | // NEVER modify objects from the store. It's a read-only, local cache. 358 | // You can use reviewDeploymentScheme.Copy() to make a deep copy of original object and modify this copy 359 | for _, obj := range store.List() { 360 | reviewDeployment := obj.(*ReviewDeployment) 361 | reviewDeploymentScheme.Copy(reviewDeployment) 362 | 363 | // This will likely be empty the first run, but may not 364 | fmt.Printf("Existing reviewDeployment: %#v\n", reviewDeployment) 365 | } 366 | 367 | go controller.Run(ctx.Done()) 368 | 369 | return controller, nil 370 | } 371 | 372 | // See the issue comment: https://github.com/kubernetes/kubernetes/issues/16376#issuecomment-272167794 373 | // newListWatchFromClient is a copy of cache.NewListWatchFromClient() method with custom codec 374 | // Cannot use cache.NewListWatchFromClient() because it uses global api.ParameterCodec which uses global 375 | // api.Scheme which does not know about custom types (ReviewDeployment in our case) group/version. 376 | func newListWatchFromClient(c cache.Getter, resource string, namespace string, fieldSelector fields.Selector, paramCodec runtime.ParameterCodec) *cache.ListWatch { 377 | listFunc := func(options metav1.ListOptions) (runtime.Object, error) { 378 | return c.Get(). 379 | Namespace(namespace). 380 | Resource(resource). 381 | VersionedParams(&options, paramCodec). 382 | FieldsSelectorParam(fieldSelector). 383 | Do(). 384 | Get() 385 | } 386 | watchFunc := func(options metav1.ListOptions) (watch.Interface, error) { 387 | return c.Get(). 388 | Prefix("watch"). 389 | Namespace(namespace). 390 | Resource(resource). 391 | VersionedParams(&options, paramCodec). 392 | FieldsSelectorParam(fieldSelector). 
393 | Watch() 394 | } 395 | return &cache.ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} 396 | } 397 | 398 | // ReviewDeploymentEventHandler can handle events for ReviewDeployment resource 399 | type ReviewDeploymentEventHandler interface { 400 | OnAdd(obj interface{}) 401 | OnUpdate(oldObj, newObj interface{}) 402 | OnDelete(obj interface{}) 403 | } 404 | 405 | type eventHandler struct { 406 | clientset *kubernetes.Clientset 407 | rdClient *rest.RESTClient 408 | } 409 | 410 | func (rd *ReviewDeployment) BuildJob() *buildJob { 411 | var contextDir string 412 | if rd.Spec.BuildContextDir != nil { 413 | contextDir = *rd.Spec.BuildContextDir 414 | } 415 | return &buildJob{ 416 | repo: rd.Spec.Repo, 417 | zipURL: fmt.Sprintf("https://github.com/%s/archive/%s.zip", rd.Spec.Repo, rd.Spec.Ref), 418 | imageName: fmt.Sprintf( 419 | "%v-pr%v", 420 | strings.Replace(rd.Spec.Repo, "/", "--", -1), 421 | rd.Spec.PullRequestID, 422 | ), 423 | imageTag: rd.Spec.Ref, 424 | buildContextDir: contextDir, 425 | } 426 | } 427 | 428 | func (rd *ReviewDeployment) BuildPod() *apiv1.Pod { 429 | b := rd.BuildJob() 430 | 431 | pod := b.Pod() 432 | if pod.Labels == nil { 433 | pod.Labels = make(map[string]string) 434 | } 435 | pod.Labels[ReviewDeploymentLabel] = rd.Name 436 | 437 | return pod 438 | } 439 | 440 | func (rd *ReviewDeployment) Pod() *apiv1.Pod { 441 | return &apiv1.Pod{ 442 | ObjectMeta: metav1.ObjectMeta{ 443 | Name: rd.Name, 444 | Labels: map[string]string{ReviewDeploymentLabel: rd.Name}, 445 | }, 446 | Spec: apiv1.PodSpec{ 447 | // TODO ActiveDeadlineSeconds: ActiveDeadlineSeconds, 448 | RestartPolicy: apiv1.RestartPolicyNever, 449 | Containers: []apiv1.Container{ 450 | apiv1.Container{ 451 | Name: "main", 452 | Image: rd.Status.Image, 453 | ImagePullPolicy: apiv1.PullAlways, 454 | }, 455 | }, 456 | }, 457 | } 458 | } 459 | 460 | func (rd *ReviewDeployment) Service() *apiv1.Service { 461 | return &apiv1.Service{ 462 | ObjectMeta: metav1.ObjectMeta{ 463 | Name: rd.Name, 464 | Labels: map[string]string{ReviewDeploymentLabel: rd.Name}, 465 | }, 466 | Spec: apiv1.ServiceSpec{ 467 | Ports: []apiv1.ServicePort{{ 468 | Port: 80, 469 | Protocol: apiv1.ProtocolTCP, 470 | TargetPort: intstr.FromInt(80), 471 | }}, 472 | Selector: map[string]string{ReviewDeploymentLabel: rd.Name}, 473 | }, 474 | } 475 | } 476 | 477 | func (rd *ReviewDeployment) Ingress() *v1beta1.Ingress { 478 | return &v1beta1.Ingress{ 479 | ObjectMeta: metav1.ObjectMeta{ 480 | Name: rd.Name, 481 | Annotations: map[string]string{"kubernetes.io/ingress.class": "nginx"}, 482 | Labels: map[string]string{ReviewDeploymentLabel: rd.Name}, 483 | }, 484 | Spec: v1beta1.IngressSpec{ 485 | Rules: []v1beta1.IngressRule{{ 486 | Host: fmt.Sprintf("%v.kubereview.k8s.kamal.cloud", rd.Name), 487 | IngressRuleValue: v1beta1.IngressRuleValue{ 488 | HTTP: &v1beta1.HTTPIngressRuleValue{ 489 | Paths: []v1beta1.HTTPIngressPath{{ 490 | Backend: v1beta1.IngressBackend{ 491 | ServiceName: rd.Name, 492 | ServicePort: intstr.FromInt(80), 493 | }, 494 | }}, 495 | }, 496 | }, 497 | }}, 498 | }, 499 | } 500 | } 501 | 502 | type BuildWatcher struct { 503 | rd *ReviewDeployment 504 | clientset *kubernetes.Clientset 505 | rdClient *rest.RESTClient 506 | w watch.Interface 507 | } 508 | 509 | func (bw *BuildWatcher) Watch() error { 510 | requirement, err := labels.NewRequirement( 511 | ReviewDeploymentLabel, selection.Equals, []string{bw.rd.Name}) 512 | 513 | if err != nil { 514 | log.Printf("Bad label selector requirement: %v", err) 515 | } 516 | 517 | w, err := 
bw.clientset.
		CoreV1().
		Pods(api.NamespaceDefault).
		Watch(metav1.ListOptions{
			LabelSelector: labels.NewSelector().Add(*requirement).String()})
	if err != nil {
		return err
	}

	bw.w = w

	for event := range bw.w.ResultChan() {
		switch event.Type {
		case watch.Added:
			fallthrough
		case watch.Modified:
			pod, ok := event.Object.(*apiv1.Pod)
			if !ok {
				log.Printf("GOT WRONG OBJECT TYPE: %v", event.Object)
				continue
			}
			bw.HandleNewPodState(pod)
			continue
		case watch.Error:
			// TODO: Should update rd to have error status?
		case watch.Deleted:
			// TODO: What happens here?
		default:
			log.Printf("GOT NONSENSE EVENT %v", event)
		}
	}
	return nil
}

func (bw *BuildWatcher) RefreshReviewDeployment() error {
	var result ReviewDeployment

	err := bw.rdClient.Get().
		Resource(ReviewDeploymentResourcePath).
		Namespace(api.NamespaceDefault).
		Name(bw.rd.Name).Do().Into(&result)

	if err != nil {
		return err
	}
	bw.rd = &result
	return nil
}

func (bw *BuildWatcher) HandleNewPodState(pod *apiv1.Pod) error {
	// TODO: This should apparently be way less simple and use conditions
	// and container statuses?
	switch pod.Status.Phase {
	// If not done yet, keep waiting.
	case apiv1.PodPending:
		fallthrough
	case apiv1.PodRunning:
		return nil
	case apiv1.PodSucceeded:
		log.Printf("Build pod %v for review deployment %v succeeded", bw.rd.Status.BuildPod, bw.rd.Name)
		err := bw.RefreshReviewDeployment()
		if err != nil {
			log.Printf("Error refreshing review deployment %v: %v", bw.rd.Name, err)
			return err
		}
		bw.rd.Status.Image = bw.rd.BuildJob().getBuildImageName()
		updated, err := UpdateReviewDeployment(bw.rdClient, bw.rd)
		if err != nil {
			log.Printf("Error updating review deployment %v after build: %v", bw.rd.Name, err)
		}
		bw.rd = updated
		bw.w.Stop()
	default:
		log.Printf("Build pod %v for review deployment %v failed", bw.rd.Status.BuildPod, bw.rd.Name)
		// UpdateTheRdBad()
		bw.w.Stop()
	}

	return nil
}

func (h *eventHandler) CreateBuildPod(rd *ReviewDeployment) error {
	toCreate := rd.BuildPod()
	log.Printf("Creating build pod for %v", rd.Name)
	pod, err := h.clientset.CoreV1().Pods(api.NamespaceDefault).Create(toCreate)

	if err != nil {
		log.Printf("Error creating build pod: %v", err)
		return err
	}

	log.Printf("Created build pod %v for review deployment %v", pod.Name, rd.Name)
	// Update review deployment status.
	rd.Status.BuildPod = pod.Name

	_, err = UpdateReviewDeployment(h.rdClient, rd)
	if err != nil {
		log.Printf("Error updating review deployment %v: %v", rd.Name, err)
	}
	return nil
}

func (h *eventHandler) CreatePod(rd *ReviewDeployment) error {
	toCreate := rd.Pod()
	pod, err := h.clientset.CoreV1().Pods(api.NamespaceDefault).Create(toCreate)

	if err != nil {
		log.Printf("Error creating pod: %v", err)
		return err
	}

	log.Printf("Created pod %v for review deployment %v", pod.Name, rd.Name)
	// Update review deployment status.
630 | rd.Status.Pod = pod.Name 631 | 632 | _, err = UpdateReviewDeployment(h.rdClient, rd) 633 | if err != nil { 634 | log.Printf("Error updating review deployment %v: %v", rd.Name, err) 635 | } 636 | return nil 637 | } 638 | 639 | func (h *eventHandler) CreateIngress(rd *ReviewDeployment) error { 640 | toCreate := rd.Ingress() 641 | ingress, err := h.clientset.ExtensionsV1beta1().Ingresses(api.NamespaceDefault).Create(toCreate) 642 | 643 | if err != nil { 644 | log.Printf("Error creating ingress: %v", err) 645 | return err 646 | } 647 | 648 | log.Printf("Created ingress %v for review deployment %v", ingress.Name, rd.Name) 649 | // Update review deployment status. 650 | rd.Status.Ingress = ingress.Name 651 | 652 | _, err = UpdateReviewDeployment(h.rdClient, rd) 653 | if err != nil { 654 | log.Printf("Error updating review deployment %v: %v", rd.Name, err) 655 | } 656 | return nil 657 | } 658 | 659 | func (h *eventHandler) CreateService(rd *ReviewDeployment) error { 660 | toCreate := rd.Service() 661 | svc, err := h.clientset.CoreV1().Services(api.NamespaceDefault).Create(toCreate) 662 | 663 | if err != nil { 664 | log.Printf("Error creating service %v: %v", toCreate.Name, err) 665 | return err 666 | } 667 | 668 | log.Printf("Created service %v for review deployment %v", svc.Name, rd.Name) 669 | // Update review deployment status. 670 | rd.Status.Service = svc.Name 671 | 672 | _, err = UpdateReviewDeployment(h.rdClient, rd) 673 | if err != nil { 674 | log.Printf("Error updating review deployment %v: %v", rd.Name, err) 675 | } 676 | return nil 677 | } 678 | 679 | func (h *eventHandler) handleUpdatedReviewDeployment(rd *ReviewDeployment) { 680 | if rd.Status.Image == "" && rd.Status.BuildPod == "" { 681 | err := h.CreateBuildPod(rd) 682 | if err != nil { 683 | log.Printf("Error creating build pod for %v: %v", rd.Name, err) 684 | } 685 | 686 | bw := BuildWatcher{ 687 | rd: rd, 688 | clientset: h.clientset, 689 | rdClient: h.rdClient, 690 | } 691 | go bw.Watch() 692 | return 693 | } else if rd.Status.Image != "" && rd.Status.Ingress == "" { 694 | // TODO check properly here 695 | // Being slighlty lazy with the check here, as we'll never have Pod set 696 | // without also having the ingress and service set. 697 | h.CreateIngress(rd) 698 | h.CreateService(rd) 699 | h.CreatePod(rd) 700 | } 701 | } 702 | 703 | 704 | func (h *eventHandler) OnAdd(obj interface{}) { 705 | rd := obj.(*ReviewDeployment) 706 | 707 | h.handleUpdatedReviewDeployment(rd) 708 | } 709 | 710 | func UpdateReviewDeployment(c *rest.RESTClient, rd *ReviewDeployment) (*ReviewDeployment, error) { 711 | var result ReviewDeployment 712 | 713 | req := c.Put(). 714 | Resource(ReviewDeploymentResourcePath). 715 | Namespace(api.NamespaceDefault). 716 | Name(rd.Name). 717 | Body(rd) 718 | err := req.Do().Into(&result) 719 | if err != nil { 720 | // TODO(prod): Check for resource version change and retry. 721 | return nil, err 722 | } 723 | return &result, nil 724 | } 725 | 726 | // TODO we probably don't care about updates? 727 | // Maybe just use for state machine violation detection? 728 | // Actually simpler to manage image added here. 
729 | func (h *eventHandler) OnUpdate(oldObj, newObj interface{}) { 730 | newRd := newObj.(*ReviewDeployment) 731 | h.handleUpdatedReviewDeployment(newRd) 732 | } 733 | 734 | func (h *eventHandler) OnDelete(obj interface{}) { 735 | // reviewDeployment := obj.(*ReviewDeployment) 736 | } 737 | 738 | // Adapted from 739 | // https://github.com/nilebox/kubernetes/blob/7891fbbdf6f399be07f2b19e1114346dab07b7b4/staging/src/k8s.io/client-go/examples/third-party-resources/client.go 740 | func NewClient(cfg *rest.Config) (*rest.RESTClient, *runtime.Scheme, error) { 741 | groupVersion := schema.GroupVersion{ 742 | Group: ReviewDeploymentResourceGroup, 743 | Version: ReviewDeploymentResourceVersion, 744 | } 745 | 746 | schemeBuilder := runtime.NewSchemeBuilder(func(scheme *runtime.Scheme) error { 747 | scheme.AddKnownTypes( 748 | groupVersion, 749 | &ReviewDeployment{}, 750 | &ReviewDeploymentList{}, 751 | ) 752 | scheme.AddUnversionedTypes(api.Unversioned, &metav1.Status{}) 753 | metav1.AddToGroupVersion(scheme, groupVersion) 754 | return nil 755 | }) 756 | 757 | scheme := runtime.NewScheme() 758 | if err := schemeBuilder.AddToScheme(scheme); err != nil { 759 | return nil, nil, err 760 | } 761 | 762 | config := *cfg 763 | config.GroupVersion = &groupVersion 764 | config.APIPath = "/apis" 765 | config.ContentType = runtime.ContentTypeJSON 766 | config.NegotiatedSerializer = serializer.DirectCodecFactory{ 767 | CodecFactory: serializer.NewCodecFactory(scheme), 768 | } 769 | 770 | client, err := rest.RESTClientFor(&config) 771 | if err != nil { 772 | return nil, nil, err 773 | } 774 | 775 | return client, scheme, nil 776 | } 777 | --------------------------------------------------------------------------------
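For a concrete sense of how a ReviewDeployment's spec drives the build, here is a small example-style test that could sit next to main.go in a hypothetical main_test.go (the repo has none); the repo name, pull request number, and ref are made up, and the expected output simply follows BuildJob() and getBuildImageName() above.

// Hypothetical main_test.go: shows how a ReviewDeployment's spec maps onto the
// GitHub archive URL that the downloader fetches and the GCR image coordinates
// that the builder pushes.
func ExampleReviewDeployment_BuildJob() {
	rd := &ReviewDeployment{
		Spec: ReviewDeploymentSpec{
			Repo:          "kamalmarhubi/some-app", // hypothetical GitHub repo
			PullRequestID: 42,
			Ref:           "abc123",
		},
	}

	b := rd.BuildJob()
	fmt.Println(b.zipURL)
	fmt.Println(b.getBuildImageName())
	// Output:
	// https://github.com/kamalmarhubi/some-app/archive/abc123.zip
	// gcr.io/absolute-realm-611/kamalmarhubi--some-app-pr42:abc123
}

Running go test in the controller directory would execute this as an example function and check the two printed lines against the Output comment.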