├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug-report.md
│   │   └── feature-request.md
│   └── workflows
│       ├── chart.yaml
│       ├── markdown-lint.yaml
│       ├── markdown.links.config.json
│       ├── publish-images.yaml
│       └── workflow.yml
├── .gitignore
├── .golangci.yml
├── CODE_OF_CONDUCT.md
├── Dockerfile
├── LICENSE
├── Makefile
├── PROJECT
├── README.md
├── SECURITY.md
├── SUPPORT.md
├── apis
│   └── v1alpha1
│       ├── doc.go
│       ├── placementpolicy_types.go
│       ├── zz_generated.deepcopy.go
│       └── zz_generated.register.go
├── charts
│   └── placement-policy-scheduler-plugins
│       ├── .helmignore
│       ├── Chart.yaml
│       ├── README.md
│       ├── crds
│       │   └── scheduling.x-k8s.io_placementpolicy.yaml
│       ├── templates
│       │   ├── _helpers.tpl
│       │   ├── configmap.yaml
│       │   ├── deployment.yaml
│       │   └── rbac.yaml
│       └── values.yaml
├── cmd
│   └── scheduler
│       └── main.go
├── config
│   ├── certmanager
│   │   ├── certificate.yaml
│   │   ├── kustomization.yaml
│   │   └── kustomizeconfig.yaml
│   ├── crd
│   │   ├── bases
│   │   │   └── placement-policy.scheduling.x-k8s.io_placementpolicies.yaml
│   │   ├── kustomization.yaml
│   │   ├── kustomizeconfig.yaml
│   │   └── patches
│   │       ├── cainjection_in_placementpolicies.yaml
│   │       └── webhook_in_placementpolicies.yaml
│   ├── default
│   │   ├── kustomization.yaml
│   │   ├── manager_auth_proxy_patch.yaml
│   │   ├── manager_webhook_patch.yaml
│   │   └── webhookcainjection_patch.yaml
│   ├── manager
│   │   ├── kustomization.yaml
│   │   └── manager.yaml
│   ├── prometheus
│   │   ├── kustomization.yaml
│   │   └── monitor.yaml
│   ├── rbac
│   │   ├── auth_proxy_client_clusterrole.yaml
│   │   ├── auth_proxy_role.yaml
│   │   ├── auth_proxy_role_binding.yaml
│   │   ├── auth_proxy_service.yaml
│   │   ├── kustomization.yaml
│   │   ├── leader_election_role.yaml
│   │   ├── leader_election_role_binding.yaml
│   │   ├── placementpolicy_editor_role.yaml
│   │   ├── placementpolicy_viewer_role.yaml
│   │   └── role_binding.yaml
│   ├── samples
│   │   └── placement-policy.scheduling.x-k8s.io_v1alpha1_placementpolicy.yaml
│   └── webhook
│       ├── kustomization.yaml
│       ├── kustomizeconfig.yaml
│       └── service.yaml
├── deploy
│   └── kube-scheduler-configuration.yml
├── examples
│   ├── basic-mixed-node-pools
│   │   ├── README.md
│   │   ├── demo_deployment.yml
│   │   └── v1alpha1_placementpolicy_mixednodepools.yml
│   ├── demo_replicaset.yml
│   ├── harvest-vm
│   │   ├── README.md
│   │   └── v1alpha1_placementpolicy_harvestvm.yml
│   ├── v1alpha1_placementpolicy_must_besteffort.yml
│   ├── v1alpha1_placementpolicy_strict_must.yml
│   └── v1alpha1_placementpolicy_strict_mustnot.yml
├── go.mod
├── go.sum
├── hack
│   ├── boilerplate.go.txt
│   ├── go-install.sh
│   ├── install-etcd.sh
│   ├── integration-test.sh
│   ├── lib
│   │   ├── etcd.sh
│   │   ├── golang.sh
│   │   ├── init.sh
│   │   ├── logging.sh
│   │   └── util.sh
│   ├── openapi-violation.list
│   ├── testdata
│   │   ├── 127.0.0.1_10.0.0.1_kubernetes.default.svc-kubernetes.default-kubernetes-localhost.crt
│   │   ├── 127.0.0.1_10.0.0.1_kubernetes.default.svc-kubernetes.default-kubernetes-localhost.key
│   │   └── README.md
│   ├── tools.go
│   ├── update-codegen.sh
│   └── update-generated-openapi.sh
├── manifest_staging
│   ├── charts
│   │   └── placement-policy-scheduler-plugins
│   │       ├── .helmignore
│   │       ├── Chart.yaml
│   │       ├── README.md
│   │       ├── crds
│   │       │   └── scheduling.x-k8s.io_placementpolicy.yaml
│   │       ├── templates
│   │       │   ├── _helpers.tpl
│   │       │   ├── configmap.yaml
│   │       │   ├── deployment.yaml
│   │       │   └── rbac.yaml
│   │       └── values.yaml
│   └── deploy
│       └── kube-scheduler-configuration.yml
├── pkg
│   ├── client
│   │   ├── clientset
│   │   │   └── versioned
│   │   │       ├── clientset.go
│   │   │       ├── doc.go
│   │   │       ├── fake
│   │   │       │   ├── clientset_generated.go
│   │   │       │   ├── doc.go
│   │   │       │   └── register.go
│   │   │       ├── scheme
│   │   │       │   ├── doc.go
│   │   │       │   └── register.go
│   │   │       └── typed
│   │   │           └── apis
│   │   │               └── v1alpha1
│   │   │                   ├── apis_client.go
│   │   │                   ├── doc.go
│   │   │                   ├── fake
│   │   │                   │   ├── doc.go
│   │   │                   │   ├── fake_apis_client.go
│   │   │                   │   └── fake_placementpolicy.go
│   │   │                   ├── generated_expansion.go
│   │   │                   └── placementpolicy.go
│   │   ├── informers
│   │   │   └── externalversions
│   │   │       ├── apis
│   │   │       │   ├── interface.go
│   │   │       │   └── v1alpha1
│   │   │       │       ├── interface.go
│   │   │       │       └── placementpolicy.go
│   │   │       ├── factory.go
│   │   │       ├── generic.go
│   │   │       └── internalinterfaces
│   │   │           └── factory_interfaces.go
│   │   └── listers
│   │       └── apis
│   │           └── v1alpha1
│   │               ├── expansion_generated.go
│   │               └── placementpolicy.go
│   ├── plugins
│   │   └── placementpolicy
│   │       ├── core
│   │       │   ├── core.go
│   │       │   └── sort.go
│   │       ├── placementpolicy.go
│   │       ├── placementpolicy_test.go
│   │       └── state.go
│   └── utils
│       ├── labels.go
│       └── labels_test.go
└── test
    ├── e2e
    │   ├── kind-config.yaml
    │   ├── kubectl.go
    │   ├── main_test.go
    │   ├── placement_policy_test.go
    │   └── utils.go
    └── integration
        ├── main_test.go
        ├── placement_policy_int_test.go
        ├── scheduler.go
        └── util.go
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **What steps did you take and what happened:**
11 | [A clear and concise description of what the bug is.]
12 |
13 |
14 | **What did you expect to happen:**
15 |
16 |
17 | **Anything else you would like to add:**
18 | [Miscellaneous information that will assist in solving the issue.]
19 |
20 |
21 | **Environment:**
22 |
23 | - Kubernetes version: (use `kubectl version`):
24 | - Placement Policy Scheduler plugins version:
25 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the solution you'd like**
11 | [A clear and concise description of what you want to happen.]
12 |
13 |
14 | **Anything else you would like to add:**
15 | [Miscellaneous information that will assist in solving the issue.]
16 |
17 |
18 | **Environment:**
19 |
20 | - Kubernetes version: (use `kubectl version`):
21 | - Placement Policy Scheduler plugins version:
22 |
--------------------------------------------------------------------------------
/.github/workflows/chart.yaml:
--------------------------------------------------------------------------------
1 | name: publish_helm_chart
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | paths:
8 | - ".github/workflows/chart.yaml"
9 | - "charts/**"
10 |
11 | permissions:
12 | contents: write
13 |
14 | jobs:
15 | deploy:
16 | runs-on: ubuntu-20.04
17 | steps:
18 | - uses: actions/checkout@v2
19 | with:
20 | submodules: true
21 | fetch-depth: 0
22 | - name: Publish Helm chart
23 | uses: stefanprodan/helm-gh-pages@v1.4.1
24 | with:
25 | token: ${{ secrets.GITHUB_TOKEN }}
26 | charts_dir: charts
27 | target_dir: charts
28 | linting: off
29 |
--------------------------------------------------------------------------------
/.github/workflows/markdown-lint.yaml:
--------------------------------------------------------------------------------
1 | name: Check Markdown
2 |
3 | on:
4 | pull_request:
5 | paths:
6 | - '**.md'
7 | - "docs/**"
8 |
9 | jobs:
10 | markdown-link-check:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v2
14 | - uses: gaurav-nelson/github-action-markdown-link-check@v1
15 | with:
16 | # this will only show errors in the output
17 | use-quiet-mode: 'yes'
18 | # this will show detailed HTTP status for checked links
19 | use-verbose-mode: 'yes'
20 | config-file: '.github/workflows/markdown.links.config.json'
21 |
--------------------------------------------------------------------------------
/.github/workflows/markdown.links.config.json:
--------------------------------------------------------------------------------
1 | {
2 | "aliveStatusCodes": [
3 | 200,
4 | 203
5 | ]
6 | }
7 |
--------------------------------------------------------------------------------
/.github/workflows/publish-images.yaml:
--------------------------------------------------------------------------------
1 | name: publish_images
2 |
3 | on:
4 | create:
5 | tags:
6 | - 'v*'
7 |
8 | permissions:
9 | contents: read
10 | packages: write
11 |
12 | env:
13 | IMAGE_NAME: placement-policy
14 |
15 | jobs:
16 | export-registry:
17 | runs-on: ubuntu-20.04
18 | outputs:
19 | registry: ${{ steps.export.outputs.registry }}
20 | steps:
21 | - id: export
22 | run: |
23 | # registry must be in lowercase
24 | echo "::set-output name=registry::$(echo "ghcr.io/${{ github.repository }}" | tr [:upper:] [:lower:])"
25 | publish-images:
26 | needs: export-registry
27 | env:
28 | REGISTRY: ${{ needs.export-registry.outputs.registry }}
29 | runs-on: ubuntu-20.04
30 | steps:
31 | - uses: actions/checkout@v2
32 | with:
33 | submodules: true
34 | fetch-depth: 0
35 | - name: Login to ghcr.io
36 | uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
37 | with:
38 | registry: ghcr.io
39 | username: ${{ github.actor }}
40 | password: ${{ secrets.GITHUB_TOKEN }}
41 | - name: Set env
42 | run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
43 | - name: Build ${{ env.IMAGE_NAME}}
44 | run: |
45 | make docker-build
46 | env:
47 | IMAGE_NAME: ${{ env.IMAGE_NAME}}
48 | IMAGE_VERSION: ${{ env.RELEASE_VERSION }}
49 |
--------------------------------------------------------------------------------
/.github/workflows/workflow.yml:
--------------------------------------------------------------------------------
1 | name: test
2 |
3 | on:
4 | pull_request:
5 | paths-ignore:
6 | - docs/**
7 | - '**.md'
8 |
9 | jobs:
10 | lint:
11 | name: "Lint"
12 | runs-on: ubuntu-latest
13 | timeout-minutes: 15
14 | permissions:
15 | contents: read
16 | steps:
17 | - name: Set up Go 1.17
18 | uses: actions/setup-go@v2
19 | with:
20 | go-version: 1.17
21 | - name: Check out code into the Go module directory
22 | uses: actions/checkout@v2
23 | - name: golangci-lint
24 | run: make lint
25 |
26 | unit-test:
27 | name: "Unit Test"
28 | runs-on: ubuntu-latest
29 | timeout-minutes: 10
30 | steps:
31 | - name: Set up Go 1.17
32 | uses: actions/setup-go@v2
33 | with:
34 | go-version: 1.17
35 | - name: Check out code into the Go module directory
36 | uses: actions/checkout@v2
37 | - name: Run unit tests
38 | run: make unit-test
39 |
40 | integration-test:
41 | name: "Integration Test"
42 | runs-on: ubuntu-latest
43 | timeout-minutes: 10
44 | steps:
45 | - name: Set up Go 1.17
46 | uses: actions/setup-go@v2
47 | with:
48 | go-version: 1.17
49 | - name: Check out code into the Go module directory
50 | uses: actions/checkout@v2
51 | - name: Run integration tests
52 | run: make integration-test
53 |
54 | e2e-test:
55 | name: "E2E Test"
56 | runs-on: ubuntu-latest
57 | timeout-minutes: 10
58 | steps:
59 | - name: Set up Go 1.17
60 | uses: actions/setup-go@v2
61 | with:
62 | go-version: 1.17
63 | - name: Check out code into the Go module directory
64 | uses: actions/checkout@v2
65 | - name: Run e2e test
66 | run: OUTPUT_TYPE=type=docker make docker-build e2e-test
67 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, built with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 |
14 | # Dependency directories (remove the comment below to include it)
15 | # vendor/
16 |
17 | # IDE custom files
18 | .vscode
19 | .idea
20 |
21 | # binary output
22 | bin/
23 | hack/tools/bin/
24 |
25 | # used for the code generators only
26 | vendor/
27 | etcd-*/
28 | etcd
29 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | run:
2 | deadline: 10m
3 |
4 | linters:
5 | disable-all: true
6 | enable:
7 | - deadcode
8 | - errcheck
9 | - errorlint
10 | - goconst
11 | - gocyclo
12 | - gofmt
13 | - goimports
14 | - gosec
15 | - gosimple
16 | - govet
17 | - ineffassign
18 | - misspell
19 | - nakedret
20 | - nilerr
21 | - prealloc
22 | - revive
23 | - staticcheck
24 | - structcheck
25 | - typecheck
26 | - unconvert
27 | - unused
28 | - varcheck
29 | - whitespace
30 | # Run with --fast=false for more extensive checks
31 | fast: true
32 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Microsoft Open Source Code of Conduct
2 |
3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
4 |
5 | Resources:
6 |
7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
10 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build the manager binary
2 | FROM golang:1.17 as builder
3 |
4 | WORKDIR /workspace
5 | # Copy the Go Modules manifests
6 | COPY go.mod go.mod
7 | COPY go.sum go.sum
8 | # cache deps before building and copying source so that we don't need to re-download as much
9 | # and so that source changes don't invalidate our downloaded layer
10 | RUN go mod download
11 |
12 | # Copy the go source
13 | COPY cmd/scheduler/main.go main.go
14 | COPY apis/ apis/
15 | COPY pkg/ pkg/
16 |
17 | # Build
18 | ARG TARGETARCH
19 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -o manager main.go
20 |
21 | # Use distroless as minimal base image to package the manager binary
22 | # Refer to https://github.com/GoogleContainerTools/distroless for more details
23 | FROM --platform=${TARGETPLATFORM:-linux/amd64} gcr.io/distroless/static:nonroot
24 | WORKDIR /
25 | COPY --from=builder /workspace/manager .
26 |
27 | ENTRYPOINT ["/manager"]
28 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Microsoft Corporation.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | REGISTRY ?= ghcr.io/azure/placement-policy-scheduler-plugins
2 | IMAGE_NAME := placement-policy
3 | IMAGE_VERSION ?= v0.1.0
4 |
5 | # Directories
6 | ROOT_DIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
7 | BIN_DIR := $(abspath $(ROOT_DIR)/bin)
8 | TOOLS_DIR := hack/tools
9 | TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/bin)
10 |
11 | # Binaries
12 | CONTROLLER_GEN_VER := v0.7.0
13 | CONTROLLER_GEN_BIN := controller-gen
14 | CONTROLLER_GEN := $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER)
15 |
16 | GOLANGCI_LINT_VER := v1.41.1
17 | GOLANGCI_LINT_BIN := golangci-lint
18 | GOLANGCI_LINT := $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER)
19 |
20 | # Scripts
21 | GO_INSTALL := ./hack/go-install.sh
22 | UPDATE_GENERATED_OPENAPI := ./hack/update-generated-openapi.sh
23 | INSTALL_ETCD := ./hack/install-etcd.sh
24 | RUN_TEST := ./hack/integration-test.sh
25 |
26 | ## --------------------------------------
27 | ## Tooling Binaries
28 | ## --------------------------------------
29 |
30 | $(CONTROLLER_GEN):
31 | GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) sigs.k8s.io/controller-tools/cmd/controller-gen $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER)
32 |
33 | $(GOLANGCI_LINT):
34 | GOBIN=$(TOOLS_BIN_DIR) $(GO_INSTALL) github.com/golangci/golangci-lint/cmd/golangci-lint $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER)
35 |
36 | ## --------------------------------------
37 | ## Testing
38 | ## --------------------------------------
39 |
40 | # Run go fmt against code
41 | .PHONY: fmt
42 | fmt:
43 | go fmt ./...
44 |
45 | # Run go vet against code
46 | .PHONY: vet
47 | vet:
48 | go vet ./...
49 |
50 | # Run go mod vendor against go.mod
51 | .PHONY: vendor
52 | vendor:
53 | go mod tidy
54 | go mod vendor
55 |
56 | # Install etcd
57 | .PHONY: install-etcd
58 | install-etcd:
59 | $(INSTALL_ETCD)
60 |
61 |
62 | # Run unit tests
63 | .PHONY: unit-test
64 | unit-test: autogen manager manifests
65 | go test ./pkg/... -mod=vendor -coverprofile cover.out
66 |
67 | ## --------------------------------------
68 | ## Linting
69 | ## --------------------------------------
70 |
71 | .PHONY: lint
72 | lint: $(GOLANGCI_LINT)
73 | $(GOLANGCI_LINT) run -v
74 |
75 | .PHONY: lint-full
76 | lint-full: $(GOLANGCI_LINT) ## Run slower linters to detect possible issues
77 | $(GOLANGCI_LINT) run -v --fast=false
78 |
79 | ## --------------------------------------
80 | ## Code Generation
81 | ## --------------------------------------
82 |
83 | # Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
84 | CRD_OPTIONS ?= "crd:crdVersions=v1"
85 |
86 | # Generate manifests e.g. CRD, RBAC etc.
87 | .PHONY: manifests
88 | manifests: $(CONTROLLER_GEN)
89 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
90 |
91 | # Generate code
92 | generate: $(CONTROLLER_GEN)
93 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
94 |
95 | ## --------------------------------------
96 | ## Binaries
97 | ## --------------------------------------
98 |
99 | # Build manager binary
100 | .PHONY: manager
101 | manager: generate fmt vet
102 | go build -o bin/manager cmd/scheduler/main.go
103 |
104 | .PHONY: autogen
105 | autogen: vendor
106 | $(UPDATE_GENERATED_OPENAPI)
107 |
108 | ## --------------------------------------
109 | ## Integration Testing
110 | ## --------------------------------------
111 |
112 | # Run integration tests
113 | .PHONY: integration-test
114 | integration-test: install-etcd autogen manager manifests
115 | $(RUN_TEST)
116 |
117 | ## --------------------------------------
118 | ## E2E Testing
119 | ## --------------------------------------
120 |
121 | # Run all tests
122 | .PHONY: e2e-test
123 | e2e-test:
124 | REGISTRY=${REGISTRY} IMAGE_NAME=${IMAGE_NAME} IMAGE_VERSION=${IMAGE_VERSION} go test -tags=e2e -v ./test/e2e
125 |
126 | ## --------------------------------------
127 | ## Images
128 | ## --------------------------------------
129 |
130 | OUTPUT_TYPE ?= type=registry
131 | BUILDX_BUILDER_NAME ?= img-builder
132 | QEMU_VERSION ?= 5.2.0-2
133 |
134 | .PHONY: docker-buildx-builder
135 | docker-buildx-builder:
136 | @if ! docker buildx ls | grep $(BUILDX_BUILDER_NAME); then \
137 | docker run --rm --privileged multiarch/qemu-user-static:$(QEMU_VERSION) --reset -p yes; \
138 | docker buildx create --name $(BUILDX_BUILDER_NAME) --use; \
139 | docker buildx inspect $(BUILDX_BUILDER_NAME) --bootstrap; \
140 | fi
141 |
142 | .PHONY: docker-build
143 | docker-build: docker-buildx-builder
144 | docker buildx build \
145 | --file Dockerfile \
146 | --output=$(OUTPUT_TYPE) \
147 | --platform="linux/amd64" \
148 | --pull \
149 | --tag $(REGISTRY)/$(IMAGE_NAME):$(IMAGE_VERSION) .
150 |
151 | ## --------------------------------------
152 | ## Release
153 | ## --------------------------------------
154 |
155 | .PHONY: promote-staging-manifest
156 | promote-staging-manifest:
157 | @rm -rf deploy charts/placement-policy-scheduler-plugins
158 | @cp -r manifest_staging/deploy .
159 | @cp -r manifest_staging/charts .
160 |
--------------------------------------------------------------------------------
/PROJECT:
--------------------------------------------------------------------------------
1 | domain: placement-policy.scheduling.x-k8s.io
2 | repo: github.com/Azure/placement-policy-scheduler-plugins
3 | resources:
4 | - group: placement-policy.scheduling.x-k8s.io
5 | kind: PlacementPolicy
6 | version: v1alpha1
7 | version: "2"
8 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## Security
4 |
5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
6 |
7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
8 |
9 | ## Reporting Security Issues
10 |
11 | **Please do not report security vulnerabilities through public GitHub issues.**
12 |
13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
14 |
15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
16 |
17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
18 |
19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
20 |
21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
22 | * Full paths of source file(s) related to the manifestation of the issue
23 | * The location of the affected source code (tag/branch/commit or direct URL)
24 | * Any special configuration required to reproduce the issue
25 | * Step-by-step instructions to reproduce the issue
26 | * Proof-of-concept or exploit code (if possible)
27 | * Impact of the issue, including how an attacker might exploit the issue
28 |
29 | This information will help us triage your report more quickly.
30 |
31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
32 |
33 | ## Preferred Languages
34 |
35 | We prefer all communications to be in English.
36 |
37 | ## Policy
38 |
39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
40 |
41 |
--------------------------------------------------------------------------------
/SUPPORT.md:
--------------------------------------------------------------------------------
1 | # TODO: The maintainer of this repo has not yet edited this file
2 |
3 | **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project?
4 |
5 | - **No CSS support:** Fill out this template with information about how to file issues and get help.
6 | - **Yes CSS support:** Fill out an intake form at [aka.ms/spot](https://aka.ms/spot). CSS will work with/help you to determine next steps. More details also available at [aka.ms/onboardsupport](https://aka.ms/onboardsupport).
7 | - **Not sure?** Fill out a SPOT intake as though the answer were "Yes". CSS will help you decide.
8 |
9 | *Then remove this first heading from this SUPPORT.MD file before publishing your repo.*
10 |
11 | # Support
12 |
13 | ## How to file issues and get help
14 |
15 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing
16 | issues before filing new issues to avoid duplicates. For new issues, file your bug or
17 | feature request as a new Issue.
18 |
19 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE
20 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER
21 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**.
22 |
23 | ## Microsoft Support Policy
24 |
25 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above.
26 |
--------------------------------------------------------------------------------
/apis/v1alpha1/doc.go:
--------------------------------------------------------------------------------
1 | // +kubebuilder:object:generate=true
2 | // +k8s:deepcopy-gen=package,register
3 | // +groupName=placement-policy.scheduling.x-k8s.io
4 | package v1alpha1
5 |
--------------------------------------------------------------------------------
/apis/v1alpha1/placementpolicy_types.go:
--------------------------------------------------------------------------------
1 | package v1alpha1
2 |
3 | import (
4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
5 | "k8s.io/apimachinery/pkg/util/intstr"
6 | )
7 |
8 | type (
9 | // EnforcementMode is an enumeration of the enforcement modes
10 | EnforcementMode string
11 | // Action is an enumeration of the actions
12 | Action string
13 | )
14 |
15 | const (
16 | // EnforcementModeBestEffort means the policy will be enforced as best effort
17 | EnforcementModeBestEffort EnforcementMode = "BestEffort"
18 | // EnforcementModeStrict means the policy will be forced during scheduling
19 | EnforcementModeStrict EnforcementMode = "Strict"
20 |
21 | // ActionMust means the pods must be placed on the node
22 | ActionMust Action = "Must"
23 | // ActionMustNot means the pods must not be placed on the node
24 | ActionMustNot Action = "MustNot"
25 |
26 | // PlacementPolicyAnnotationKey is the annotation key for placement policy
27 | PlacementPolicyAnnotationKey = "placement-policy.x-k8s.io/policy-name"
28 | // PlacementPolicyPreferenceAnnotationKey is the annotation key for placement policy node preference
29 | PlacementPolicyPreferenceAnnotationKey = "placement-policy.x-k8s.io/node-preference-matching-labels"
30 | )
31 |
32 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
33 |
34 | // PlacementPolicySpec defines the desired state of PlacementPolicy
35 | type PlacementPolicySpec struct {
36 | // The policy weight allows the engine to decide which policy to use when
37 | // pods match multiple policies. If multiple policies matched and all
38 | // share the same weight then a policy with spec.enforcementMode == Force
39 | // will be selected. If multiple policies match and +1 policy is marked
40 | // as “Force” enforcementMode then they will be sorted alphabetically /
41 | // ascending and the first one will be used. The scheduler publishes events
42 | // capturing this conflict when it happens. Weight == 0-100 is reserved
43 | // for future use.
44 | Weight int32 `json:"weight,omitempty"`
45 | // enforcementMode is an enum that specifies how the policy will be
46 | // enforced during scheduling (e.g. the application of filter vs scorer
47 | // plugin). Values allowed for this field are:
48 | // BestEffort (default): the policy will be enforced as best effort
49 | // (scorer mode).
50 | // Strict: the policy will be forced during scheduling. The filter
51 | // approach will be used. Note: that may yield pods unschedulable.
52 | EnforcementMode EnforcementMode `json:"enforcementMode,omitempty"`
53 | // podSelector identifies which pods this placement policy will apply on
54 | PodSelector *metav1.LabelSelector `json:"podSelector,omitempty"`
55 | // nodeSelector selects the nodes where the placement policy will
56 | // apply on according to action
57 | NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"`
58 | // Policy is the policy placement for target based on action
59 | Policy *Policy `json:"policy,omitempty"`
60 | }
61 |
62 | type Policy struct {
63 | // The action field is policy placement action. It is a string enum
64 | // that carries the following possible values:
65 | // Must(default): based on the rule below pods must be placed on
66 | // nodes selected by node selector
67 | // MustNot: based on the rule pods must *not* be placed on nodes
68 | // selected by node selector
69 | Action Action `json:"action,omitempty"`
70 | // TargetSize is the number of pods that can or cannot be placed on the node.
71 | // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
72 | // Absolute number is calculated from percentage by rounding down.
73 | TargetSize *intstr.IntOrString `json:"targetSize,omitempty"`
74 | }
75 |
76 | // PlacementPolicyStatus defines the observed state of PlacementPolicy
77 | type PlacementPolicyStatus struct {
78 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
79 | // Important: Run "make" to regenerate code after modifying this file
80 | }
81 |
82 | //+kubebuilder:object:root=true
83 | //+kubebuilder:subresource:status
84 | // +genclient
85 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
86 |
87 | // PlacementPolicy is the Schema for the placementpolicies API
88 | type PlacementPolicy struct {
89 | metav1.TypeMeta `json:",inline"`
90 | metav1.ObjectMeta `json:"metadata,omitempty"`
91 |
92 | Spec PlacementPolicySpec `json:"spec,omitempty"`
93 | Status PlacementPolicyStatus `json:"status,omitempty"`
94 | }
95 |
96 | //+kubebuilder:object:root=true
97 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
98 |
99 | // PlacementPolicyList contains a list of PlacementPolicy
100 | type PlacementPolicyList struct {
101 | metav1.TypeMeta `json:",inline"`
102 | metav1.ListMeta `json:"metadata,omitempty"`
103 | Items []PlacementPolicy `json:"items"`
104 | }
105 |
--------------------------------------------------------------------------------
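The spec above translates directly into a PlacementPolicy manifest. A minimal sketch follows (names, labels, and values here are hypothetical, not copied from the repo's examples/ directory); note that a percentage targetSize is converted to an absolute count by rounding down, so 25% of 10 matching pods resolves to 2:

apiVersion: placement-policy.scheduling.x-k8s.io/v1alpha1
kind: PlacementPolicy
metadata:
  name: example-strict-mustnot      # hypothetical name
spec:
  weight: 100
  enforcementMode: Strict           # BestEffort (default) scores, Strict filters
  podSelector:
    matchLabels:
      app: nginx                    # hypothetical pod label
  nodeSelector:
    matchLabels:
      node-type: spot               # hypothetical node label
  policy:
    action: MustNot                 # Must (default) or MustNot
    targetSize: "25%"               # an absolute number such as 5 is also accepted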
/apis/v1alpha1/zz_generated.deepcopy.go:
--------------------------------------------------------------------------------
1 | //go:build !ignore_autogenerated
2 | // +build !ignore_autogenerated
3 |
4 | // Code generated by controller-gen. DO NOT EDIT.
5 |
6 | package v1alpha1
7 |
8 | import (
9 | "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/apimachinery/pkg/runtime"
11 | "k8s.io/apimachinery/pkg/util/intstr"
12 | )
13 |
14 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
15 | func (in *PlacementPolicy) DeepCopyInto(out *PlacementPolicy) {
16 | *out = *in
17 | out.TypeMeta = in.TypeMeta
18 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
19 | in.Spec.DeepCopyInto(&out.Spec)
20 | out.Status = in.Status
21 | }
22 |
23 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicy.
24 | func (in *PlacementPolicy) DeepCopy() *PlacementPolicy {
25 | if in == nil {
26 | return nil
27 | }
28 | out := new(PlacementPolicy)
29 | in.DeepCopyInto(out)
30 | return out
31 | }
32 |
33 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
34 | func (in *PlacementPolicy) DeepCopyObject() runtime.Object {
35 | if c := in.DeepCopy(); c != nil {
36 | return c
37 | }
38 | return nil
39 | }
40 |
41 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
42 | func (in *PlacementPolicyList) DeepCopyInto(out *PlacementPolicyList) {
43 | *out = *in
44 | out.TypeMeta = in.TypeMeta
45 | in.ListMeta.DeepCopyInto(&out.ListMeta)
46 | if in.Items != nil {
47 | in, out := &in.Items, &out.Items
48 | *out = make([]PlacementPolicy, len(*in))
49 | for i := range *in {
50 | (*in)[i].DeepCopyInto(&(*out)[i])
51 | }
52 | }
53 | }
54 |
55 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicyList.
56 | func (in *PlacementPolicyList) DeepCopy() *PlacementPolicyList {
57 | if in == nil {
58 | return nil
59 | }
60 | out := new(PlacementPolicyList)
61 | in.DeepCopyInto(out)
62 | return out
63 | }
64 |
65 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
66 | func (in *PlacementPolicyList) DeepCopyObject() runtime.Object {
67 | if c := in.DeepCopy(); c != nil {
68 | return c
69 | }
70 | return nil
71 | }
72 |
73 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
74 | func (in *PlacementPolicySpec) DeepCopyInto(out *PlacementPolicySpec) {
75 | *out = *in
76 | if in.PodSelector != nil {
77 | in, out := &in.PodSelector, &out.PodSelector
78 | *out = new(v1.LabelSelector)
79 | (*in).DeepCopyInto(*out)
80 | }
81 | if in.NodeSelector != nil {
82 | in, out := &in.NodeSelector, &out.NodeSelector
83 | *out = new(v1.LabelSelector)
84 | (*in).DeepCopyInto(*out)
85 | }
86 | if in.Policy != nil {
87 | in, out := &in.Policy, &out.Policy
88 | *out = new(Policy)
89 | (*in).DeepCopyInto(*out)
90 | }
91 | }
92 |
93 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicySpec.
94 | func (in *PlacementPolicySpec) DeepCopy() *PlacementPolicySpec {
95 | if in == nil {
96 | return nil
97 | }
98 | out := new(PlacementPolicySpec)
99 | in.DeepCopyInto(out)
100 | return out
101 | }
102 |
103 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
104 | func (in *PlacementPolicyStatus) DeepCopyInto(out *PlacementPolicyStatus) {
105 | *out = *in
106 | }
107 |
108 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementPolicyStatus.
109 | func (in *PlacementPolicyStatus) DeepCopy() *PlacementPolicyStatus {
110 | if in == nil {
111 | return nil
112 | }
113 | out := new(PlacementPolicyStatus)
114 | in.DeepCopyInto(out)
115 | return out
116 | }
117 |
118 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
119 | func (in *Policy) DeepCopyInto(out *Policy) {
120 | *out = *in
121 | if in.TargetSize != nil {
122 | in, out := &in.TargetSize, &out.TargetSize
123 | *out = new(intstr.IntOrString)
124 | **out = **in
125 | }
126 | }
127 |
128 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
129 | func (in *Policy) DeepCopy() *Policy {
130 | if in == nil {
131 | return nil
132 | }
133 | out := new(Policy)
134 | in.DeepCopyInto(out)
135 | return out
136 | }
137 |
--------------------------------------------------------------------------------
/apis/v1alpha1/zz_generated.register.go:
--------------------------------------------------------------------------------
1 | // Code generated by register-gen. DO NOT EDIT.
2 |
3 | package v1alpha1
4 |
5 | import (
6 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
7 | "k8s.io/apimachinery/pkg/runtime"
8 | "k8s.io/apimachinery/pkg/runtime/schema"
9 | )
10 |
11 | // GroupName specifies the group name used to register the objects.
12 | const GroupName = "placement-policy.scheduling.x-k8s.io"
13 |
14 | // GroupVersion specifies the group and the version used to register the objects.
15 | var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"}
16 |
17 | // SchemeGroupVersion is group version used to register these objects
18 | // Deprecated: use GroupVersion instead.
19 | var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
20 |
21 | // Resource takes an unqualified resource and returns a Group qualified GroupResource
22 | func Resource(resource string) schema.GroupResource {
23 | return SchemeGroupVersion.WithResource(resource).GroupResource()
24 | }
25 |
26 | var (
27 | // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
28 | SchemeBuilder runtime.SchemeBuilder
29 | localSchemeBuilder = &SchemeBuilder
30 | // Deprecated: use Install instead
31 | AddToScheme = localSchemeBuilder.AddToScheme
32 | Install = localSchemeBuilder.AddToScheme
33 | )
34 |
35 | func init() {
36 | // We only register manually written functions here. The registration of the
37 | // generated functions takes place in the generated files. The separation
38 | // makes the code compile even when the generated files are missing.
39 | localSchemeBuilder.Register(addKnownTypes)
40 | }
41 |
42 | // Adds the list of known types to Scheme.
43 | func addKnownTypes(scheme *runtime.Scheme) error {
44 | scheme.AddKnownTypes(SchemeGroupVersion,
45 | &PlacementPolicy{},
46 | &PlacementPolicyList{},
47 | )
48 | // AddToGroupVersion allows the serialization of client types like ListOptions.
49 | v1.AddToGroupVersion(scheme, SchemeGroupVersion)
50 | return nil
51 | }
52 |
--------------------------------------------------------------------------------
/charts/placement-policy-scheduler-plugins/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/charts/placement-policy-scheduler-plugins/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: placement-policy-scheduler-plugins
3 | description: A Helm chart for Kubernetes placement policy scheduler plugins
4 | type: application
5 | version: 0.1.1
6 | appVersion: v0.1.0
7 | kubeVersion: "<=1.22.2"
8 |
--------------------------------------------------------------------------------
/charts/placement-policy-scheduler-plugins/README.md:
--------------------------------------------------------------------------------
1 | # Chart to run placement policy scheduler plugins as a second scheduler in a cluster.
--------------------------------------------------------------------------------
/charts/placement-policy-scheduler-plugins/crds/scheduling.x-k8s.io_placementpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | annotations:
5 | controller-gen.kubebuilder.io/version: v0.7.0
6 | creationTimestamp: null
7 | name: placementpolicies.placement-policy.scheduling.x-k8s.io
8 | spec:
9 | group: placement-policy.scheduling.x-k8s.io
10 | names:
11 | kind: PlacementPolicy
12 | listKind: PlacementPolicyList
13 | plural: placementpolicies
14 | singular: placementpolicy
15 | scope: Namespaced
16 | versions:
17 | - name: v1alpha1
18 | schema:
19 | openAPIV3Schema:
20 | description: PlacementPolicy is the Schema for the placementpolicies API
21 | properties:
22 | apiVersion:
23 | description: 'APIVersion defines the versioned schema of this representation
24 | of an object. Servers should convert recognized schemas to the latest
25 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
26 | type: string
27 | kind:
28 | description: 'Kind is a string value representing the REST resource this
29 | object represents. Servers may infer this from the endpoint the client
30 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
31 | type: string
32 | metadata:
33 | type: object
34 | spec:
35 | description: PlacementPolicySpec defines the desired state of PlacementPolicy
36 | properties:
37 | enforcementMode:
38 | description: 'enforcementMode is an enum that specifies how the policy
39 | will be enforced during scheduling (e.g. the application of filter
40 | vs scorer plugin). Values allowed for this field are: BestEffort
41 | (default): the policy will be enforced as best effort (scorer mode).
42 | Strict: the policy will be forced during scheduling. The filter
43 | approach will be used. Note: that may yield pods unschedulable.'
44 | type: string
45 | nodeSelector:
46 | description: nodeSelector selects the nodes where the placement policy
47 | will apply on according to action
48 | properties:
49 | matchExpressions:
50 | description: matchExpressions is a list of label selector requirements.
51 | The requirements are ANDed.
52 | items:
53 | description: A label selector requirement is a selector that
54 | contains values, a key, and an operator that relates the key
55 | and values.
56 | properties:
57 | key:
58 | description: key is the label key that the selector applies
59 | to.
60 | type: string
61 | operator:
62 | description: operator represents a key's relationship to
63 | a set of values. Valid operators are In, NotIn, Exists
64 | and DoesNotExist.
65 | type: string
66 | values:
67 | description: values is an array of string values. If the
68 | operator is In or NotIn, the values array must be non-empty.
69 | If the operator is Exists or DoesNotExist, the values
70 | array must be empty. This array is replaced during a strategic
71 | merge patch.
72 | items:
73 | type: string
74 | type: array
75 | required:
76 | - key
77 | - operator
78 | type: object
79 | type: array
80 | matchLabels:
81 | additionalProperties:
82 | type: string
83 | description: matchLabels is a map of {key,value} pairs. A single
84 | {key,value} in the matchLabels map is equivalent to an element
85 | of matchExpressions, whose key field is "key", the operator
86 | is "In", and the values array contains only "value". The requirements
87 | are ANDed.
88 | type: object
89 | type: object
90 | podSelector:
91 | description: podSelector identifies which pods this placement policy
92 | will apply on
93 | properties:
94 | matchExpressions:
95 | description: matchExpressions is a list of label selector requirements.
96 | The requirements are ANDed.
97 | items:
98 | description: A label selector requirement is a selector that
99 | contains values, a key, and an operator that relates the key
100 | and values.
101 | properties:
102 | key:
103 | description: key is the label key that the selector applies
104 | to.
105 | type: string
106 | operator:
107 | description: operator represents a key's relationship to
108 | a set of values. Valid operators are In, NotIn, Exists
109 | and DoesNotExist.
110 | type: string
111 | values:
112 | description: values is an array of string values. If the
113 | operator is In or NotIn, the values array must be non-empty.
114 | If the operator is Exists or DoesNotExist, the values
115 | array must be empty. This array is replaced during a strategic
116 | merge patch.
117 | items:
118 | type: string
119 | type: array
120 | required:
121 | - key
122 | - operator
123 | type: object
124 | type: array
125 | matchLabels:
126 | additionalProperties:
127 | type: string
128 | description: matchLabels is a map of {key,value} pairs. A single
129 | {key,value} in the matchLabels map is equivalent to an element
130 | of matchExpressions, whose key field is "key", the operator
131 | is "In", and the values array contains only "value". The requirements
132 | are ANDed.
133 | type: object
134 | type: object
135 | policy:
136 | description: Policy is the policy placement for target based on action
137 | properties:
138 | action:
139 | description: 'The action field is policy placement action. It
140 | is a string enum that carries the following possible values:
141 | Must(default): based on the rule below pods must be placed on
142 | nodes selected by node selector MustNot: based on the rule pods
143 | must *not* be placed on nodes selected by node selector'
144 | type: string
145 | targetSize:
146 | anyOf:
147 | - type: integer
148 | - type: string
149 | description: 'TargetSize is the number of pods that can or cannot
150 | be placed on the node. Value can be an absolute number (ex:
151 | 5) or a percentage of desired pods (ex: 10%). Absolute number
152 | is calculated from percentage by rounding down.'
153 | x-kubernetes-int-or-string: true
154 | type: object
155 | weight:
156 | description: The policy weight allows the engine to decide which policy
157 | to use when pods match multiple policies. If multiple policies matched
158 | and all share the same weight then a policy with spec.enforcementMode
159 | == Force will be selected. If multiple policies match and +1 policy
160 | is marked as “Force” enforcementMode then they will be sorted alphabetically
161 | / ascending and the first one will be used. The scheduler publishes
162 | events capturing this conflict when it happens. Weight == 0-100
163 | is reserved for future use.
164 | format: int32
165 | type: integer
166 | type: object
167 | status:
168 | description: PlacementPolicyStatus defines the observed state of PlacementPolicy
169 | type: object
170 | type: object
171 | served: true
172 | storage: true
173 | subresources:
174 | status: {}
175 | status:
176 | acceptedNames:
177 | kind: ""
178 | plural: ""
179 | conditions: []
180 | storedVersions: []
181 |
--------------------------------------------------------------------------------
/charts/placement-policy-scheduler-plugins/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "placement-policy-scheduler-plugins.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "placement-policy-scheduler-plugins.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "placement-policy-scheduler-plugins.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "placement-policy-scheduler-plugins.labels" -}}
37 | helm.sh/chart: {{ include "placement-policy-scheduler-plugins.chart" . }}
38 | {{ include "placement-policy-scheduler-plugins.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "placement-policy-scheduler-plugins.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "placement-policy-scheduler-plugins.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | component: scheduler
52 | {{- end }}
53 |
54 | {{/*
55 | Create the name of the service account to use
56 | */}}
57 | {{- define "placement-policy-scheduler-plugins.serviceAccountName" -}}
58 | {{- if .Values.serviceAccount.create }}
59 | {{- default (include "placement-policy-scheduler-plugins.fullname" .) .Values.serviceAccount.name }}
60 | {{- else }}
61 | {{- default "default" .Values.serviceAccount.name }}
62 | {{- end }}
63 | {{- end }}
64 |
--------------------------------------------------------------------------------
/charts/placement-policy-scheduler-plugins/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: pp-scheduler-config
5 | namespace: {{ .Release.Namespace }}
6 | data:
7 | scheduler-config.yaml: |
8 | apiVersion: kubescheduler.config.k8s.io/v1beta1
9 | kind: KubeSchedulerConfiguration
10 | leaderElection:
11 | leaderElect: false
12 | profiles:
13 | - schedulerName: placement-policy-plugins-scheduler
14 | plugins:
15 | preScore:
16 | enabled:
17 | - name: placementpolicy
18 | score:
19 | enabled:
20 | - name: placementpolicy
21 | preFilter:
22 | enabled:
23 | - name: placementpolicy
24 | filter:
25 | enabled:
26 | - name: placementpolicy
27 |
--------------------------------------------------------------------------------
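The profile above registers the placementpolicy plugin under the scheduler name placement-policy-plugins-scheduler, so the chart runs alongside the default scheduler. A workload opts in by setting spec.schedulerName to that profile name; a minimal sketch (the pod name, labels, and image are placeholders):

apiVersion: v1
kind: Pod
metadata:
  name: nginx                       # hypothetical workload
  labels:
    app: nginx                      # would be matched by a PlacementPolicy podSelector
spec:
  schedulerName: placement-policy-plugins-scheduler   # must match the profile name above
  containers:
  - name: nginx
    image: nginx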
/charts/placement-policy-scheduler-plugins/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: pp-plugins-scheduler
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | {{- include "placement-policy-scheduler-plugins.labels" . | nindent 4 }}
8 | spec:
9 | replicas: {{ .Values.replicaCount }}
10 | selector:
11 | matchLabels:
12 | {{- include "placement-policy-scheduler-plugins.selectorLabels" . | nindent 6 }}
13 | template:
14 | metadata:
15 | labels:
16 | {{- include "placement-policy-scheduler-plugins.selectorLabels" . | nindent 8 }}
17 | spec:
18 | serviceAccountName: pp-plugins-scheduler
19 | containers:
20 | - command:
21 | - /manager
22 | - --config=/etc/schedulerconfig/scheduler-config.yaml
23 | image: {{ .Values.image }}
24 | name: pp-plugins-scheduler
25 | securityContext:
26 | privileged: true
27 | runAsUser: 0
28 | volumeMounts:
29 | - name: scheduler-config
30 | mountPath: /etc/schedulerconfig
31 | readOnly: true
32 | hostNetwork: false
33 | hostPID: false
34 | volumes:
35 | - name: scheduler-config
36 | configMap:
37 | name: pp-scheduler-config
38 |
--------------------------------------------------------------------------------
/charts/placement-policy-scheduler-plugins/templates/rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: system:pp-plugins-scheduler
5 | rules:
6 | - apiGroups: [""]
7 | resources: ["namespaces", "configmaps"]
8 | verbs: ["get", "list", "watch"]
9 | - apiGroups: ["", "events.k8s.io"]
10 | resources: ["events"]
11 | verbs: ["create", "patch", "update"]
12 | - apiGroups: ["coordination.k8s.io"]
13 | resources: ["leases"]
14 | verbs: ["create"]
15 | - apiGroups: ["coordination.k8s.io"]
16 | resourceNames: ["kube-scheduler"]
17 | resources: ["leases"]
18 | verbs: ["get", "update"]
19 | - apiGroups: [""]
20 | resources: ["endpoints"]
21 | verbs: ["create"]
22 | - apiGroups: [""]
23 | resourceNames: ["kube-scheduler"]
24 | resources: ["endpoints"]
25 | verbs: ["get", "update"]
26 | - apiGroups: [""]
27 | resources: ["nodes"]
28 | verbs: ["get", "list", "watch"]
29 | - apiGroups: [""]
30 | resources: ["pods"]
31 | verbs: ["delete", "get", "list", "watch", "update"]
32 | - apiGroups: [""]
33 | resources: ["bindings", "pods/binding"]
34 | verbs: ["create"]
35 | - apiGroups: [""]
36 | resources: ["pods/status"]
37 | verbs: ["patch", "update"]
38 | - apiGroups: [""]
39 | resources: ["replicationcontrollers", "services"]
40 | verbs: ["get", "list", "watch"]
41 | - apiGroups: ["apps", "extensions"]
42 | resources: ["replicasets"]
43 | verbs: ["get", "list", "watch"]
44 | - apiGroups: ["apps"]
45 | resources: ["statefulsets"]
46 | verbs: ["get", "list", "watch"]
47 | - apiGroups: ["policy"]
48 | resources: ["poddisruptionbudgets"]
49 | verbs: ["get", "list", "watch"]
50 | - apiGroups: [""]
51 | resources: ["persistentvolumeclaims", "persistentvolumes"]
52 | verbs: ["get", "list", "watch", "patch", "update"]
53 | - apiGroups: ["authentication.k8s.io"]
54 | resources: ["tokenreviews"]
55 | verbs: ["create"]
56 | - apiGroups: ["authorization.k8s.io"]
57 | resources: ["subjectaccessreviews"]
58 | verbs: ["create"]
59 | - apiGroups: ["storage.k8s.io"]
60 | resources: ["*"]
61 | verbs: ["get", "list", "watch"]
62 | - apiGroups: ["placement-policy.scheduling.x-k8s.io"]
63 | resources: ["placementpolicies"]
64 | verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
65 | ---
66 | kind: ClusterRoleBinding
67 | apiVersion: rbac.authorization.k8s.io/v1
68 | metadata:
69 | name: pp-plugins-scheduler
70 | subjects:
71 | - kind: User
72 | name: system:kube-scheduler
73 | namespace: {{ .Release.Namespace }}
74 | apiGroup: rbac.authorization.k8s.io
75 | roleRef:
76 | kind: ClusterRole
77 | name: system:pp-plugins-scheduler
78 | apiGroup: rbac.authorization.k8s.io
79 | ---
80 | apiVersion: rbac.authorization.k8s.io/v1
81 | kind: ClusterRoleBinding
82 | metadata:
83 | name: pp-plugins-scheduler:system:auth-delegator
84 | roleRef:
85 | apiGroup: rbac.authorization.k8s.io
86 | kind: ClusterRole
87 | name: system:auth-delegator
88 | subjects:
89 | - kind: ServiceAccount
90 | name: pp-plugins-scheduler
91 | namespace: {{ .Release.Namespace }}
92 | ---
93 | # To be able to retrieve the PlacementPolicy objects, the following role has been added
94 | apiVersion: rbac.authorization.k8s.io/v1
95 | kind: RoleBinding
96 | metadata:
97 | name: pp-plugins-scheduler-as-kube-scheduler
98 | namespace: {{ .Release.Namespace }}
99 | subjects:
100 | - kind: ServiceAccount
101 | name: pp-plugins-scheduler
102 | namespace: {{ .Release.Namespace }}
103 | roleRef:
104 | kind: Role
105 | name: extension-apiserver-authentication-reader
106 | apiGroup: rbac.authorization.k8s.io
107 | ---
108 | apiVersion: rbac.authorization.k8s.io/v1
109 | kind: ClusterRoleBinding
110 | metadata:
111 | name: pp-plugins-scheduler-as-kube-scheduler
112 | subjects:
113 | - kind: ServiceAccount
114 | name: pp-plugins-scheduler
115 | namespace: {{ .Release.Namespace }}
116 | roleRef:
117 | kind: ClusterRole
118 | name: system:pp-plugins-scheduler
119 | apiGroup: rbac.authorization.k8s.io
120 | ---
121 | apiVersion: v1
122 | kind: ServiceAccount
123 | metadata:
124 | name: pp-plugins-scheduler
125 | namespace: {{ .Release.Namespace }}
126 |
--------------------------------------------------------------------------------
/charts/placement-policy-scheduler-plugins/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for placement-policy-scheduler-plugins.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | image: ghcr.io/azure/placement-policy-scheduler-plugins/placement-policy:v0.1.0
6 | replicaCount: 1
7 |
--------------------------------------------------------------------------------
/cmd/scheduler/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 |
6 | "github.com/Azure/placement-policy-scheduler-plugins/pkg/plugins/placementpolicy"
7 |
8 | "k8s.io/klog/v2"
9 | "k8s.io/kubernetes/cmd/kube-scheduler/app"
10 | // +kubebuilder:scaffold:imports
11 | )
12 |
13 | func main() {
14 | // register the custom plugins with kube-scheduler
15 | command := app.NewSchedulerCommand(
16 | app.WithPlugin(placementpolicy.Name, placementpolicy.New))
17 |
18 | if err := command.Execute(); err != nil {
19 | klog.ErrorS(err, "unable to run command")
20 | os.Exit(1)
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/config/certmanager/certificate.yaml:
--------------------------------------------------------------------------------
1 | # The following manifests contain a self-signed issuer CR and a certificate CR.
2 | # More document can be found at https://docs.cert-manager.io
3 | # WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for
4 | # breaking changes
5 | apiVersion: cert-manager.io/v1alpha2
6 | kind: Issuer
7 | metadata:
8 | name: selfsigned-issuer
9 | namespace: system
10 | spec:
11 | selfSigned: {}
12 | ---
13 | apiVersion: cert-manager.io/v1alpha2
14 | kind: Certificate
15 | metadata:
16 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml
17 | namespace: system
18 | spec:
19 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
20 | dnsNames:
21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
22 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
23 | issuerRef:
24 | kind: Issuer
25 | name: selfsigned-issuer
26 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize
27 |
--------------------------------------------------------------------------------
/config/certmanager/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - certificate.yaml
3 |
4 | configurations:
5 | - kustomizeconfig.yaml
6 |
--------------------------------------------------------------------------------
/config/certmanager/kustomizeconfig.yaml:
--------------------------------------------------------------------------------
1 | # This configuration is for teaching kustomize how to update name ref and var substitution
2 | nameReference:
3 | - kind: Issuer
4 | group: cert-manager.io
5 | fieldSpecs:
6 | - kind: Certificate
7 | group: cert-manager.io
8 | path: spec/issuerRef/name
9 |
10 | varReference:
11 | - kind: Certificate
12 | group: cert-manager.io
13 | path: spec/commonName
14 | - kind: Certificate
15 | group: cert-manager.io
16 | path: spec/dnsNames
17 |
--------------------------------------------------------------------------------
/config/crd/bases/placement-policy.scheduling.x-k8s.io_placementpolicies.yaml:
--------------------------------------------------------------------------------
1 |
2 | ---
3 | apiVersion: apiextensions.k8s.io/v1
4 | kind: CustomResourceDefinition
5 | metadata:
6 | annotations:
7 | controller-gen.kubebuilder.io/version: v0.7.0
8 | creationTimestamp: null
9 | name: placementpolicies.placement-policy.scheduling.x-k8s.io
10 | spec:
11 | group: placement-policy.scheduling.x-k8s.io
12 | names:
13 | kind: PlacementPolicy
14 | listKind: PlacementPolicyList
15 | plural: placementpolicies
16 | singular: placementpolicy
17 | scope: Namespaced
18 | versions:
19 | - name: v1alpha1
20 | schema:
21 | openAPIV3Schema:
22 | description: PlacementPolicy is the Schema for the placementpolicies API
23 | properties:
24 | apiVersion:
25 | description: 'APIVersion defines the versioned schema of this representation
26 | of an object. Servers should convert recognized schemas to the latest
27 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
28 | type: string
29 | kind:
30 | description: 'Kind is a string value representing the REST resource this
31 | object represents. Servers may infer this from the endpoint the client
32 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
33 | type: string
34 | metadata:
35 | type: object
36 | spec:
37 | description: PlacementPolicySpec defines the desired state of PlacementPolicy
38 | properties:
39 | enforcementMode:
40 | description: 'enforcementMode is an enum that specifies how the policy
41 | will be enforced during scheduler (e.g. the application of filter
42 | vs scorer plugin). Values allowed for this field are: BestEffort
43 | (default): the policy will be enforced as best effort (scorer mode).
44 | Strict: the policy will be forced during scheduling. The filter
45 | approach will be used. Note: that may yield pods unschedulable.'
46 | type: string
47 | nodeSelector:
48 | description: nodeSelector selects the nodes where the placement policy
49 | will apply on according to action
50 | properties:
51 | matchExpressions:
52 | description: matchExpressions is a list of label selector requirements.
53 | The requirements are ANDed.
54 | items:
55 | description: A label selector requirement is a selector that
56 | contains values, a key, and an operator that relates the key
57 | and values.
58 | properties:
59 | key:
60 | description: key is the label key that the selector applies
61 | to.
62 | type: string
63 | operator:
64 | description: operator represents a key's relationship to
65 | a set of values. Valid operators are In, NotIn, Exists
66 | and DoesNotExist.
67 | type: string
68 | values:
69 | description: values is an array of string values. If the
70 | operator is In or NotIn, the values array must be non-empty.
71 | If the operator is Exists or DoesNotExist, the values
72 | array must be empty. This array is replaced during a strategic
73 | merge patch.
74 | items:
75 | type: string
76 | type: array
77 | required:
78 | - key
79 | - operator
80 | type: object
81 | type: array
82 | matchLabels:
83 | additionalProperties:
84 | type: string
85 | description: matchLabels is a map of {key,value} pairs. A single
86 | {key,value} in the matchLabels map is equivalent to an element
87 | of matchExpressions, whose key field is "key", the operator
88 | is "In", and the values array contains only "value". The requirements
89 | are ANDed.
90 | type: object
91 | type: object
92 | podSelector:
93 | description: podSelector identifies which pods this placement policy
94 | will apply on
95 | properties:
96 | matchExpressions:
97 | description: matchExpressions is a list of label selector requirements.
98 | The requirements are ANDed.
99 | items:
100 | description: A label selector requirement is a selector that
101 | contains values, a key, and an operator that relates the key
102 | and values.
103 | properties:
104 | key:
105 | description: key is the label key that the selector applies
106 | to.
107 | type: string
108 | operator:
109 | description: operator represents a key's relationship to
110 | a set of values. Valid operators are In, NotIn, Exists
111 | and DoesNotExist.
112 | type: string
113 | values:
114 | description: values is an array of string values. If the
115 | operator is In or NotIn, the values array must be non-empty.
116 | If the operator is Exists or DoesNotExist, the values
117 | array must be empty. This array is replaced during a strategic
118 | merge patch.
119 | items:
120 | type: string
121 | type: array
122 | required:
123 | - key
124 | - operator
125 | type: object
126 | type: array
127 | matchLabels:
128 | additionalProperties:
129 | type: string
130 | description: matchLabels is a map of {key,value} pairs. A single
131 | {key,value} in the matchLabels map is equivalent to an element
132 | of matchExpressions, whose key field is "key", the operator
133 | is "In", and the values array contains only "value". The requirements
134 | are ANDed.
135 | type: object
136 | type: object
137 | policy:
138 | description: Policy is the policy placement for target based on action
139 | properties:
140 | action:
141 | description: 'The action field is policy placement action. It
142 | is a string enum that carries the following possible values:
143 | Must(default): based on the rule below pods must be placed on
144 | nodes selected by node selector MustNot: based on the rule pods
145 |                     must *not* be placed on nodes selected by node selector'
146 | type: string
147 | targetSize:
148 | anyOf:
149 | - type: integer
150 | - type: string
151 | description: 'TargetSize is the number of pods that can or cannot
152 | be placed on the node. Value can be an absolute number (ex:
153 | 5) or a percentage of desired pods (ex: 10%). Absolute number
154 | is calculated from percentage by rounding down.'
155 | x-kubernetes-int-or-string: true
156 | type: object
157 | weight:
158 | description: The policy weight allows the engine to decide which policy
159 | to use when pods match multiple policies. If multiple policies matched
160 | and all share the same weight then a policy with spec.enforcementMode
161 | == Force will be selected. If multiple policies match and +1 policy
162 |                 is marked as “Force” enforcementMode then they will be sorted alphabetically
163 |                 / ascending and the first one will be used. The scheduler publishes
164 | events capturing this conflict when it happens. Weight == 0-100
165 | is reserved for future use.
166 | format: int32
167 | type: integer
168 | type: object
169 | status:
170 | description: PlacementPolicyStatus defines the observed state of PlacementPolicy
171 | type: object
172 | type: object
173 | served: true
174 | storage: true
175 | subresources:
176 | status: {}
177 | status:
178 | acceptedNames:
179 | kind: ""
180 | plural: ""
181 | conditions: []
182 | storedVersions: []
183 |
--------------------------------------------------------------------------------
/config/crd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # This kustomization.yaml is not intended to be run by itself,
2 | # since it depends on service name and namespace that are out of this kustomize package.
3 | # It should be run by config/default
4 | resources:
5 | - bases/placement-policy.scheduling.x-k8s.io_placementpolicies.yaml
6 | #+kubebuilder:scaffold:crdkustomizeresource
7 |
8 | patchesStrategicMerge:
9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
10 | # patches here are for enabling the conversion webhook for each CRD
11 | #- patches/webhook_in_placementpolicies.yaml
12 | #+kubebuilder:scaffold:crdkustomizewebhookpatch
13 |
14 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
15 | # patches here are for enabling the CA injection for each CRD
16 | #- patches/cainjection_in_placementpolicies.yaml
17 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch
18 |
19 | # the following config is for teaching kustomize how to do kustomization for CRDs.
20 | configurations:
21 | - kustomizeconfig.yaml
22 |
--------------------------------------------------------------------------------
/config/crd/kustomizeconfig.yaml:
--------------------------------------------------------------------------------
1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD
2 | nameReference:
3 | - kind: Service
4 | version: v1
5 | fieldSpecs:
6 | - kind: CustomResourceDefinition
7 | group: apiextensions.k8s.io
8 | path: spec/conversion/webhookClientConfig/service/name
9 |
10 | namespace:
11 | - kind: CustomResourceDefinition
12 | group: apiextensions.k8s.io
13 | path: spec/conversion/webhookClientConfig/service/namespace
14 | create: false
15 |
16 | varReference:
17 | - path: metadata/annotations
18 |
--------------------------------------------------------------------------------
/config/crd/patches/cainjection_in_placementpolicies.yaml:
--------------------------------------------------------------------------------
1 | # The following patch adds a directive for certmanager to inject CA into the CRD
2 | # CRD conversion requires k8s 1.13 or later.
3 | apiVersion: apiextensions.k8s.io/v1beta1
4 | kind: CustomResourceDefinition
5 | metadata:
6 | annotations:
7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
8 | name: placementpolicies.placement-policy.scheduling.x-k8s.io
9 |
--------------------------------------------------------------------------------
/config/crd/patches/webhook_in_placementpolicies.yaml:
--------------------------------------------------------------------------------
1 | # The following patch enables conversion webhook for CRD
2 | # CRD conversion requires k8s 1.13 or later.
3 | apiVersion: apiextensions.k8s.io/v1beta1
4 | kind: CustomResourceDefinition
5 | metadata:
6 | name: placementpolicies.placement-policy.scheduling.x-k8s.io
7 | spec:
8 | conversion:
9 | strategy: Webhook
10 | webhookClientConfig:
11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
13 | caBundle: Cg==
14 | service:
15 | namespace: system
16 | name: webhook-service
17 | path: /convert
18 |
--------------------------------------------------------------------------------
/config/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # Adds namespace to all resources.
2 | namespace: placement-policy-scheduler-plugins-system
3 |
4 | # Value of this field is prepended to the
5 | # names of all resources, e.g. a deployment named
6 | # "wordpress" becomes "alices-wordpress".
7 | # Note that it should also match with the prefix (text before '-') of the namespace
8 | # field above.
9 | namePrefix: placement-policy-scheduler-plugins-
10 |
11 | # Labels to add to all resources and selectors.
12 | #commonLabels:
13 | # someName: someValue
14 |
15 | bases:
16 | - ../crd
17 | - ../rbac
18 | - ../manager
19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
20 | # crd/kustomization.yaml
21 | #- ../webhook
22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
23 | #- ../certmanager
24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
25 | #- ../prometheus
26 |
27 | # patchesStrategicMerge:
28 | # Protect the /metrics endpoint by putting it behind auth.
29 | # If you want your controller-manager to expose the /metrics
30 | # endpoint w/o any authn/z, please comment the following line.
31 | # - manager_auth_proxy_patch.yaml
32 |
33 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
34 | # crd/kustomization.yaml
35 | #- manager_webhook_patch.yaml
36 |
37 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
38 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
39 | # 'CERTMANAGER' needs to be enabled to use ca injection
40 | #- webhookcainjection_patch.yaml
41 |
42 | # the following config is for teaching kustomize how to do var substitution
43 | # vars:
44 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
45 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
46 | # objref:
47 | # kind: Certificate
48 | # group: cert-manager.io
49 | # version: v1alpha2
50 | # name: serving-cert # this name should match the one in certificate.yaml
51 | # fieldref:
52 | # fieldpath: metadata.namespace
53 | #- name: CERTIFICATE_NAME
54 | # objref:
55 | # kind: Certificate
56 | # group: cert-manager.io
57 | # version: v1alpha2
58 | # name: serving-cert # this name should match the one in certificate.yaml
59 | #- name: SERVICE_NAMESPACE # namespace of the service
60 | # objref:
61 | # kind: Service
62 | # version: v1
63 | # name: webhook-service
64 | # fieldref:
65 | # fieldpath: metadata.namespace
66 | #- name: SERVICE_NAME
67 | # objref:
68 | # kind: Service
69 | # version: v1
70 | # name: webhook-service
71 |
--------------------------------------------------------------------------------
/config/default/manager_auth_proxy_patch.yaml:
--------------------------------------------------------------------------------
1 | # This patch injects a sidecar container which is an HTTP proxy for the
2 | # controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 | name: controller-manager
7 | namespace: system
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: kube-rbac-proxy
13 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0
14 | args:
15 | - "--secure-listen-address=0.0.0.0:8443"
16 | - "--upstream=http://127.0.0.1:8080/"
17 | - "--logtostderr=true"
18 | - "--v=10"
19 | ports:
20 | - containerPort: 8443
21 | name: https
22 | - name: manager
23 | args:
24 | - "--metrics-addr=127.0.0.1:8080"
25 | - "--enable-leader-election"
26 |
--------------------------------------------------------------------------------
/config/default/manager_webhook_patch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: controller-manager
5 | namespace: system
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: manager
11 | ports:
12 | - containerPort: 9443
13 | name: webhook-server
14 | protocol: TCP
15 | volumeMounts:
16 | - mountPath: /tmp/k8s-webhook-server/serving-certs
17 | name: cert
18 | readOnly: true
19 | volumes:
20 | - name: cert
21 | secret:
22 | defaultMode: 420
23 | secretName: webhook-server-cert
24 |
--------------------------------------------------------------------------------
/config/default/webhookcainjection_patch.yaml:
--------------------------------------------------------------------------------
1 | # This patch adds annotations to the admission webhook config and
2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
3 | apiVersion: admissionregistration.k8s.io/v1beta1
4 | kind: MutatingWebhookConfiguration
5 | metadata:
6 | name: mutating-webhook-configuration
7 | annotations:
8 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
9 | ---
10 | apiVersion: admissionregistration.k8s.io/v1beta1
11 | kind: ValidatingWebhookConfiguration
12 | metadata:
13 | name: validating-webhook-configuration
14 | annotations:
15 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
16 |
--------------------------------------------------------------------------------
/config/manager/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - manager.yaml
3 |
--------------------------------------------------------------------------------
/config/manager/manager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | control-plane: controller-manager
6 | name: system
7 | ---
8 | apiVersion: apps/v1
9 | kind: Deployment
10 | metadata:
11 | name: controller-manager
12 | namespace: system
13 | labels:
14 | control-plane: controller-manager
15 | spec:
16 | selector:
17 | matchLabels:
18 | control-plane: controller-manager
19 | replicas: 1
20 | template:
21 | metadata:
22 | labels:
23 | control-plane: controller-manager
24 | spec:
25 | containers:
26 | - command:
27 | - /manager
28 | args:
29 | - --enable-leader-election
30 | image: controller:latest
31 | name: manager
32 | resources:
33 | limits:
34 | cpu: 100m
35 | memory: 30Mi
36 | requests:
37 | cpu: 100m
38 | memory: 20Mi
39 | terminationGracePeriodSeconds: 10
40 |
--------------------------------------------------------------------------------
/config/prometheus/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - monitor.yaml
3 |
--------------------------------------------------------------------------------
/config/prometheus/monitor.yaml:
--------------------------------------------------------------------------------
1 |
2 | # Prometheus Monitor Service (Metrics)
3 | apiVersion: monitoring.coreos.com/v1
4 | kind: ServiceMonitor
5 | metadata:
6 | labels:
7 | control-plane: controller-manager
8 | name: controller-manager-metrics-monitor
9 | namespace: system
10 | spec:
11 | endpoints:
12 | - path: /metrics
13 | port: https
14 | selector:
15 | matchLabels:
16 | control-plane: controller-manager
17 |
--------------------------------------------------------------------------------
/config/rbac/auth_proxy_client_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRole
3 | metadata:
4 | name: metrics-reader
5 | rules:
6 | - nonResourceURLs: ["/metrics"]
7 | verbs: ["get"]
8 |
--------------------------------------------------------------------------------
/config/rbac/auth_proxy_role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: proxy-role
5 | rules:
6 | - apiGroups: ["authentication.k8s.io"]
7 | resources:
8 | - tokenreviews
9 | verbs: ["create"]
10 | - apiGroups: ["authorization.k8s.io"]
11 | resources:
12 | - subjectaccessreviews
13 | verbs: ["create"]
14 |
--------------------------------------------------------------------------------
/config/rbac/auth_proxy_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: proxy-rolebinding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: proxy-role
9 | subjects:
10 | - kind: ServiceAccount
11 | name: default
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/config/rbac/auth_proxy_service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | control-plane: controller-manager
6 | name: controller-manager-metrics-service
7 | namespace: system
8 | spec:
9 | ports:
10 | - name: https
11 | port: 8443
12 | targetPort: https
13 | selector:
14 | control-plane: controller-manager
15 |
--------------------------------------------------------------------------------
/config/rbac/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - role.yaml
3 | - role_binding.yaml
4 | - leader_election_role.yaml
5 | - leader_election_role_binding.yaml
6 | # Comment the following 4 lines if you want to disable
7 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy)
8 | # which protects your /metrics endpoint.
9 | # - auth_proxy_service.yaml
10 | # - auth_proxy_role.yaml
11 | # - auth_proxy_role_binding.yaml
12 | # - auth_proxy_client_clusterrole.yaml
13 |
--------------------------------------------------------------------------------
/config/rbac/leader_election_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions to do leader election.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | name: leader-election-role
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - configmaps
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - create
16 | - update
17 | - patch
18 | - delete
19 | - apiGroups:
20 | - ""
21 | resources:
22 | - configmaps/status
23 | verbs:
24 | - get
25 | - update
26 | - patch
27 | - apiGroups:
28 | - ""
29 | resources:
30 | - events
31 | verbs:
32 | - create
33 |
--------------------------------------------------------------------------------
/config/rbac/leader_election_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: leader-election-rolebinding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: Role
8 | name: leader-election-role
9 | subjects:
10 | - kind: ServiceAccount
11 | name: default
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/config/rbac/placementpolicy_editor_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to edit placementpolicies.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: placementpolicy-editor-role
6 | rules:
7 | - apiGroups:
8 | - placement-policy.scheduling.x-k8s.io
9 | resources:
10 | - placementpolicies
11 | verbs:
12 | - create
13 | - delete
14 | - get
15 | - list
16 | - patch
17 | - update
18 | - watch
19 | - apiGroups:
20 | - placement-policy.scheduling.x-k8s.io
21 | resources:
22 | - placementpolicies/status
23 | verbs:
24 | - get
25 |
--------------------------------------------------------------------------------
/config/rbac/placementpolicy_viewer_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to view placementpolicies.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: placementpolicy-viewer-role
6 | rules:
7 | - apiGroups:
8 | - placement-policy.scheduling.x-k8s.io
9 | resources:
10 | - placementpolicies
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - apiGroups:
16 | - placement-policy.scheduling.x-k8s.io
17 | resources:
18 | - placementpolicies/status
19 | verbs:
20 | - get
21 |
--------------------------------------------------------------------------------
/config/rbac/role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: manager-rolebinding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: manager-role
9 | subjects:
10 | - kind: ServiceAccount
11 | name: default
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/config/samples/placement-policy.scheduling.x-k8s.io_v1alpha1_placementpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: placement-policy.scheduling.x-k8s.io/v1alpha1
2 | kind: PlacementPolicy
3 | metadata:
4 | name: placementpolicy-sample
5 | spec:
6 | # Add fields here
7 | foo: bar
8 |
--------------------------------------------------------------------------------
/config/webhook/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - manifests.yaml
3 | - service.yaml
4 |
5 | configurations:
6 | - kustomizeconfig.yaml
7 |
--------------------------------------------------------------------------------
/config/webhook/kustomizeconfig.yaml:
--------------------------------------------------------------------------------
1 | # the following config is for teaching kustomize where to look at when substituting vars.
2 | # It requires kustomize v2.1.0 or newer to work properly.
3 | nameReference:
4 | - kind: Service
5 | version: v1
6 | fieldSpecs:
7 | - kind: MutatingWebhookConfiguration
8 | group: admissionregistration.k8s.io
9 | path: webhooks/clientConfig/service/name
10 | - kind: ValidatingWebhookConfiguration
11 | group: admissionregistration.k8s.io
12 | path: webhooks/clientConfig/service/name
13 |
14 | namespace:
15 | - kind: MutatingWebhookConfiguration
16 | group: admissionregistration.k8s.io
17 | path: webhooks/clientConfig/service/namespace
18 | create: true
19 | - kind: ValidatingWebhookConfiguration
20 | group: admissionregistration.k8s.io
21 | path: webhooks/clientConfig/service/namespace
22 | create: true
23 |
24 | varReference:
25 | - path: metadata/annotations
26 |
--------------------------------------------------------------------------------
/config/webhook/service.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: webhook-service
6 | namespace: system
7 | spec:
8 | ports:
9 | - port: 443
10 | targetPort: 9443
11 | selector:
12 | control-plane: controller-manager
13 |
--------------------------------------------------------------------------------
/examples/basic-mixed-node-pools/README.md:
--------------------------------------------------------------------------------
1 | # Running Placement Policy Scheduler Plugins in Mixed Node Cluster
2 |
3 | ## AKS demo
4 |
5 | #### 1. Create an [AKS](https://docs.microsoft.com/en-us/azure/aks/) cluster
6 |
7 | ```sh
8 | # Create a resource group in South Central US
9 | az group create --name multinodepool --location southcentralus
10 |
11 | # Create a basic single-node pool AKS cluster; the Basic load balancer SKU is not supported so you must use Standard
12 | az aks create \
13 | --resource-group multinodepool \
14 | --name multinodepoolcluster \
15 | --vm-set-type VirtualMachineScaleSets \
16 | --node-count 3 \
17 | --generate-ssh-keys \
18 | --load-balancer-sku standard
19 |
20 | # Get credentials for cluster access for use in current session
21 | az aks get-credentials --resource-group multinodepool --name multinodepoolcluster
22 |
23 | # Add a second node pool with priority equaling Spot
24 | az aks nodepool add \
25 | --resource-group multinodepool \
26 | --cluster-name multinodepoolcluster \
27 | --name spotnodepool \
28 | --priority Spot \
29 | --eviction-policy Delete \
30 | --spot-max-price -1 \
31 | --enable-cluster-autoscaler \
32 | --min-count 1 \
33 | --max-count 3
34 | ```
35 | >**Important**: The nodes in the newly created _spotnodepool_ carry the taint `kubernetes.azure.com/scalesetpriority=spot:NoSchedule`
36 |
37 | #### 2. Deploy placement-policy-scheduler-plugins as a secondary scheduler
38 |
39 | The container image for the scheduler plugin is available in the GitHub Container Registry.
40 |
41 | ```sh
42 | kubectl apply -f https://raw.githubusercontent.com/Azure/placement-policy-scheduler-plugins/main/deploy/kube-scheduler-configuration.yml
43 | ```
44 |
45 |
46 | Output
47 |
48 | ```
49 | customresourcedefinition.apiextensions.k8s.io/placementpolicies.placement-policy.scheduling.x-k8s.io created
50 | configmap/pp-scheduler-config created
51 | clusterrole.rbac.authorization.k8s.io/system:pp-plugins-scheduler created
52 | clusterrolebinding.rbac.authorization.k8s.io/pp-plugins-scheduler created
53 | clusterrolebinding.rbac.authorization.k8s.io/pp-plugins-scheduler:system:auth-delegator created
54 | rolebinding.rbac.authorization.k8s.io/pp-plugins-scheduler-as-kube-scheduler created
55 | clusterrolebinding.rbac.authorization.k8s.io/pp-plugins-scheduler-as-kube-scheduler created
56 | serviceaccount/pp-plugins-scheduler created
57 | deployment.apps/pp-plugins-scheduler created
58 | ```
59 |
60 |
61 | #### 3. Choose node selector
62 |
63 | Node labels are one way to indicate the nodes a given placement policy applies to. The Azure scaleset priority label (`kubernetes.azure.com/scalesetpriority`) is appropriate for identifying spot nodes.
64 |
65 | ```sh
66 | az aks nodepool list --resource-group multinodepool --cluster-name multinodepoolcluster --query "[].{Name:name, NodeLabels:nodeLabels, Priority:scaleSetPriority, Taints:nodeTaints}"
67 | ```
68 |
69 |
70 | Output
71 |
72 | ```json
73 | [
74 | {
75 | "Name": "nodepool1",
76 | "NodeLabels": null,
77 | "Priority": null,
78 | "Taints": null
79 | },
80 | {
81 | "Name": "spotnodepool",
82 | "NodeLabels": {
83 | "kubernetes.azure.com/scalesetpriority": "spot"
84 | },
85 | "Priority": "Spot",
86 | "Taints": [
87 | "kubernetes.azure.com/scalesetpriority=spot:NoSchedule"
88 | ]
89 | }
90 | ]
91 | ```
92 |
93 |
94 | The node pool created with the cluster (_nodepool1_) lacks labels, priority and taints. Node pool _spotnodepool_ has `Priority` equal to `Spot`, node label `kubernetes.azure.com/scalesetpriority:spot` and taint `kubernetes.azure.com/scalesetpriority=spot:NoSchedule` as expected.
95 |
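The same label can also be confirmed from the cluster side. As a quick check (assuming `kubectl` is already pointed at this cluster), the `-L` flag prints the label value as an extra column:

```sh
# list nodes with the scaleset priority label shown as an extra column
kubectl get nodes -L kubernetes.azure.com/scalesetpriority
```

Nodes from _spotnodepool_ should show `spot` in that column, while nodes from _nodepool1_ show an empty value.
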
96 | #### 4. Deploy a `PlacementPolicy` CRD
97 |
98 | ```yaml
99 | apiVersion: placement-policy.scheduling.x-k8s.io/v1alpha1
100 | kind: PlacementPolicy
101 | metadata:
102 | name: mixednodepools-strict-must-spot
103 | spec:
104 | weight: 100
105 | enforcementMode: Strict
106 | podSelector:
107 | matchLabels:
108 | app: nginx
109 | nodeSelector:
110 | matchLabels:
111 | kubernetes.azure.com/scalesetpriority: spot
112 | policy:
113 | action: Must
114 | targetSize: 40%
115 | ```
116 |
117 |
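The same manifest is available in this repo as `examples/basic-mixed-node-pools/v1alpha1_placementpolicy_mixednodepools.yml`, so one way to apply it (assuming the file on the `main` branch) is:

```sh
kubectl apply -f https://raw.githubusercontent.com/Azure/placement-policy-scheduler-plugins/main/examples/basic-mixed-node-pools/v1alpha1_placementpolicy_mixednodepools.yml
```
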
118 | Output
119 |
120 | ```
121 | placementpolicy.placement-policy.scheduling.x-k8s.io/mixednodepools-strict-must-spot created
122 | ```
123 |
124 |
125 | #### 5. Deploy a `Deployment` with replicas
126 |
127 | The desired outcome is 10 pods running with the provided spec. When running the placement policy scheduler as a secondary scheduler, the desired scheduler's name must be set via `schedulerName` in the pod template.
128 |
129 | >Since all nodes in _spotnodepool_ have a taint, a corresponding toleration **must** be included.
130 |
131 | ```yml
132 | apiVersion: apps/v1
133 | kind: Deployment
134 | metadata:
135 | name: nginx-deployment
136 | labels:
137 | app: nginx
138 | spec:
139 | replicas: 10
140 | selector:
141 | matchLabels:
142 | app: nginx
143 | template:
144 | metadata:
145 | name: nginx
146 | labels:
147 | app: nginx
148 | spec:
149 | schedulerName: placement-policy-plugins-scheduler
150 | containers:
151 | - name: nginx
152 | image: nginx
153 | tolerations:
154 | - key: "kubernetes.azure.com/scalesetpriority"
155 | operator: "Exists"
156 | effect: "NoSchedule"
157 | ```
158 |
159 |
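This deployment is committed in this repo as `examples/basic-mixed-node-pools/demo_deployment.yml`, so one way to apply it (again assuming the file on the `main` branch) is:

```sh
kubectl apply -f https://raw.githubusercontent.com/Azure/placement-policy-scheduler-plugins/main/examples/basic-mixed-node-pools/demo_deployment.yml
```
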
160 | Output
161 |
162 | ```
163 | deployment.apps/nginx-deployment created
164 | ```
165 |
166 |
167 | #### 6. Validate node assignment
168 |
169 | ```sh
170 | kubectl get po -o wide -l app=nginx --sort-by="{.spec.nodeName}"
171 | ```
172 |
173 |
174 | Output
175 |
176 | ```sh
177 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
178 | nginx-deployment-5cf7bbf99b-2hd42 1/1 Running 0 93s 10.244.1.4 aks-nodepool1-25514715-vmss000000
179 | nginx-deployment-5cf7bbf99b-d6g8g 1/1 Running 0 93s 10.244.1.5 aks-nodepool1-25514715-vmss000000
180 | nginx-deployment-5cf7bbf99b-6krbw 1/1 Running 0 93s 10.244.0.5 aks-nodepool1-25514715-vmss000001
181 | nginx-deployment-5cf7bbf99b-bpgmh 1/1 Running 0 93s 10.244.0.4 aks-nodepool1-25514715-vmss000001
182 | nginx-deployment-5cf7bbf99b-7d8lf 1/1 Running 0 93s 10.244.2.8 aks-nodepool1-25514715-vmss000002
183 | nginx-deployment-5cf7bbf99b-z69v6 1/1 Running 0 93s 10.244.2.9 aks-nodepool1-25514715-vmss000002
184 | nginx-deployment-5cf7bbf99b-24xgw 1/1 Running 0 93s 10.244.4.5 aks-spotnodepool-40924876-vmss000002
185 | nginx-deployment-5cf7bbf99b-554wh 1/1 Running 0 93s 10.244.4.4 aks-spotnodepool-40924876-vmss000002
186 | nginx-deployment-5cf7bbf99b-dn48d 1/1 Running 0 93s 10.244.4.2 aks-spotnodepool-40924876-vmss000002
187 | nginx-deployment-5cf7bbf99b-j7f7l 1/1 Running 0 93s 10.244.4.3 aks-spotnodepool-40924876-vmss000002
188 | ```
189 |
190 | As expected, four out of the ten pods created are running on nodes within _spotnodepool_. This aligns with the 40% `targetSize` set in the _mixednodepools-strict-must-spot_ `PlacementPolicy`.
191 |
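For a quick sanity check, a hypothetical one-liner that simply counts the pods whose node name contains `spotnodepool` should print `4` here:

```sh
# count pods from the deployment scheduled onto spot nodes
kubectl get po -l app=nginx -o wide --no-headers | grep -c spotnodepool
```
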
192 |
193 | #### 7. Clean up
194 |
195 | - Delete [AKS](https://docs.microsoft.com/en-us/azure/aks/) cluster
196 |
197 | ```bash
198 | az group delete --name multinodepool --yes --no-wait
199 | ```
--------------------------------------------------------------------------------
/examples/basic-mixed-node-pools/demo_deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | labels:
6 | app: nginx
7 | spec:
8 | replicas: 10
9 | selector:
10 | matchLabels:
11 | app: nginx
12 | template:
13 | metadata:
14 | name: nginx
15 | labels:
16 | app: nginx
17 | spec:
18 | schedulerName: placement-policy-plugins-scheduler
19 | containers:
20 | - name: nginx
21 | image: nginx
22 | tolerations:
23 | - key: "kubernetes.azure.com/scalesetpriority"
24 | operator: "Exists"
25 | effect: "NoSchedule"
26 |
--------------------------------------------------------------------------------
/examples/basic-mixed-node-pools/v1alpha1_placementpolicy_mixednodepools.yml:
--------------------------------------------------------------------------------
1 | apiVersion: placement-policy.scheduling.x-k8s.io/v1alpha1
2 | kind: PlacementPolicy
3 | metadata:
4 | name: mixednodepools-strict-must-spot
5 | spec:
6 | weight: 100
7 | enforcementMode: Strict
8 | podSelector:
9 | matchLabels:
10 | app: nginx
11 | nodeSelector:
12 | matchLabels:
13 | kubernetes.azure.com/scalesetpriority: spot
14 | policy:
15 | action: Must
16 | targetSize: 40%
17 |
--------------------------------------------------------------------------------
/examples/demo_replicaset.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: ReplicaSet
3 | metadata:
4 | name: nginx
5 | labels:
6 | app: nginx
7 | spec:
8 | replicas: 10
9 | selector:
10 | matchLabels:
11 | app: nginx
12 | template:
13 | metadata:
14 | name: nginx
15 | labels:
16 | app: nginx
17 | spec:
18 | schedulerName: placement-policy-plugins-scheduler
19 | containers:
20 | - name: nginx
21 | image: nginx
22 |
--------------------------------------------------------------------------------
/examples/harvest-vm/README.md:
--------------------------------------------------------------------------------
1 | # Running Placement Policy Scheduler Plugins in Harvest VM cluster
2 |
3 | ## AKS demo
4 |
5 | #### 1. Create an [AKS](https://docs.microsoft.com/en-us/azure/aks/) cluster with Harvest VMs
6 |
7 | ```sh
8 | # Create a resource group in East US
9 | az group create --name harvestvm --location eastus
10 |
11 | # Create a basic single-node pool AKS cluster with Harvest VMs
12 | az aks create \
13 | --resource-group harvestvm \
14 | --name harvestaks \
15 | --vm-set-type VirtualMachineScaleSets \
16 |     --node-vm-size Harvest_E2s_v3 \
17 | --node-count 3 \
18 | --generate-ssh-keys \
19 | --load-balancer-sku standard
20 | ```
21 |
22 | Run `az aks get-credentials` command to get access credentials for the cluster:
23 |
24 | ```
25 | az aks get-credentials --resource-group harvestvm --name harvestaks
26 | ```
27 |
28 | Add a second node pool with 3 nodes:
29 |
30 | ```
31 | az aks nodepool add \
32 | --resource-group harvestvm \
33 | --cluster-name harvestaks \
34 | --name normalvms \
35 | --node-count 3
36 | ```
37 |
38 | #### 2. Deploy placement-policy-scheduler-plugins as a secondary scheduler
39 |
40 | The container image for the scheduler plugin is available in the GitHub Container Registry.
41 |
42 | ```bash
43 | kubectl apply -f https://raw.githubusercontent.com/Azure/placement-policy-scheduler-plugins/main/deploy/kube-scheduler-configuration.yml
44 | ```
45 |
46 |
47 | Result
48 |
49 | ```
50 | customresourcedefinition.apiextensions.k8s.io/placementpolicies.placement-policy.scheduling.x-k8s.io created
51 | configmap/pp-scheduler-config created
52 | clusterrole.rbac.authorization.k8s.io/pp-plugins-scheduler created
53 | clusterrolebinding.rbac.authorization.k8s.io/pp-plugins-scheduler created
54 | rolebinding.rbac.authorization.k8s.io/pp-plugins-scheduler-as-kube-scheduler created
55 | clusterrolebinding.rbac.authorization.k8s.io/pp-plugins-scheduler-as-kube-scheduler created
56 | serviceaccount/pp-plugins-scheduler created
57 | deployment.apps/pp-plugins-scheduler created
58 |
59 | ```
60 |
61 |
62 | #### 3. Choose node label
63 |
64 | To identify the Harvest VMs, we can use the instance type node label `node.kubernetes.io/instance-type`. Run the following command to get the VM size:
65 |
66 | ```
67 | az aks nodepool list --cluster-name harvestaks -g harvestvm -o table
68 | ```
69 |
70 |
71 | Result
72 |
73 | ```
74 | Name OsType KubernetesVersion VmSize Count MaxPods ProvisioningState Mode
75 | --------- -------- ------------------- --------------- ------- --------- ------------------- ------
76 | agentpool Linux 1.22.2 Harvest_E2s_v3 3 110 Succeeded System
77 | normalvms Linux 1.22.2 Standard_D2s_v3 3 110 Succeeded System
78 |
79 | ```
80 |
81 |
82 | The node label would be `node.kubernetes.io/instance-type: Harvest_E2s_v3`
83 |
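The label can also be verified directly on the nodes (assuming `kubectl` access to the cluster):

```sh
# list nodes with their instance type label shown as an extra column
kubectl get nodes -L node.kubernetes.io/instance-type
```
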
84 | #### 4. Deploy a `PlacementPolicy` CRD
85 |
86 | ```yaml
87 | kind: PlacementPolicy
88 | metadata:
89 | name: harvest-strict-must
90 | spec:
91 | weight: 100
92 | enforcementMode: Strict
93 | podSelector:
94 | matchLabels:
95 | app: nginx
96 | nodeSelector:
97 | matchLabels:
98 |       # instance type can be one of the following (Harvest_E2s_v3, Harvest_E4s_v3, Harvest_E8s_v3)
99 | node.kubernetes.io/instance-type: Harvest_E2s_v3
100 | policy:
101 | action: Must
102 | targetSize: 40%
103 | ```
104 |
105 |
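The same manifest is available in this repo as `examples/harvest-vm/v1alpha1_placementpolicy_harvestvm.yml`, so one way to apply it (assuming the file on the `main` branch) is:

```sh
kubectl apply -f https://raw.githubusercontent.com/Azure/placement-policy-scheduler-plugins/main/examples/harvest-vm/v1alpha1_placementpolicy_harvestvm.yml
```
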
106 | Result
107 |
108 | ```
109 | placementpolicy.placement-policy.scheduling.x-k8s.io/harvest-strict-must created
110 | ```
111 |
112 |
113 | >The node selector `node.kubernetes.io/instance-type: Harvest_E2s_v3` matches a node label carried by the nodes in `agentpool`
114 |
115 | #### 5. Deploy a `ReplicaSet` that will create 10 replicas
116 |
117 | ```sh
118 | kubectl apply -f https://raw.githubusercontent.com/Azure/placement-policy-scheduler-plugins/main/examples/demo_replicaset.yml
119 | ```
120 |
121 |
122 | Result
123 |
124 | ```
125 | replicaset.apps/nginx created
126 | ```
127 |
128 |
129 | #### 6. Get pods with matching labels
130 |
131 | ```sh
132 | kubectl get po -o wide -l app=nginx --sort-by="{.spec.nodeName}"
133 |
134 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
135 | nginx-jgp5l 1/1 Running 0 56s 10.244.0.15 aks-agentpool-33997223-vmss000000
136 | nginx-cdb9z 1/1 Running 0 56s 10.244.2.11 aks-agentpool-33997223-vmss000001
137 | nginx-wpxj9 1/1 Running 0 56s 10.244.2.12 aks-agentpool-33997223-vmss000001
138 | nginx-xc2cr 1/1 Running 0 56s 10.244.1.10 aks-agentpool-33997223-vmss000002
139 | nginx-xvqbb 1/1 Running 0 56s 10.244.7.5 aks-normalvms-23099053-vmss000000
140 | nginx-dmb4h 1/1 Running 0 56s 10.244.7.6 aks-normalvms-23099053-vmss000000
141 | nginx-skzrk 1/1 Running 0 56s 10.244.8.6 aks-normalvms-23099053-vmss000001
142 | nginx-hrznh 1/1 Running 0 56s 10.244.8.5 aks-normalvms-23099053-vmss000001
143 | nginx-6c87l 1/1 Running 0 56s 10.244.6.6 aks-normalvms-23099053-vmss000002
144 | nginx-f9mm2 1/1 Running 0 56s 10.244.6.5 aks-normalvms-23099053-vmss000002
145 | ```
146 |
147 | The nodes that carry the node label selected by the harvest-strict-must `PlacementPolicy` have been assigned 40% of the workload, as defined by `targetSize`.
148 |
149 | #### 7. Clean up
150 |
151 | - Delete [AKS](https://docs.microsoft.com/en-us/azure/aks/) cluster
152 |
153 | ```bash
154 | az group delete --name harvestvm --yes --no-wait
155 | ```
--------------------------------------------------------------------------------
/examples/harvest-vm/v1alpha1_placementpolicy_harvestvm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: placement-policy.scheduling.x-k8s.io/v1alpha1
2 | kind: PlacementPolicy
3 | metadata:
4 | name: harvest-strict-must
5 | spec:
6 | weight: 100
7 | enforcementMode: Strict
8 | podSelector:
9 | matchLabels:
10 | app: nginx
11 | nodeSelector:
12 | matchLabels:
13 |     # instance type can be one of the following (Harvest_E2s_v3, Harvest_E4s_v3, Harvest_E8s_v3)
14 | node.kubernetes.io/instance-type: Harvest_E2s_v3
15 | policy:
16 | action: Must
17 | targetSize: 40%
18 |
--------------------------------------------------------------------------------
/examples/v1alpha1_placementpolicy_must_besteffort.yml:
--------------------------------------------------------------------------------
1 | apiVersion: placement-policy.scheduling.x-k8s.io/v1alpha1
2 | kind: PlacementPolicy
3 | metadata:
4 | name: besteffort-must
5 | spec:
6 | weight: 100
7 | enforcementMode: BestEffort
8 | podSelector:
9 | matchLabels:
10 | app: nginx
11 | nodeSelector:
12 | matchLabels:
13 | node: want
14 | policy:
15 | action: Must
16 | targetSize: 40%
17 |
--------------------------------------------------------------------------------
/examples/v1alpha1_placementpolicy_strict_must.yml:
--------------------------------------------------------------------------------
1 | apiVersion: placement-policy.scheduling.x-k8s.io/v1alpha1
2 | kind: PlacementPolicy
3 | metadata:
4 | name: strict-must
5 | spec:
6 | weight: 100
7 | enforcementMode: Strict
8 | podSelector:
9 | matchLabels:
10 | app: nginx
11 | nodeSelector:
12 | matchLabels:
13 | node: want
14 | policy:
15 | action: Must
16 | targetSize: 40%
17 |
--------------------------------------------------------------------------------
/examples/v1alpha1_placementpolicy_strict_mustnot.yml:
--------------------------------------------------------------------------------
1 | apiVersion: placement-policy.scheduling.x-k8s.io/v1alpha1
2 | kind: PlacementPolicy
3 | metadata:
4 | name: strict-mustnot
5 | spec:
6 | weight: 100
7 | enforcementMode: Strict
8 | podSelector:
9 | matchLabels:
10 | app: nginx
11 | nodeSelector:
12 | matchLabels:
13 | node: want
14 | policy:
15 | action: MustNot
16 | targetSize: 40%
17 |
--------------------------------------------------------------------------------
/hack/boilerplate.go.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Azure/placement-policy-scheduler-plugins/2533f9bbf7f9f6a78ca1feea97259d31645f35c3/hack/boilerplate.go.txt
--------------------------------------------------------------------------------
/hack/go-install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/master/scripts/go_install.sh
3 |
4 | set -o errexit
5 | set -o nounset
6 | set -o pipefail
7 |
8 | if [[ -z "${1}" ]]; then
9 | echo "must provide module as first parameter"
10 | exit 1
11 | fi
12 |
13 | if [[ -z "${2}" ]]; then
14 | echo "must provide binary name as second parameter"
15 | exit 1
16 | fi
17 |
18 | if [[ -z "${3}" ]]; then
19 | echo "must provide version as third parameter"
20 | exit 1
21 | fi
22 |
23 | if [[ -z "${GOBIN}" ]]; then
24 | echo "GOBIN is not set. Must set GOBIN to install the bin in a specified directory."
25 | exit 1
26 | fi
27 |
28 | tmp_dir=$(mktemp -d -t goinstall_XXXXXXXXXX)
29 | function clean {
30 | rm -rf "${tmp_dir}"
31 | }
32 | trap clean EXIT
33 |
34 | rm "${GOBIN}/${2}"* || true
35 |
36 | cd "${tmp_dir}"
37 |
38 | # create a new module in the tmp directory
39 | go mod init fake/mod
40 |
41 | # install the golang module specified as the first argument
42 | go install "${1}@${3}"
43 | mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}"
44 | ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}"
45 |
--------------------------------------------------------------------------------
/hack/install-etcd.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/hack/install-etcd.sh
3 |
4 | set -o errexit
5 | set -o nounset
6 | set -o pipefail
7 |
8 | SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
9 | source "${SCRIPT_ROOT}/hack/lib/init.sh"
10 |
11 | kube::etcd::install
12 |
--------------------------------------------------------------------------------
/hack/integration-test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/hack/integration-test.sh
3 |
4 | set -o errexit
5 | set -o nounset
6 | set -o pipefail
7 |
8 | SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/..
9 | source "${SCRIPT_ROOT}/hack/lib/init.sh"
10 |
11 | checkEtcdOnPath() {
12 | export PATH="$(pwd)/etcd:${PATH}"
13 | kube::log::status "Checking etcd is on PATH"
14 | command -v etcd >/dev/null && return
15 | kube::log::status "Cannot find etcd, cannot run integration tests."
16 | kube::log::status "Please see https://git.k8s.io/community/contributors/devel/sig-testing/integration-tests.md#install-etcd-dependency for instructions."
17 | # kube::log::usage "You can use 'hack/install-etcd.sh' to install a copy in third_party/."
18 |   return 1
19 | }
20 |
21 | CLEANUP_REQUIRED=
22 | cleanup() {
23 | [[ -z "${CLEANUP_REQUIRED}" ]] && return
24 | kube::log::status "Cleaning up etcd"
25 | kube::etcd::cleanup
26 | CLEANUP_REQUIRED=
27 | kube::log::status "Integration test cleanup complete"
28 | }
29 |
30 | runTests() {
31 | kube::log::status "Starting etcd instance"
32 | CLEANUP_REQUIRED=1
33 | kube::etcd::start
34 | kube::log::status "Running integration test cases"
35 |
36 | # TODO try to use SCRIPT_ROOT for absolute path
37 | ln -s ../../../../../../../hack/testdata vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/testing/testdata
38 | go test ./test/integration/... -mod=vendor -coverprofile cover.out
39 |
40 | cleanup
41 | }
42 |
43 | checkEtcdOnPath
44 |
45 | # Run cleanup to stop etcd on interrupt or other kill signal.
46 | trap cleanup EXIT
47 |
48 | runTests
49 |
--------------------------------------------------------------------------------
/hack/lib/etcd.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/hack/lib/etcd.sh
3 |
4 | # A set of helpers for starting/running etcd for tests
5 |
6 | ETCD_VERSION=${ETCD_VERSION:-3.5.0}
7 | ETCD_HOST=${ETCD_HOST:-127.0.0.1}
8 | ETCD_PORT=${ETCD_PORT:-2379}
9 | export KUBE_INTEGRATION_ETCD_URL="http://${ETCD_HOST}:${ETCD_PORT}"
10 |
11 | kube::etcd::validate() {
12 | # validate if in path
13 | command -v etcd >/dev/null || {
14 | kube::log::usage "etcd must be in your PATH"
15 | kube::log::info "You can use 'hack/install-etcd.sh' to install a copy in third_party/."
16 | exit 1
17 | }
18 |
19 | # validate if etcd is running and $ETCD_PORT is in use
20 | if ps -ef | grep "etcd " | grep ${ETCD_PORT} &> /dev/null; then
21 | kube::log::usage "unable to start etcd as port ${ETCD_PORT} is in use. please stop the process listening on this port and retry."
22 | exit 1
23 | fi
24 |
25 |   # need to set the "ETCD_UNSUPPORTED_ARCH" env var on unsupported architectures.
26 | arch=$(uname -m)
27 | if [[ $arch =~ aarch* ]]; then
28 | export ETCD_UNSUPPORTED_ARCH=arm64
29 | elif [[ $arch =~ arm* ]]; then
30 | export ETCD_UNSUPPORTED_ARCH=arm
31 | fi
32 | # validate installed version is at least equal to minimum
33 | version=$(etcd --version | grep Version | head -n 1 | cut -d " " -f 3)
34 | if [[ $(kube::etcd::version "${ETCD_VERSION}") -gt $(kube::etcd::version "${version}") ]]; then
35 | kube::log::usage "etcd version ${ETCD_VERSION} or greater required."
36 | kube::log::info "You can use 'hack/install-etcd.sh' to install."
37 | exit 1
38 | fi
39 | }
40 |
41 | kube::etcd::version() {
42 | printf '%s\n' "${@}" | awk -F . '{ printf("%d%03d%03d\n", $1, $2, $3) }'
43 | }
44 |
45 | kube::etcd::start() {
46 | # validate before running
47 | kube::etcd::validate
48 |
49 | # Start etcd
50 | ETCD_DIR=${ETCD_DIR:-$(mktemp -d 2>/dev/null || mktemp -d -t test-etcd.XXXXXX)}
51 | if [[ -d "${ARTIFACTS:-}" ]]; then
52 | ETCD_LOGFILE="${ARTIFACTS}/etcd.$(uname -n).$(id -un).log.DEBUG.$(date +%Y%m%d-%H%M%S).$$"
53 | else
54 | ETCD_LOGFILE=${ETCD_LOGFILE:-"/dev/null"}
55 | fi
56 | kube::log::info "etcd --advertise-client-urls ${KUBE_INTEGRATION_ETCD_URL} --data-dir ${ETCD_DIR} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT} --log-level=debug > \"${ETCD_LOGFILE}\" 2>/dev/null"
57 | etcd --advertise-client-urls "${KUBE_INTEGRATION_ETCD_URL}" --data-dir "${ETCD_DIR}" --listen-client-urls "${KUBE_INTEGRATION_ETCD_URL}" --log-level=debug 2> "${ETCD_LOGFILE}" >/dev/null &
58 | ETCD_PID=$!
59 |
60 | echo "Waiting for etcd to come up."
61 | kube::util::wait_for_url "${KUBE_INTEGRATION_ETCD_URL}/health" "etcd: " 0.25 80
62 | curl -fs -X POST "${KUBE_INTEGRATION_ETCD_URL}/v3/kv/put" -d '{"key": "X3Rlc3Q=", "value": ""}'
63 | }
64 |
65 | kube::etcd::stop() {
66 | if [[ -n "${ETCD_PID-}" ]]; then
67 | kill "${ETCD_PID}" &>/dev/null || :
68 | wait "${ETCD_PID}" &>/dev/null || :
69 | fi
70 | }
71 |
72 | kube::etcd::clean_etcd_dir() {
73 | if [[ -n "${ETCD_DIR-}" ]]; then
74 | rm -rf "${ETCD_DIR}"
75 | fi
76 | }
77 |
78 | kube::etcd::cleanup() {
79 | kube::etcd::stop
80 | kube::etcd::clean_etcd_dir
81 | }
82 |
83 | kube::etcd::install() {
84 | (
85 | local os
86 | local arch
87 |
88 | os=$(kube::util::host_os)
89 | arch=$(kube::util::host_arch)
90 |
91 | if [[ $(readlink etcd) == etcd-v${ETCD_VERSION}-${os}-* ]]; then
92 | kube::log::info "etcd v${ETCD_VERSION} already installed. To use:"
93 | kube::log::info "export PATH=\"$(pwd)/etcd:\${PATH}\""
94 | return #already installed
95 | fi
96 |
97 | if [[ ${os} == "darwin" ]]; then
98 | download_file="etcd-v${ETCD_VERSION}-darwin-amd64.zip"
99 | url="https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/${download_file}"
100 | kube::util::download_file "${url}" "${download_file}"
101 | unzip -o "${download_file}"
102 | ln -fns "etcd-v${ETCD_VERSION}-darwin-amd64" etcd
103 | rm "${download_file}"
104 | else
105 | url="https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-${arch}.tar.gz"
106 | download_file="etcd-v${ETCD_VERSION}-linux-${arch}.tar.gz"
107 | kube::util::download_file "${url}" "${download_file}"
108 | tar xzf "${download_file}"
109 | ln -fns "etcd-v${ETCD_VERSION}-linux-${arch}" etcd
110 | rm "${download_file}"
111 | fi
112 | kube::log::info "etcd v${ETCD_VERSION} installed. To use:"
113 | kube::log::info "export PATH=\"$(pwd)/etcd:\${PATH}\""
114 | )
115 | }
116 |
--------------------------------------------------------------------------------
/hack/lib/golang.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/hack/lib/golang.sh
3 |
4 | # Ensure the go tool exists and is a viable version.
5 | kube::golang::verify_go_version() {
6 | if [[ -z "$(command -v go)" ]]; then
7 | kube::log::usage_from_stdin <&2
24 | shift
25 | for message; do
26 | echo " ${message}" >&2
27 | done
28 | }
29 |
30 | # Print a usage message to stderr. The arguments are printed directly.
31 | kube::log::usage() {
32 | echo >&2
33 | local message
34 | for message; do
35 | echo "${message}" >&2
36 | done
37 | echo >&2
38 | }
39 |
40 | kube::log::usage_from_stdin() {
41 | local messages=()
42 | while read -r line; do
43 | messages+=("${line}")
44 | done
45 |
46 | kube::log::usage "${messages[@]}"
47 | }
48 |
--------------------------------------------------------------------------------
/hack/lib/util.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/hack/lib/util.sh
3 |
4 | kube::util::host_os() {
5 | local host_os
6 | case "$(uname -s)" in
7 | Darwin)
8 | host_os=darwin
9 | ;;
10 | Linux)
11 | host_os=linux
12 | ;;
13 | *)
14 | kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
15 | exit 1
16 | ;;
17 | esac
18 | echo "${host_os}"
19 | }
20 |
21 | kube::util::host_arch() {
22 | local host_arch
23 | case "$(uname -m)" in
24 | x86_64*)
25 | host_arch=amd64
26 | ;;
27 | i?86_64*)
28 | host_arch=amd64
29 | ;;
30 | amd64*)
31 | host_arch=amd64
32 | ;;
33 | aarch64*)
34 | host_arch=arm64
35 | ;;
36 | arm64*)
37 | host_arch=arm64
38 | ;;
39 | arm*)
40 | host_arch=arm
41 | ;;
42 | i?86*)
43 | host_arch=x86
44 | ;;
45 | s390x*)
46 | host_arch=s390x
47 | ;;
48 | ppc64le*)
49 | host_arch=ppc64le
50 | ;;
51 | *)
52 | kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
53 | exit 1
54 | ;;
55 | esac
56 | echo "${host_arch}"
57 | }
58 |
59 | kube::util::wait_for_url() {
60 | local url=$1
61 | local prefix=${2:-}
62 | local wait=${3:-1}
63 | local times=${4:-30}
64 | local maxtime=${5:-1}
65 |
66 | command -v curl >/dev/null || {
67 | kube::log::usage "curl must be installed"
68 | exit 1
69 | }
70 |
71 | local i
72 | for i in $(seq 1 "${times}"); do
73 | local out
74 | if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then
75 | kube::log::status "On try ${i}, ${prefix}: ${out}"
76 | return 0
77 | fi
78 | sleep "${wait}"
79 | done
80 | kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
81 | return 1
82 | }
83 |
84 | kube::util::download_file() {
85 | local -r url=$1
86 | local -r destination_file=$2
87 |
88 | rm "${destination_file}" 2&> /dev/null || true
89 |
90 | for i in $(seq 5)
91 | do
92 | if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
93 | echo "Downloading ${url} failed. $((5-i)) retries left."
94 | sleep 1
95 | else
96 | echo "Downloading ${url} succeed"
97 | return 0
98 | fi
99 | done
100 | return 1
101 | }
102 |
--------------------------------------------------------------------------------
/hack/testdata/127.0.0.1_10.0.0.1_kubernetes.default.svc-kubernetes.default-kubernetes-localhost.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDTzCCAjegAwIBAgIBAjANBgkqhkiG9w0BAQsFADAiMSAwHgYDVQQDDBcxMjcu
3 | MC4wLjEtY2FAMTUzMTQ2NzU5MzAgFw0xODA3MTMwNjM5NTNaGA8yMTE4MDYxOTA2
4 | Mzk1M1owHzEdMBsGA1UEAwwUMTI3LjAuMC4xQDE1MzE0Njc1OTMwggEiMA0GCSqG
5 | SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDdTNF7rRKBvDtLOvCTDgj3utt+zv5u+23x
6 | kCz2475DPnTZ7JK2ipCuHemyCY88M6VyaBkIqAVvvl3LZiS+Hu3gd+8elbdGrCxQ
7 | sui1MrUcAg8OoBM+97UzoKC3HMFIFEpqzKjVJKr5PbV3F8XXIBQeS3YUCePo3m7u
8 | OkGCXUXtWRtQTu4Dcq+tJKlJBsY+Q8CUvb1l0n5hafIFEMnFF/sKGP28CWd8gfzD
9 | ZKKtVumvQlgcp1GdfxqKHfjQOtBo+ZBFiHgDGDrrghuQ2CxROvk5/bNrViqbWbw4
10 | lUbU3Yn18L4UHR5xOOvQyLP2QdWAaoPutT7Xba40RMgWYlsNRaatAgMBAAGjgZAw
11 | gY0wDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB
12 | /wQCMAAwWAYDVR0RBFEwT4IWa3ViZXJuZXRlcy5kZWZhdWx0LnN2Y4ISa3ViZXJu
13 | ZXRlcy5kZWZhdWx0ggprdWJlcm5ldGVzgglsb2NhbGhvc3SHBH8AAAGHBAoAAAEw
14 | DQYJKoZIhvcNAQELBQADggEBAFkRV1oBLrY3IJDI6E9nxAK30EdyfYZqvybPCZB8
15 | 6AAErj+WleJVFi0rZJ3fRDoQ5Gelwe4Ud21DknW4+L7nZ8JRbzNkLTYTJxtkujSW
16 | aEz7xKW1IxD+o9TEceqiVko4xGawXjUVTun7n0Upv6T4D4jC0GN9zu8oT6xbUHmd
17 | WSSc2HjGLs8vF130xt2Oj0jx03i7AoJF4ZxMRt7dqSK7j5tfflfTS9Dxhmd9Gg5P
18 | eGH4BWJ3IJI3r0+WUtiIgMSgV2ppTSNY2UNbNNpudsRCq55IzyHuRioFt/FH9t+8
19 | xFaar6D9RDsm87JCv5JZ3BoVZJglmX8iqye+OBXgHgMZxx4=
20 | -----END CERTIFICATE-----
21 | -----BEGIN CERTIFICATE-----
22 | MIIC5DCCAcygAwIBAgIBATANBgkqhkiG9w0BAQsFADAiMSAwHgYDVQQDDBcxMjcu
23 | MC4wLjEtY2FAMTUzMTQ2NzU5MzAgFw0xODA3MTMwNjM5NTNaGA8yMTE4MDYxOTA2
24 | Mzk1M1owIjEgMB4GA1UEAwwXMTI3LjAuMC4xLWNhQDE1MzE0Njc1OTMwggEiMA0G
25 | CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDRfS+rc4EPpDafeiRo+uIvV1812UQM
26 | ddoaCrn2yIVSYiupsFc7goieXOpqxgI6ksUCMDUOfi3DQGC8067wX2HpMzz5J9yz
27 | Qfamcg3SL7G9u5Vx+x+EU6qmBhXa4Z46JwTY0vYeccz2PR+Nx+HHO0DglIh3tip8
28 | ECQ2rtpMc5YxJOCwJg3zh8pnEqLNEahm3p1lNGLbY7Kpqp7al68ZVReVg/YaoJt5
29 | Voi7vbR38OWBChbBmwKRP4gJD8aKY2eY6Xgn8+UAAytYGOEp18y/eAvba7awKp56
30 | wG1Y3JqWD06D8NnUCPQOO/g/KyGU77sM66xdlsOwLpSbtwWLcjC3nnvHAgMBAAGj
31 | IzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB
32 | CwUAA4IBAQCECJvqzrh57MrVT5sj+TDmhtSwkblBBqNG03X8gvmj7PsYAnZVpaev
33 | KbN0xfDhK6yaCJX41sZCoTaQa55a0Y9n/Lr6d2LREYPf2NdhHU2aj1UjNhhamoZk
34 | 0/MJtn/7t6UmYsdFIRlYtLJZQRfNaeO+ULpjjQeGj+Y4mR87VzyDZk2zi/fLJCtk
35 | aVKsI2Tan9KFzwmsCp/9RH7uPhOIFsaa8ePBCvzrahMrG+b9FGV670bQTS104Gyt
36 | HB73ixOheUPL9PuvahXKz0xlJfeKu4nFFJkqUmThj2Ybv8cyzDNrSyDywZxzNe3e
37 | nMA3i/kfmIj33gkmwcFgYPqfKleeVZQo
38 | -----END CERTIFICATE-----
39 |
--------------------------------------------------------------------------------
/hack/testdata/127.0.0.1_10.0.0.1_kubernetes.default.svc-kubernetes.default-kubernetes-localhost.key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEogIBAAKCAQEA3UzRe60Sgbw7Szrwkw4I97rbfs7+bvtt8ZAs9uO+Qz502eyS
3 | toqQrh3psgmPPDOlcmgZCKgFb75dy2Ykvh7t4HfvHpW3RqwsULLotTK1HAIPDqAT
4 | Pve1M6CgtxzBSBRKasyo1SSq+T21dxfF1yAUHkt2FAnj6N5u7jpBgl1F7VkbUE7u
5 | A3KvrSSpSQbGPkPAlL29ZdJ+YWnyBRDJxRf7Chj9vAlnfIH8w2SirVbpr0JYHKdR
6 | nX8aih340DrQaPmQRYh4Axg664IbkNgsUTr5Of2za1Yqm1m8OJVG1N2J9fC+FB0e
7 | cTjr0Miz9kHVgGqD7rU+122uNETIFmJbDUWmrQIDAQABAoIBAFXzdhFhASUeZLEt
8 | bS7Qbq85BfNUlgGo6eS+qJgjkEwxv9S8S6dWXiciOxgJGna5YHL093QjPilOeMN9
9 | IpwtCxr5ugfZAlwSlwuo0TU/QpRkQFDf31m/f8NTidhU9MT4DIc6ggB2w2kWjJp6
10 | wz5wmR/DE1NpG/ngGpmwSq1FaNlr3xz4e6b0A56ReqQr5YwYsZl2Fxf8sOBWTiPe
11 | Iv41q8jyRXL2ytv9uTgdD7i+qLMz1/NGvy4ZWxD3yCMsDm5eEI8/4l2pOmRrrpKY
12 | Fc14eUkbHBMyT6ibI4d7Y2aZJslq8d0HMMX1XNLvzLEnGT1+mrOjWwerI+60B0t1
13 | 6EvTfUkCgYEA/rVROq6JupfnT7BM04jEx5UuaokcLoNpn6bptZwmMakioLjgZoa2
14 | XEZrNoRWVMQ82JuguxkLtUgLgqKQRreolDCyQtaGFjFnWdVs+1W0oIHY7oMdwOLh
15 | XsQRtPW3HCK4HYZJcBBIh8t4USiBTrRtTpXmDinLkbCRXYOcehbRZ2cCgYEA3mwg
16 | tsbnSDYTcFmPjJEGjISfMEjAHoka8ubPKzU6VylDnrb2k/NbDYL3NidzGbJaVJFk
17 | YNfCsja4COz+0pBiMY2fBEzHU4RwDaRrxUr0fLVxvH7/E9JPP8y/e5CJR2Z2sDQa
18 | yed3ArkNh0MaecGr+7IZFbv+Uj4QaBq3W77hGMsCgYB/keC1O2XQBvTbfLl92Sp1
19 | q8orobBzu23TgI3dX+/hP40QYohB0YbUgJCCJZX3rrGq64d9LfYnPYZGT5VjVekh
20 | D6K4xykxRF03KSYEW9Cz81TrYNAuI3QtOpaDw+2KMfl1ECUH85/gI5CHVXouKT/1
21 | 9C3dOiGzPnQQGjLtEzCeUQKBgFacZGDIM2e7Jvao6W0jTBmLyzFSIv3BBe1wU1vP
22 | 7lfiiaJUPNCAAwTP6tP7qKZo/SPROfU8D2S2ShOvtcrozlPdgf56p2OuPrQRQqYg
23 | +fNV9GQiT9G4I4QEhsvnDI3xKGaU45mbuIwm4024o6al9AKe54W/HtmHsXvYa24e
24 | dijhAoGARcbgcE/aT8jhdVHHCRBuSD4ZzXbB+JCetHsrjhOYnifc0graq0umiuRI
25 | c0i+IT5OhGTdVbjnPgySHn/V/IuSYLLtKvfqSV8tQk3womXRPJ/K9BsFhelo1Vd5
26 | MTyZ2j0XjLWHOo0DKxIPLW3P7sBYAFM2Z+/RAe1uKjISmggDhBs=
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/hack/testdata/README.md:
--------------------------------------------------------------------------------
1 | Keys in this directory are generated for testing purposes only.
2 |
--------------------------------------------------------------------------------
/hack/tools.go:
--------------------------------------------------------------------------------
1 | //go:build tools
2 | // +build tools
3 |
4 | // Copied from https://github.com/kubernetes/sample-controller/blob/master/hack/tools.go
5 |
6 | // This package imports things required by build scripts, to force `go mod` to see them as dependencies
7 |
8 | package tools
9 |
10 | import (
11 | _ "k8s.io/code-generator"
12 | _ "k8s.io/klog/hack/tools/logcheck"
13 | _ "k8s.io/kube-openapi/cmd/openapi-gen"
14 | )
15 |
--------------------------------------------------------------------------------
/hack/update-codegen.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | SCRIPT_ROOT=$(dirname "${BASH_SOURCE[@]}")/..
8 |
9 | TOOLS_DIR=$(realpath ./hack/tools)
10 | TOOLS_BIN_DIR="${TOOLS_DIR}/bin"
11 | GO_INSTALL=$(realpath ./hack/go-install.sh)
12 |
13 | pushd "${SCRIPT_ROOT}"
14 | # install the generators if they are not already present
15 | for GENERATOR in client-gen lister-gen informer-gen register-gen; do
16 | GOBIN=${TOOLS_BIN_DIR} ${GO_INSTALL} k8s.io/code-generator/cmd/${GENERATOR} ${GENERATOR} v0.22.3
17 | done
18 | popd
19 |
20 | OUTPUT_PKG=github.com/Azure/placement-policy-scheduler-plugins/pkg/client
21 | FQ_APIS=github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1
22 | CLIENTSET_NAME=versioned
23 | CLIENTSET_PKG_NAME=clientset
24 |
25 | # reference from https://github.com/servicemeshinterface/smi-sdk-go/blob/master/hack/update-codegen.sh
26 | # the generate-groups.sh script cannot handle group names with dashes, so we use placementpolicy.scheduling.x-k8s.io as the group name
27 | if [[ "$OSTYPE" == "darwin"* ]]; then
28 | find "${SCRIPT_ROOT}/apis" -type f -exec sed -i '' 's/placement-policy.scheduling.x-k8s.io/placementpolicy.scheduling.x-k8s.io/g' {} +
29 | else
30 | find "${SCRIPT_ROOT}/apis" -type f -exec sed -i 's/placement-policy.scheduling.x-k8s.io/placementpolicy.scheduling.x-k8s.io/g' {} +
31 | fi
32 |
33 | echo "Generating clientset at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME}"
34 | "${TOOLS_BIN_DIR}/client-gen" \
35 | --clientset-name "${CLIENTSET_NAME}" \
36 | --input-base "" \
37 | --input "${FQ_APIS}" \
38 | --output-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME}" \
39 | --go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt"
40 |
41 | echo "Generating listers at ${OUTPUT_PKG}/listers"
42 | "${TOOLS_BIN_DIR}/lister-gen" \
43 | --input-dirs "${FQ_APIS}" \
44 | --output-package "${OUTPUT_PKG}/listers" \
45 | --go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt"
46 |
47 | echo "Generating informers at ${OUTPUT_PKG}/informers"
48 | "${TOOLS_BIN_DIR}/informer-gen" \
49 | --input-dirs "${FQ_APIS}" \
50 | --versioned-clientset-package "${OUTPUT_PKG}/${CLIENTSET_PKG_NAME}/${CLIENTSET_NAME}" \
51 | --listers-package "${OUTPUT_PKG}/listers" \
52 | --output-package "${OUTPUT_PKG}/informers" \
53 | --go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt"
54 |
55 | echo "Generating register at ${FQ_APIS}"
56 | "${TOOLS_BIN_DIR}/register-gen" \
57 | --input-dirs "${FQ_APIS}" \
58 | --output-package "${FQ_APIS}" \
59 | --go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt"
60 |
61 | # reference from https://github.com/servicemeshinterface/smi-sdk-go/blob/master/hack/update-codegen.sh
62 | # replace placementpolicy.scheduling.x-k8s.io with placement-policy.scheduling.x-k8s.io after code generation
63 | if [[ "$OSTYPE" == "darwin"* ]]; then
64 | find "${SCRIPT_ROOT}/apis" -type f -exec sed -i '' 's/placementpolicy.scheduling.x-k8s.io/placement-policy.scheduling.x-k8s.io/g' {} +
65 | find "${SCRIPT_ROOT}/pkg/client" -type f -exec sed -i '' 's/placementpolicy.scheduling.x-k8s.io/placement-policy.scheduling.x-k8s.io/g' {} +
66 | else
67 | find "${SCRIPT_ROOT}/apis" -type f -exec sed -i 's/placementpolicy.scheduling.x-k8s.io/placement-policy.scheduling.x-k8s.io/g' {} +
68 | find "${SCRIPT_ROOT}/pkg/client" -type f -exec sed -i 's/placementpolicy.scheduling.x-k8s.io/placement-policy.scheduling.x-k8s.io/g' {} +
69 | fi
70 |
--------------------------------------------------------------------------------
/hack/update-generated-openapi.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #https://github.com/kubernetes-sigs/scheduler-plugins/blob/master/hack/update-generated-openapi.sh
3 |
4 | set -o errexit
5 | set -o nounset
6 | set -o pipefail
7 |
8 | SCRIPT_ROOT=$(dirname "${BASH_SOURCE[@]}")/..
9 |
10 |
11 | TOOLS_DIR=$(realpath ./hack/tools)
12 | TOOLS_BIN_DIR="${TOOLS_DIR}/bin"
13 | GO_INSTALL=$(realpath ./hack/go-install.sh)
14 |
15 |
16 | pushd "${SCRIPT_ROOT}"
17 | # install openapi-gen if it is not already present
18 | GOBIN=${TOOLS_BIN_DIR} ${GO_INSTALL} k8s.io/code-generator/cmd/openapi-gen openapi-gen v0.22.3
19 | popd
20 |
21 | KUBE_INPUT_DIRS=(
22 | $(
23 | grep --color=never -rl '+k8s:openapi-gen=' vendor/k8s.io | \
24 | xargs -n1 dirname | \
25 | sed "s,^vendor/,," | \
26 | sort -u | \
27 | sed '/^k8s\.io\/kubernetes\/build\/root$/d' | \
28 | sed '/^k8s\.io\/kubernetes$/d' | \
29 | sed '/^k8s\.io\/kubernetes\/staging$/d' | \
30 | sed 's,k8s\.io/kubernetes/staging/src/,,' | \
31 | grep -v 'k8s.io/code-generator' | \
32 | grep -v 'k8s.io/sample-apiserver'
33 | )
34 | )
35 |
36 | KUBE_INPUT_DIRS=$(IFS=,; echo "${KUBE_INPUT_DIRS[*]}")
37 |
38 | function join { local IFS="$1"; shift; echo "$*"; }
39 |
40 | echo "Generating Kubernetes OpenAPI"
41 |
42 | "${TOOLS_BIN_DIR}/openapi-gen" \
43 | --output-file-base zz_generated.openapi \
44 | --output-base="${TOOLS_BIN_DIR}/src" \
45 | --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt \
46 | --output-base="./" \
47 | --input-dirs $(join , "${KUBE_INPUT_DIRS[@]}") \
48 | --output-package "vendor/k8s.io/kubernetes/pkg/generated/openapi" \
49 | "$@"
50 |
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: placement-policy-scheduler-plugins
3 | description: A Helm chart for Kubernetes placement policy scheduler plugins
4 | type: application
5 | version: 0.1.1
6 | appVersion: v0.1.0
7 | kubeVersion: "<=1.22.2"
8 |
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/README.md:
--------------------------------------------------------------------------------
1 | # Chart to run placement policy scheduler plugins as a second scheduler in the cluster.
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/crds/scheduling.x-k8s.io_placementpolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | annotations:
5 | controller-gen.kubebuilder.io/version: v0.7.0
6 | creationTimestamp: null
7 | name: placementpolicies.placement-policy.scheduling.x-k8s.io
8 | spec:
9 | group: placement-policy.scheduling.x-k8s.io
10 | names:
11 | kind: PlacementPolicy
12 | listKind: PlacementPolicyList
13 | plural: placementpolicies
14 | singular: placementpolicy
15 | scope: Namespaced
16 | versions:
17 | - name: v1alpha1
18 | schema:
19 | openAPIV3Schema:
20 | description: PlacementPolicy is the Schema for the placementpolicies API
21 | properties:
22 | apiVersion:
23 | description: 'APIVersion defines the versioned schema of this representation
24 | of an object. Servers should convert recognized schemas to the latest
25 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
26 | type: string
27 | kind:
28 | description: 'Kind is a string value representing the REST resource this
29 | object represents. Servers may infer this from the endpoint the client
30 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
31 | type: string
32 | metadata:
33 | type: object
34 | spec:
35 | description: PlacementPolicySpec defines the desired state of PlacementPolicy
36 | properties:
37 | enforcementMode:
38 | description: 'enforcementMode is an enum that specifies how the policy
39 | will be enforced during scheduling (e.g. the application of filter
40 | vs scorer plugin). Values allowed for this field are: BestEffort
41 | (default): the policy will be enforced as best effort (scorer mode).
42 | Strict: the policy will be forced during scheduling. The filter
43 | approach will be used. Note: this may leave pods unschedulable.'
44 | type: string
45 | nodeSelector:
46 | description: nodeSelector selects the nodes where the placement policy
47 | will apply, according to action
48 | properties:
49 | matchExpressions:
50 | description: matchExpressions is a list of label selector requirements.
51 | The requirements are ANDed.
52 | items:
53 | description: A label selector requirement is a selector that
54 | contains values, a key, and an operator that relates the key
55 | and values.
56 | properties:
57 | key:
58 | description: key is the label key that the selector applies
59 | to.
60 | type: string
61 | operator:
62 | description: operator represents a key's relationship to
63 | a set of values. Valid operators are In, NotIn, Exists
64 | and DoesNotExist.
65 | type: string
66 | values:
67 | description: values is an array of string values. If the
68 | operator is In or NotIn, the values array must be non-empty.
69 | If the operator is Exists or DoesNotExist, the values
70 | array must be empty. This array is replaced during a strategic
71 | merge patch.
72 | items:
73 | type: string
74 | type: array
75 | required:
76 | - key
77 | - operator
78 | type: object
79 | type: array
80 | matchLabels:
81 | additionalProperties:
82 | type: string
83 | description: matchLabels is a map of {key,value} pairs. A single
84 | {key,value} in the matchLabels map is equivalent to an element
85 | of matchExpressions, whose key field is "key", the operator
86 | is "In", and the values array contains only "value". The requirements
87 | are ANDed.
88 | type: object
89 | type: object
90 | podSelector:
91 | description: podSelector identifies which pods this placement policy
92 | will apply to
93 | properties:
94 | matchExpressions:
95 | description: matchExpressions is a list of label selector requirements.
96 | The requirements are ANDed.
97 | items:
98 | description: A label selector requirement is a selector that
99 | contains values, a key, and an operator that relates the key
100 | and values.
101 | properties:
102 | key:
103 | description: key is the label key that the selector applies
104 | to.
105 | type: string
106 | operator:
107 | description: operator represents a key's relationship to
108 | a set of values. Valid operators are In, NotIn, Exists
109 | and DoesNotExist.
110 | type: string
111 | values:
112 | description: values is an array of string values. If the
113 | operator is In or NotIn, the values array must be non-empty.
114 | If the operator is Exists or DoesNotExist, the values
115 | array must be empty. This array is replaced during a strategic
116 | merge patch.
117 | items:
118 | type: string
119 | type: array
120 | required:
121 | - key
122 | - operator
123 | type: object
124 | type: array
125 | matchLabels:
126 | additionalProperties:
127 | type: string
128 | description: matchLabels is a map of {key,value} pairs. A single
129 | {key,value} in the matchLabels map is equivalent to an element
130 | of matchExpressions, whose key field is "key", the operator
131 | is "In", and the values array contains only "value". The requirements
132 | are ANDed.
133 | type: object
134 | type: object
135 | policy:
136 | description: Policy is the placement policy for the target, based on action
137 | properties:
138 | action:
139 | description: 'The action field is the policy placement action. It
140 | is a string enum that carries the following possible values:
141 | Must(default): based on the rule below pods must be placed on
142 | nodes selected by node selector MustNot: based on the rule pods
143 | must *not* be placed on nodes selected by node selector'
144 | type: string
145 | targetSize:
146 | anyOf:
147 | - type: integer
148 | - type: string
149 | description: 'TargetSize is the number of pods that can or cannot
150 | be placed on the node. Value can be an absolute number (ex:
151 | 5) or a percentage of desired pods (ex: 10%). Absolute number
152 | is calculated from percentage by rounding down.'
153 | x-kubernetes-int-or-string: true
154 | type: object
155 | weight:
156 | description: The policy weight allows the engine to decide which policy
157 | to use when pods match multiple policies. If multiple policies match
158 | and all share the same weight then a policy with spec.enforcementMode
159 | == Force will be selected. If multiple policies match and more than one
160 | policy is marked with the “Force” enforcementMode, they will be sorted
161 | alphabetically in ascending order and the first one will be used. The scheduler publishes
162 | events capturing this conflict when it happens. Weight == 0-100
163 | is reserved for future use.
164 | format: int32
165 | type: integer
166 | type: object
167 | status:
168 | description: PlacementPolicyStatus defines the observed state of PlacementPolicy
169 | type: object
170 | type: object
171 | served: true
172 | storage: true
173 | subresources:
174 | status: {}
175 | status:
176 | acceptedNames:
177 | kind: ""
178 | plural: ""
179 | conditions: []
180 | storedVersions: []
181 |
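
Example (editorial sketch, not a file in this repository): creating a PlacementPolicy through the dynamic client, using only the group, version, resource and field names defined by the CRD above. The namespace, object name, label values and the use of clientcmd.RecommendedHomeFile are illustrative assumptions.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig from the default location (assumption for this sketch).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Resource coordinates taken from the CRD: group, version and plural name.
	gvr := schema.GroupVersionResource{
		Group:    "placement-policy.scheduling.x-k8s.io",
		Version:  "v1alpha1",
		Resource: "placementpolicies",
	}

	// Spec fields mirror the schema above: enforcementMode, podSelector,
	// nodeSelector and policy (action + targetSize).
	pp := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "placement-policy.scheduling.x-k8s.io/v1alpha1",
		"kind":       "PlacementPolicy",
		"metadata":   map[string]interface{}{"name": "besteffort-must"},
		"spec": map[string]interface{}{
			"enforcementMode": "BestEffort",
			"podSelector":     map[string]interface{}{"matchLabels": map[string]interface{}{"app": "nginx"}},
			"nodeSelector":    map[string]interface{}{"matchLabels": map[string]interface{}{"node": "want"}},
			"policy":          map[string]interface{}{"action": "Must", "targetSize": "40%"},
		},
	}}

	if _, err := dyn.Resource(gvr).Namespace("default").Create(context.TODO(), pp, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
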
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "placement-policy-scheduler-plugins.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "placement-policy-scheduler-plugins.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "placement-policy-scheduler-plugins.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "placement-policy-scheduler-plugins.labels" -}}
37 | helm.sh/chart: {{ include "placement-policy-scheduler-plugins.chart" . }}
38 | {{ include "placement-policy-scheduler-plugins.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "placement-policy-scheduler-plugins.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "placement-policy-scheduler-plugins.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | component: scheduler
52 | {{- end }}
53 |
54 | {{/*
55 | Create the name of the service account to use
56 | */}}
57 | {{- define "placement-policy-scheduler-plugins.serviceAccountName" -}}
58 | {{- if .Values.serviceAccount.create }}
59 | {{- default (include "placement-policy-scheduler-plugins.fullname" .) .Values.serviceAccount.name }}
60 | {{- else }}
61 | {{- default "default" .Values.serviceAccount.name }}
62 | {{- end }}
63 | {{- end }}
64 |
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/templates/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: pp-scheduler-config
5 | namespace: {{ .Release.Namespace }}
6 | data:
7 | scheduler-config.yaml: |
8 | apiVersion: kubescheduler.config.k8s.io/v1beta1
9 | kind: KubeSchedulerConfiguration
10 | leaderElection:
11 | leaderElect: false
12 | profiles:
13 | - schedulerName: placement-policy-plugins-scheduler
14 | plugins:
15 | preScore:
16 | enabled:
17 | - name: placementpolicy
18 | score:
19 | enabled:
20 | - name: placementpolicy
21 | preFilter:
22 | enabled:
23 | - name: placementpolicy
24 | filter:
25 | enabled:
26 | - name: placementpolicy
27 |
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: pp-plugins-scheduler
5 | namespace: {{ .Release.Namespace }}
6 | labels:
7 | {{- include "placement-policy-scheduler-plugins.labels" . | nindent 4 }}
8 | spec:
9 | replicas: {{ .Values.replicaCount }}
10 | selector:
11 | matchLabels:
12 | {{- include "placement-policy-scheduler-plugins.selectorLabels" . | nindent 6 }}
13 | template:
14 | metadata:
15 | labels:
16 | {{- include "placement-policy-scheduler-plugins.selectorLabels" . | nindent 8 }}
17 | spec:
18 | serviceAccountName: pp-plugins-scheduler
19 | containers:
20 | - command:
21 | - /manager
22 | - --config=/etc/schedulerconfig/scheduler-config.yaml
23 | image: {{ .Values.image }}
24 | name: pp-plugins-scheduler
25 | volumeMounts:
26 | - name: scheduler-config
27 | mountPath: /etc/schedulerconfig
28 | readOnly: true
29 | hostNetwork: false
30 | hostPID: false
31 | volumes:
32 | - name: scheduler-config
33 | configMap:
34 | name: pp-scheduler-config
35 |
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/templates/rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: system:pp-plugins-scheduler
5 | rules:
6 | - apiGroups: [""]
7 | resources: ["namespaces", "configmaps"]
8 | verbs: ["get", "list", "watch"]
9 | - apiGroups: ["", "events.k8s.io"]
10 | resources: ["events"]
11 | verbs: ["create", "patch", "update"]
12 | - apiGroups: ["coordination.k8s.io"]
13 | resources: ["leases"]
14 | verbs: ["create"]
15 | - apiGroups: ["coordination.k8s.io"]
16 | resourceNames: ["kube-scheduler"]
17 | resources: ["leases"]
18 | verbs: ["get", "update"]
19 | - apiGroups: [""]
20 | resources: ["endpoints"]
21 | verbs: ["create"]
22 | - apiGroups: [""]
23 | resourceNames: ["kube-scheduler"]
24 | resources: ["endpoints"]
25 | verbs: ["get", "update"]
26 | - apiGroups: [""]
27 | resources: ["nodes"]
28 | verbs: ["get", "list", "watch"]
29 | - apiGroups: [""]
30 | resources: ["pods"]
31 | verbs: ["delete", "get", "list", "watch", "update"]
32 | - apiGroups: [""]
33 | resources: ["bindings", "pods/binding"]
34 | verbs: ["create"]
35 | - apiGroups: [""]
36 | resources: ["pods/status"]
37 | verbs: ["patch", "update"]
38 | - apiGroups: [""]
39 | resources: ["replicationcontrollers", "services"]
40 | verbs: ["get", "list", "watch"]
41 | - apiGroups: ["apps", "extensions"]
42 | resources: ["replicasets"]
43 | verbs: ["get", "list", "watch"]
44 | - apiGroups: ["apps"]
45 | resources: ["statefulsets"]
46 | verbs: ["get", "list", "watch"]
47 | - apiGroups: ["policy"]
48 | resources: ["poddisruptionbudgets"]
49 | verbs: ["get", "list", "watch"]
50 | - apiGroups: [""]
51 | resources: ["persistentvolumeclaims", "persistentvolumes"]
52 | verbs: ["get", "list", "watch", "patch", "update"]
53 | - apiGroups: ["authentication.k8s.io"]
54 | resources: ["tokenreviews"]
55 | verbs: ["create"]
56 | - apiGroups: ["authorization.k8s.io"]
57 | resources: ["subjectaccessreviews"]
58 | verbs: ["create"]
59 | - apiGroups: ["storage.k8s.io"]
60 | resources: ["*"]
61 | verbs: ["get", "list", "watch"]
62 | - apiGroups: ["placement-policy.scheduling.x-k8s.io"]
63 | resources: ["placementpolicies"]
64 | verbs: ["get", "list", "watch", "create", "delete", "update", "patch"]
65 | ---
66 | kind: ClusterRoleBinding
67 | apiVersion: rbac.authorization.k8s.io/v1
68 | metadata:
69 | name: pp-plugins-scheduler
70 | subjects:
71 | - kind: User
72 | name: system:kube-scheduler
73 | namespace: {{ .Release.Namespace }}
74 | apiGroup: rbac.authorization.k8s.io
75 | roleRef:
76 | kind: ClusterRole
77 | name: system:pp-plugins-scheduler
78 | apiGroup: rbac.authorization.k8s.io
79 |
80 | ---
81 | apiVersion: rbac.authorization.k8s.io/v1
82 | kind: ClusterRoleBinding
83 | metadata:
84 | name: pp-plugins-scheduler:system:auth-delegator
85 | roleRef:
86 | apiGroup: rbac.authorization.k8s.io
87 | kind: ClusterRole
88 | name: system:auth-delegator
89 | subjects:
90 | - kind: ServiceAccount
91 | name: pp-plugins-scheduler
92 | namespace: {{ .Release.Namespace }}
93 | ---
94 | # To be able to retrieve the PlacementPolicy objects, the following role has been added
95 | apiVersion: rbac.authorization.k8s.io/v1
96 | kind: RoleBinding
97 | metadata:
98 | name: pp-plugins-scheduler-as-kube-scheduler
99 | namespace: {{ .Release.Namespace }}
100 | subjects:
101 | - kind: ServiceAccount
102 | name: pp-plugins-scheduler
103 | namespace: {{ .Release.Namespace }}
104 | roleRef:
105 | kind: Role
106 | name: extension-apiserver-authentication-reader
107 | apiGroup: rbac.authorization.k8s.io
108 | ---
109 | apiVersion: rbac.authorization.k8s.io/v1
110 | kind: ClusterRoleBinding
111 | metadata:
112 | name: pp-plugins-scheduler-as-kube-scheduler
113 | subjects:
114 | - kind: ServiceAccount
115 | name: pp-plugins-scheduler
116 | namespace: {{ .Release.Namespace }}
117 | roleRef:
118 | kind: ClusterRole
119 | name: system:pp-plugins-scheduler
120 | apiGroup: rbac.authorization.k8s.io
121 | ---
122 | apiVersion: v1
123 | kind: ServiceAccount
124 | metadata:
125 | name: pp-plugins-scheduler
126 | namespace: {{ .Release.Namespace }}
127 |
--------------------------------------------------------------------------------
/manifest_staging/charts/placement-policy-scheduler-plugins/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for placement-policy-scheduler-plugins.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | image: ghcr.io/azure/placement-policy-scheduler-plugins/placement-policy:v0.1.0
6 | replicaCount: 1
7 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/clientset.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package versioned
4 |
5 | import (
6 | "fmt"
7 |
8 | placementpolicyv1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned/typed/apis/v1alpha1"
9 | discovery "k8s.io/client-go/discovery"
10 | rest "k8s.io/client-go/rest"
11 | flowcontrol "k8s.io/client-go/util/flowcontrol"
12 | )
13 |
14 | type Interface interface {
15 | Discovery() discovery.DiscoveryInterface
16 | PlacementpolicyV1alpha1() placementpolicyv1alpha1.PlacementpolicyV1alpha1Interface
17 | }
18 |
19 | // Clientset contains the clients for groups. Each group has exactly one
20 | // version included in a Clientset.
21 | type Clientset struct {
22 | *discovery.DiscoveryClient
23 | placementpolicyV1alpha1 *placementpolicyv1alpha1.PlacementpolicyV1alpha1Client
24 | }
25 |
26 | // PlacementpolicyV1alpha1 retrieves the PlacementpolicyV1alpha1Client
27 | func (c *Clientset) PlacementpolicyV1alpha1() placementpolicyv1alpha1.PlacementpolicyV1alpha1Interface {
28 | return c.placementpolicyV1alpha1
29 | }
30 |
31 | // Discovery retrieves the DiscoveryClient
32 | func (c *Clientset) Discovery() discovery.DiscoveryInterface {
33 | if c == nil {
34 | return nil
35 | }
36 | return c.DiscoveryClient
37 | }
38 |
39 | // NewForConfig creates a new Clientset for the given config.
40 | // If config's RateLimiter is not set and QPS and Burst are acceptable,
41 | // NewForConfig will generate a rate-limiter in configShallowCopy.
42 | func NewForConfig(c *rest.Config) (*Clientset, error) {
43 | configShallowCopy := *c
44 | if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
45 | if configShallowCopy.Burst <= 0 {
46 | return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
47 | }
48 | configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
49 | }
50 | var cs Clientset
51 | var err error
52 | cs.placementpolicyV1alpha1, err = placementpolicyv1alpha1.NewForConfig(&configShallowCopy)
53 | if err != nil {
54 | return nil, err
55 | }
56 |
57 | cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
58 | if err != nil {
59 | return nil, err
60 | }
61 | return &cs, nil
62 | }
63 |
64 | // NewForConfigOrDie creates a new Clientset for the given config and
65 | // panics if there is an error in the config.
66 | func NewForConfigOrDie(c *rest.Config) *Clientset {
67 | var cs Clientset
68 | cs.placementpolicyV1alpha1 = placementpolicyv1alpha1.NewForConfigOrDie(c)
69 |
70 | cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
71 | return &cs
72 | }
73 |
74 | // New creates a new Clientset for the given RESTClient.
75 | func New(c rest.Interface) *Clientset {
76 | var cs Clientset
77 | cs.placementpolicyV1alpha1 = placementpolicyv1alpha1.New(c)
78 |
79 | cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
80 | return &cs
81 | }
82 |
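
Example (editorial sketch, not part of the generated code): building the Clientset above from a kubeconfig and listing PlacementPolicies. The kubeconfig path and the "default" namespace are illustrative assumptions.

package main

import (
	"context"
	"fmt"

	versioned "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// PlacementpolicyV1alpha1() and PlacementPolicies() come from the generated typed client.
	pps, err := cs.PlacementpolicyV1alpha1().PlacementPolicies("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pp := range pps.Items {
		fmt.Println(pp.Namespace, pp.Name)
	}
}
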
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/doc.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | // This package has the automatically generated clientset.
4 | package versioned
5 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/fake/clientset_generated.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package fake
4 |
5 | import (
6 | clientset "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
7 | placementpolicyv1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned/typed/apis/v1alpha1"
8 | fakeplacementpolicyv1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned/typed/apis/v1alpha1/fake"
9 | "k8s.io/apimachinery/pkg/runtime"
10 | "k8s.io/apimachinery/pkg/watch"
11 | "k8s.io/client-go/discovery"
12 | fakediscovery "k8s.io/client-go/discovery/fake"
13 | "k8s.io/client-go/testing"
14 | )
15 |
16 | // NewSimpleClientset returns a clientset that will respond with the provided objects.
17 | // It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
18 | // without applying any validations and/or defaults. It shouldn't be considered a replacement
19 | // for a real clientset and is mostly useful in simple unit tests.
20 | func NewSimpleClientset(objects ...runtime.Object) *Clientset {
21 | o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
22 | for _, obj := range objects {
23 | if err := o.Add(obj); err != nil {
24 | panic(err)
25 | }
26 | }
27 |
28 | cs := &Clientset{tracker: o}
29 | cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
30 | cs.AddReactor("*", "*", testing.ObjectReaction(o))
31 | cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
32 | gvr := action.GetResource()
33 | ns := action.GetNamespace()
34 | watch, err := o.Watch(gvr, ns)
35 | if err != nil {
36 | return false, nil, err
37 | }
38 | return true, watch, nil
39 | })
40 |
41 | return cs
42 | }
43 |
44 | // Clientset implements clientset.Interface. Meant to be embedded into a
45 | // struct to get a default implementation. This makes faking out just the method
46 | // you want to test easier.
47 | type Clientset struct {
48 | testing.Fake
49 | discovery *fakediscovery.FakeDiscovery
50 | tracker testing.ObjectTracker
51 | }
52 |
53 | func (c *Clientset) Discovery() discovery.DiscoveryInterface {
54 | return c.discovery
55 | }
56 |
57 | func (c *Clientset) Tracker() testing.ObjectTracker {
58 | return c.tracker
59 | }
60 |
61 | var (
62 | _ clientset.Interface = &Clientset{}
63 | _ testing.FakeClient = &Clientset{}
64 | )
65 |
66 | // PlacementpolicyV1alpha1 retrieves the PlacementpolicyV1alpha1Client
67 | func (c *Clientset) PlacementpolicyV1alpha1() placementpolicyv1alpha1.PlacementpolicyV1alpha1Interface {
68 | return &fakeplacementpolicyv1alpha1.FakePlacementpolicyV1alpha1{Fake: &c.Fake}
69 | }
70 |
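
Example (editorial sketch, not part of the generated code): exercising the fake clientset in a unit test. It assumes, as is usual for CRD types, that v1alpha1.PlacementPolicy embeds metav1.ObjectMeta; the object name and namespace are illustrative.

package fake_test

import (
	"context"
	"testing"

	"github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
	"github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestGetPlacementPolicy(t *testing.T) {
	// Seed the tracker with one object, then read it back through the typed fake client.
	seed := &v1alpha1.PlacementPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	}
	cs := fake.NewSimpleClientset(seed)

	got, err := cs.PlacementpolicyV1alpha1().PlacementPolicies("default").Get(context.TODO(), "demo", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "demo" {
		t.Fatalf("expected %q, got %q", "demo", got.Name)
	}
}
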
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/fake/doc.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | // This package has the automatically generated fake clientset.
4 | package fake
5 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/fake/register.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package fake
4 |
5 | import (
6 | placementpolicyv1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
7 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 | runtime "k8s.io/apimachinery/pkg/runtime"
9 | schema "k8s.io/apimachinery/pkg/runtime/schema"
10 | serializer "k8s.io/apimachinery/pkg/runtime/serializer"
11 | utilruntime "k8s.io/apimachinery/pkg/util/runtime"
12 | )
13 |
14 | var scheme = runtime.NewScheme()
15 | var codecs = serializer.NewCodecFactory(scheme)
16 |
17 | var localSchemeBuilder = runtime.SchemeBuilder{
18 | placementpolicyv1alpha1.AddToScheme,
19 | }
20 |
21 | // AddToScheme adds all types of this clientset into the given scheme. This allows composition
22 | // of clientsets, like in:
23 | //
24 | // import (
25 | // "k8s.io/client-go/kubernetes"
26 | // clientsetscheme "k8s.io/client-go/kubernetes/scheme"
27 | // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
28 | // )
29 | //
30 | // kclientset, _ := kubernetes.NewForConfig(c)
31 | // _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
32 | //
33 | // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
34 | // correctly.
35 | var AddToScheme = localSchemeBuilder.AddToScheme
36 |
37 | func init() {
38 | v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
39 | utilruntime.Must(AddToScheme(scheme))
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/scheme/doc.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | // This package contains the scheme of the automatically generated clientset.
4 | package scheme
5 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/scheme/register.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package scheme
4 |
5 | import (
6 | placementpolicyv1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
7 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 | runtime "k8s.io/apimachinery/pkg/runtime"
9 | schema "k8s.io/apimachinery/pkg/runtime/schema"
10 | serializer "k8s.io/apimachinery/pkg/runtime/serializer"
11 | utilruntime "k8s.io/apimachinery/pkg/util/runtime"
12 | )
13 |
14 | var Scheme = runtime.NewScheme()
15 | var Codecs = serializer.NewCodecFactory(Scheme)
16 | var ParameterCodec = runtime.NewParameterCodec(Scheme)
17 | var localSchemeBuilder = runtime.SchemeBuilder{
18 | placementpolicyv1alpha1.AddToScheme,
19 | }
20 |
21 | // AddToScheme adds all types of this clientset into the given scheme. This allows composition
22 | // of clientsets, like in:
23 | //
24 | // import (
25 | // "k8s.io/client-go/kubernetes"
26 | // clientsetscheme "k8s.io/client-go/kubernetes/scheme"
27 | // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
28 | // )
29 | //
30 | // kclientset, _ := kubernetes.NewForConfig(c)
31 | // _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
32 | //
33 | // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
34 | // correctly.
35 | var AddToScheme = localSchemeBuilder.AddToScheme
36 |
37 | func init() {
38 | v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
39 | utilruntime.Must(AddToScheme(Scheme))
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/typed/apis/v1alpha1/apis_client.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package v1alpha1
4 |
5 | import (
6 | v1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
7 | "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned/scheme"
8 | rest "k8s.io/client-go/rest"
9 | )
10 |
11 | type PlacementpolicyV1alpha1Interface interface {
12 | RESTClient() rest.Interface
13 | PlacementPoliciesGetter
14 | }
15 |
16 | // PlacementpolicyV1alpha1Client is used to interact with features provided by the placement-policy.scheduling.x-k8s.io group.
17 | type PlacementpolicyV1alpha1Client struct {
18 | restClient rest.Interface
19 | }
20 |
21 | func (c *PlacementpolicyV1alpha1Client) PlacementPolicies(namespace string) PlacementPolicyInterface {
22 | return newPlacementPolicies(c, namespace)
23 | }
24 |
25 | // NewForConfig creates a new PlacementpolicyV1alpha1Client for the given config.
26 | func NewForConfig(c *rest.Config) (*PlacementpolicyV1alpha1Client, error) {
27 | config := *c
28 | if err := setConfigDefaults(&config); err != nil {
29 | return nil, err
30 | }
31 | client, err := rest.RESTClientFor(&config)
32 | if err != nil {
33 | return nil, err
34 | }
35 | return &PlacementpolicyV1alpha1Client{client}, nil
36 | }
37 |
38 | // NewForConfigOrDie creates a new PlacementpolicyV1alpha1Client for the given config and
39 | // panics if there is an error in the config.
40 | func NewForConfigOrDie(c *rest.Config) *PlacementpolicyV1alpha1Client {
41 | client, err := NewForConfig(c)
42 | if err != nil {
43 | panic(err)
44 | }
45 | return client
46 | }
47 |
48 | // New creates a new PlacementpolicyV1alpha1Client for the given RESTClient.
49 | func New(c rest.Interface) *PlacementpolicyV1alpha1Client {
50 | return &PlacementpolicyV1alpha1Client{c}
51 | }
52 |
53 | func setConfigDefaults(config *rest.Config) error {
54 | gv := v1alpha1.SchemeGroupVersion
55 | config.GroupVersion = &gv
56 | config.APIPath = "/apis"
57 | config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
58 |
59 | if config.UserAgent == "" {
60 | config.UserAgent = rest.DefaultKubernetesUserAgent()
61 | }
62 |
63 | return nil
64 | }
65 |
66 | // RESTClient returns a RESTClient that is used to communicate
67 | // with API server by this client implementation.
68 | func (c *PlacementpolicyV1alpha1Client) RESTClient() rest.Interface {
69 | if c == nil {
70 | return nil
71 | }
72 | return c.restClient
73 | }
74 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/typed/apis/v1alpha1/doc.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | // This package has the automatically generated typed clients.
4 | package v1alpha1
5 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/typed/apis/v1alpha1/fake/doc.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | // Package fake has the automatically generated clients.
4 | package fake
5 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/typed/apis/v1alpha1/fake/fake_apis_client.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package fake
4 |
5 | import (
6 | v1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned/typed/apis/v1alpha1"
7 | rest "k8s.io/client-go/rest"
8 | testing "k8s.io/client-go/testing"
9 | )
10 |
11 | type FakePlacementpolicyV1alpha1 struct {
12 | *testing.Fake
13 | }
14 |
15 | func (c *FakePlacementpolicyV1alpha1) PlacementPolicies(namespace string) v1alpha1.PlacementPolicyInterface {
16 | return &FakePlacementPolicies{c, namespace}
17 | }
18 |
19 | // RESTClient returns a RESTClient that is used to communicate
20 | // with API server by this client implementation.
21 | func (c *FakePlacementpolicyV1alpha1) RESTClient() rest.Interface {
22 | var ret *rest.RESTClient
23 | return ret
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/typed/apis/v1alpha1/fake/fake_placementpolicy.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package fake
4 |
5 | import (
6 | "context"
7 |
8 | v1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
9 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | labels "k8s.io/apimachinery/pkg/labels"
11 | schema "k8s.io/apimachinery/pkg/runtime/schema"
12 | types "k8s.io/apimachinery/pkg/types"
13 | watch "k8s.io/apimachinery/pkg/watch"
14 | testing "k8s.io/client-go/testing"
15 | )
16 |
17 | // FakePlacementPolicies implements PlacementPolicyInterface
18 | type FakePlacementPolicies struct {
19 | Fake *FakePlacementpolicyV1alpha1
20 | ns string
21 | }
22 |
23 | var placementpoliciesResource = schema.GroupVersionResource{Group: "placement-policy.scheduling.x-k8s.io", Version: "v1alpha1", Resource: "placementpolicies"}
24 |
25 | var placementpoliciesKind = schema.GroupVersionKind{Group: "placement-policy.scheduling.x-k8s.io", Version: "v1alpha1", Kind: "PlacementPolicy"}
26 |
27 | // Get takes name of the placementPolicy, and returns the corresponding placementPolicy object, and an error if there is any.
28 | func (c *FakePlacementPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PlacementPolicy, err error) {
29 | obj, err := c.Fake.
30 | Invokes(testing.NewGetAction(placementpoliciesResource, c.ns, name), &v1alpha1.PlacementPolicy{})
31 |
32 | if obj == nil {
33 | return nil, err
34 | }
35 | return obj.(*v1alpha1.PlacementPolicy), err
36 | }
37 |
38 | // List takes label and field selectors, and returns the list of PlacementPolicies that match those selectors.
39 | func (c *FakePlacementPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PlacementPolicyList, err error) {
40 | obj, err := c.Fake.
41 | Invokes(testing.NewListAction(placementpoliciesResource, placementpoliciesKind, c.ns, opts), &v1alpha1.PlacementPolicyList{})
42 |
43 | if obj == nil {
44 | return nil, err
45 | }
46 |
47 | label, _, _ := testing.ExtractFromListOptions(opts)
48 | if label == nil {
49 | label = labels.Everything()
50 | }
51 | list := &v1alpha1.PlacementPolicyList{ListMeta: obj.(*v1alpha1.PlacementPolicyList).ListMeta}
52 | for _, item := range obj.(*v1alpha1.PlacementPolicyList).Items {
53 | if label.Matches(labels.Set(item.Labels)) {
54 | list.Items = append(list.Items, item)
55 | }
56 | }
57 | return list, err
58 | }
59 |
60 | // Watch returns a watch.Interface that watches the requested placementPolicies.
61 | func (c *FakePlacementPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
62 | return c.Fake.
63 | InvokesWatch(testing.NewWatchAction(placementpoliciesResource, c.ns, opts))
64 |
65 | }
66 |
67 | // Create takes the representation of a placementPolicy and creates it. Returns the server's representation of the placementPolicy, and an error, if there is any.
68 | func (c *FakePlacementPolicies) Create(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.CreateOptions) (result *v1alpha1.PlacementPolicy, err error) {
69 | obj, err := c.Fake.
70 | Invokes(testing.NewCreateAction(placementpoliciesResource, c.ns, placementPolicy), &v1alpha1.PlacementPolicy{})
71 |
72 | if obj == nil {
73 | return nil, err
74 | }
75 | return obj.(*v1alpha1.PlacementPolicy), err
76 | }
77 |
78 | // Update takes the representation of a placementPolicy and updates it. Returns the server's representation of the placementPolicy, and an error, if there is any.
79 | func (c *FakePlacementPolicies) Update(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.UpdateOptions) (result *v1alpha1.PlacementPolicy, err error) {
80 | obj, err := c.Fake.
81 | Invokes(testing.NewUpdateAction(placementpoliciesResource, c.ns, placementPolicy), &v1alpha1.PlacementPolicy{})
82 |
83 | if obj == nil {
84 | return nil, err
85 | }
86 | return obj.(*v1alpha1.PlacementPolicy), err
87 | }
88 |
89 | // UpdateStatus was generated because the type contains a Status member.
90 | // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
91 | func (c *FakePlacementPolicies) UpdateStatus(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.UpdateOptions) (*v1alpha1.PlacementPolicy, error) {
92 | obj, err := c.Fake.
93 | Invokes(testing.NewUpdateSubresourceAction(placementpoliciesResource, "status", c.ns, placementPolicy), &v1alpha1.PlacementPolicy{})
94 |
95 | if obj == nil {
96 | return nil, err
97 | }
98 | return obj.(*v1alpha1.PlacementPolicy), err
99 | }
100 |
101 | // Delete takes name of the placementPolicy and deletes it. Returns an error if one occurs.
102 | func (c *FakePlacementPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
103 | _, err := c.Fake.
104 | Invokes(testing.NewDeleteAction(placementpoliciesResource, c.ns, name), &v1alpha1.PlacementPolicy{})
105 |
106 | return err
107 | }
108 |
109 | // DeleteCollection deletes a collection of objects.
110 | func (c *FakePlacementPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
111 | action := testing.NewDeleteCollectionAction(placementpoliciesResource, c.ns, listOpts)
112 |
113 | _, err := c.Fake.Invokes(action, &v1alpha1.PlacementPolicyList{})
114 | return err
115 | }
116 |
117 | // Patch applies the patch and returns the patched placementPolicy.
118 | func (c *FakePlacementPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PlacementPolicy, err error) {
119 | obj, err := c.Fake.
120 | Invokes(testing.NewPatchSubresourceAction(placementpoliciesResource, c.ns, name, pt, data, subresources...), &v1alpha1.PlacementPolicy{})
121 |
122 | if obj == nil {
123 | return nil, err
124 | }
125 | return obj.(*v1alpha1.PlacementPolicy), err
126 | }
127 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/typed/apis/v1alpha1/generated_expansion.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package v1alpha1
4 |
5 | type PlacementPolicyExpansion interface{}
6 |
--------------------------------------------------------------------------------
/pkg/client/clientset/versioned/typed/apis/v1alpha1/placementpolicy.go:
--------------------------------------------------------------------------------
1 | // Code generated by client-gen. DO NOT EDIT.
2 |
3 | package v1alpha1
4 |
5 | import (
6 | "context"
7 | "time"
8 |
9 | v1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
10 | scheme "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned/scheme"
11 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | types "k8s.io/apimachinery/pkg/types"
13 | watch "k8s.io/apimachinery/pkg/watch"
14 | rest "k8s.io/client-go/rest"
15 | )
16 |
17 | // PlacementPoliciesGetter has a method to return a PlacementPolicyInterface.
18 | // A group's client should implement this interface.
19 | type PlacementPoliciesGetter interface {
20 | PlacementPolicies(namespace string) PlacementPolicyInterface
21 | }
22 |
23 | // PlacementPolicyInterface has methods to work with PlacementPolicy resources.
24 | type PlacementPolicyInterface interface {
25 | Create(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.CreateOptions) (*v1alpha1.PlacementPolicy, error)
26 | Update(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.UpdateOptions) (*v1alpha1.PlacementPolicy, error)
27 | UpdateStatus(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.UpdateOptions) (*v1alpha1.PlacementPolicy, error)
28 | Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
29 | DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
30 | Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PlacementPolicy, error)
31 | List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PlacementPolicyList, error)
32 | Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
33 | Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PlacementPolicy, err error)
34 | PlacementPolicyExpansion
35 | }
36 |
37 | // placementPolicies implements PlacementPolicyInterface
38 | type placementPolicies struct {
39 | client rest.Interface
40 | ns string
41 | }
42 |
43 | // newPlacementPolicies returns a PlacementPolicies
44 | func newPlacementPolicies(c *PlacementpolicyV1alpha1Client, namespace string) *placementPolicies {
45 | return &placementPolicies{
46 | client: c.RESTClient(),
47 | ns: namespace,
48 | }
49 | }
50 |
51 | // Get takes name of the placementPolicy, and returns the corresponding placementPolicy object, and an error if there is any.
52 | func (c *placementPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PlacementPolicy, err error) {
53 | result = &v1alpha1.PlacementPolicy{}
54 | err = c.client.Get().
55 | Namespace(c.ns).
56 | Resource("placementpolicies").
57 | Name(name).
58 | VersionedParams(&options, scheme.ParameterCodec).
59 | Do(ctx).
60 | Into(result)
61 | return
62 | }
63 |
64 | // List takes label and field selectors, and returns the list of PlacementPolicies that match those selectors.
65 | func (c *placementPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PlacementPolicyList, err error) {
66 | var timeout time.Duration
67 | if opts.TimeoutSeconds != nil {
68 | timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
69 | }
70 | result = &v1alpha1.PlacementPolicyList{}
71 | err = c.client.Get().
72 | Namespace(c.ns).
73 | Resource("placementpolicies").
74 | VersionedParams(&opts, scheme.ParameterCodec).
75 | Timeout(timeout).
76 | Do(ctx).
77 | Into(result)
78 | return
79 | }
80 |
81 | // Watch returns a watch.Interface that watches the requested placementPolicies.
82 | func (c *placementPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
83 | var timeout time.Duration
84 | if opts.TimeoutSeconds != nil {
85 | timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
86 | }
87 | opts.Watch = true
88 | return c.client.Get().
89 | Namespace(c.ns).
90 | Resource("placementpolicies").
91 | VersionedParams(&opts, scheme.ParameterCodec).
92 | Timeout(timeout).
93 | Watch(ctx)
94 | }
95 |
96 | // Create takes the representation of a placementPolicy and creates it. Returns the server's representation of the placementPolicy, and an error, if there is any.
97 | func (c *placementPolicies) Create(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.CreateOptions) (result *v1alpha1.PlacementPolicy, err error) {
98 | result = &v1alpha1.PlacementPolicy{}
99 | err = c.client.Post().
100 | Namespace(c.ns).
101 | Resource("placementpolicies").
102 | VersionedParams(&opts, scheme.ParameterCodec).
103 | Body(placementPolicy).
104 | Do(ctx).
105 | Into(result)
106 | return
107 | }
108 |
109 | // Update takes the representation of a placementPolicy and updates it. Returns the server's representation of the placementPolicy, and an error, if there is any.
110 | func (c *placementPolicies) Update(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.UpdateOptions) (result *v1alpha1.PlacementPolicy, err error) {
111 | result = &v1alpha1.PlacementPolicy{}
112 | err = c.client.Put().
113 | Namespace(c.ns).
114 | Resource("placementpolicies").
115 | Name(placementPolicy.Name).
116 | VersionedParams(&opts, scheme.ParameterCodec).
117 | Body(placementPolicy).
118 | Do(ctx).
119 | Into(result)
120 | return
121 | }
122 |
123 | // UpdateStatus was generated because the type contains a Status member.
124 | // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
125 | func (c *placementPolicies) UpdateStatus(ctx context.Context, placementPolicy *v1alpha1.PlacementPolicy, opts v1.UpdateOptions) (result *v1alpha1.PlacementPolicy, err error) {
126 | result = &v1alpha1.PlacementPolicy{}
127 | err = c.client.Put().
128 | Namespace(c.ns).
129 | Resource("placementpolicies").
130 | Name(placementPolicy.Name).
131 | SubResource("status").
132 | VersionedParams(&opts, scheme.ParameterCodec).
133 | Body(placementPolicy).
134 | Do(ctx).
135 | Into(result)
136 | return
137 | }
138 |
139 | // Delete takes name of the placementPolicy and deletes it. Returns an error if one occurs.
140 | func (c *placementPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
141 | return c.client.Delete().
142 | Namespace(c.ns).
143 | Resource("placementpolicies").
144 | Name(name).
145 | Body(&opts).
146 | Do(ctx).
147 | Error()
148 | }
149 |
150 | // DeleteCollection deletes a collection of objects.
151 | func (c *placementPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
152 | var timeout time.Duration
153 | if listOpts.TimeoutSeconds != nil {
154 | timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
155 | }
156 | return c.client.Delete().
157 | Namespace(c.ns).
158 | Resource("placementpolicies").
159 | VersionedParams(&listOpts, scheme.ParameterCodec).
160 | Timeout(timeout).
161 | Body(&opts).
162 | Do(ctx).
163 | Error()
164 | }
165 |
166 | // Patch applies the patch and returns the patched placementPolicy.
167 | func (c *placementPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PlacementPolicy, err error) {
168 | result = &v1alpha1.PlacementPolicy{}
169 | err = c.client.Patch(pt).
170 | Namespace(c.ns).
171 | Resource("placementpolicies").
172 | Name(name).
173 | SubResource(subresources...).
174 | VersionedParams(&opts, scheme.ParameterCodec).
175 | Body(data).
176 | Do(ctx).
177 | Into(result)
178 | return
179 | }
180 |
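Example usage of the typed client above, reached through the versioned clientset's PlacementpolicyV1alpha1() group method. A minimal sketch; the in-cluster config, the "demo" namespace, and the "example-policy" name are illustrative:

package main

import (
	"context"
	"fmt"

	ppclientset "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func main() {
	// Build the versioned clientset from in-cluster config (illustrative).
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := ppclientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	pps := client.PlacementpolicyV1alpha1().PlacementPolicies("demo")

	// List every PlacementPolicy in the namespace.
	list, err := pps.List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for i := range list.Items {
		fmt.Println(list.Items[i].Name)
	}

	// Fetch a single policy by name.
	pp, err := pps.Get(context.TODO(), "example-policy", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(pp.Spec.Weight)
}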
--------------------------------------------------------------------------------
/pkg/client/informers/externalversions/apis/interface.go:
--------------------------------------------------------------------------------
1 | // Code generated by informer-gen. DO NOT EDIT.
2 |
3 | package apis
4 |
5 | import (
6 | v1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions/apis/v1alpha1"
7 | internalinterfaces "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions/internalinterfaces"
8 | )
9 |
10 | // Interface provides access to each of this group's versions.
11 | type Interface interface {
12 | // V1alpha1 provides access to shared informers for resources in V1alpha1.
13 | V1alpha1() v1alpha1.Interface
14 | }
15 |
16 | type group struct {
17 | factory internalinterfaces.SharedInformerFactory
18 | namespace string
19 | tweakListOptions internalinterfaces.TweakListOptionsFunc
20 | }
21 |
22 | // New returns a new Interface.
23 | func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
24 | return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
25 | }
26 |
27 | // V1alpha1 returns a new v1alpha1.Interface.
28 | func (g *group) V1alpha1() v1alpha1.Interface {
29 | return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
30 | }
31 |
--------------------------------------------------------------------------------
/pkg/client/informers/externalversions/apis/v1alpha1/interface.go:
--------------------------------------------------------------------------------
1 | // Code generated by informer-gen. DO NOT EDIT.
2 |
3 | package v1alpha1
4 |
5 | import (
6 | internalinterfaces "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions/internalinterfaces"
7 | )
8 |
9 | // Interface provides access to all the informers in this group version.
10 | type Interface interface {
11 | // PlacementPolicies returns a PlacementPolicyInformer.
12 | PlacementPolicies() PlacementPolicyInformer
13 | }
14 |
15 | type version struct {
16 | factory internalinterfaces.SharedInformerFactory
17 | namespace string
18 | tweakListOptions internalinterfaces.TweakListOptionsFunc
19 | }
20 |
21 | // New returns a new Interface.
22 | func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
23 | return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
24 | }
25 |
26 | // PlacementPolicies returns a PlacementPolicyInformer.
27 | func (v *version) PlacementPolicies() PlacementPolicyInformer {
28 | return &placementPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
29 | }
30 |
--------------------------------------------------------------------------------
/pkg/client/informers/externalversions/apis/v1alpha1/placementpolicy.go:
--------------------------------------------------------------------------------
1 | // Code generated by informer-gen. DO NOT EDIT.
2 |
3 | package v1alpha1
4 |
5 | import (
6 | "context"
7 | time "time"
8 |
9 | apisv1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
10 | versioned "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
11 | internalinterfaces "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions/internalinterfaces"
12 | v1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/listers/apis/v1alpha1"
13 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
14 | runtime "k8s.io/apimachinery/pkg/runtime"
15 | watch "k8s.io/apimachinery/pkg/watch"
16 | cache "k8s.io/client-go/tools/cache"
17 | )
18 |
19 | // PlacementPolicyInformer provides access to a shared informer and lister for
20 | // PlacementPolicies.
21 | type PlacementPolicyInformer interface {
22 | Informer() cache.SharedIndexInformer
23 | Lister() v1alpha1.PlacementPolicyLister
24 | }
25 |
26 | type placementPolicyInformer struct {
27 | factory internalinterfaces.SharedInformerFactory
28 | tweakListOptions internalinterfaces.TweakListOptionsFunc
29 | namespace string
30 | }
31 |
32 | // NewPlacementPolicyInformer constructs a new informer for PlacementPolicy type.
33 | // Always prefer using an informer factory to get a shared informer instead of getting an independent
34 | // one. This reduces memory footprint and number of connections to the server.
35 | func NewPlacementPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
36 | return NewFilteredPlacementPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
37 | }
38 |
39 | // NewFilteredPlacementPolicyInformer constructs a new informer for PlacementPolicy type.
40 | // Always prefer using an informer factory to get a shared informer instead of getting an independent
41 | // one. This reduces memory footprint and number of connections to the server.
42 | func NewFilteredPlacementPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
43 | return cache.NewSharedIndexInformer(
44 | &cache.ListWatch{
45 | ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
46 | if tweakListOptions != nil {
47 | tweakListOptions(&options)
48 | }
49 | return client.PlacementpolicyV1alpha1().PlacementPolicies(namespace).List(context.TODO(), options)
50 | },
51 | WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
52 | if tweakListOptions != nil {
53 | tweakListOptions(&options)
54 | }
55 | return client.PlacementpolicyV1alpha1().PlacementPolicies(namespace).Watch(context.TODO(), options)
56 | },
57 | },
58 | &apisv1alpha1.PlacementPolicy{},
59 | resyncPeriod,
60 | indexers,
61 | )
62 | }
63 |
64 | func (f *placementPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
65 | return NewFilteredPlacementPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
66 | }
67 |
68 | func (f *placementPolicyInformer) Informer() cache.SharedIndexInformer {
69 | return f.factory.InformerFor(&apisv1alpha1.PlacementPolicy{}, f.defaultInformer)
70 | }
71 |
72 | func (f *placementPolicyInformer) Lister() v1alpha1.PlacementPolicyLister {
73 | return v1alpha1.NewPlacementPolicyLister(f.Informer().GetIndexer())
74 | }
75 |
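As the constructor comments above advise, the shared informer should be obtained through the factory rather than built directly. A minimal sketch, assuming a 10-minute resync period (illustrative):

package example

import (
	"time"

	ppclientset "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
	ppinformers "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions"
	"k8s.io/client-go/tools/cache"
)

// startPlacementPolicyInformer obtains the shared PlacementPolicy informer via
// the factory, registers a handler, and waits for the cache to sync.
func startPlacementPolicyInformer(ppClient ppclientset.Interface, stopCh <-chan struct{}) cache.SharedIndexInformer {
	factory := ppinformers.NewSharedInformerFactory(ppClient, 10*time.Minute)

	// Repeated calls hand back the same shared informer instance.
	informer := factory.Placementpolicy().V1alpha1().PlacementPolicies().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { /* react to new PlacementPolicies */ },
	})

	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	return informer
}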
--------------------------------------------------------------------------------
/pkg/client/informers/externalversions/factory.go:
--------------------------------------------------------------------------------
1 | // Code generated by informer-gen. DO NOT EDIT.
2 |
3 | package externalversions
4 |
5 | import (
6 | reflect "reflect"
7 | sync "sync"
8 | time "time"
9 |
10 | versioned "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
11 | apis "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions/apis"
12 | internalinterfaces "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions/internalinterfaces"
13 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
14 | runtime "k8s.io/apimachinery/pkg/runtime"
15 | schema "k8s.io/apimachinery/pkg/runtime/schema"
16 | cache "k8s.io/client-go/tools/cache"
17 | )
18 |
19 | // SharedInformerOption defines the functional option type for SharedInformerFactory.
20 | type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
21 |
22 | type sharedInformerFactory struct {
23 | client versioned.Interface
24 | namespace string
25 | tweakListOptions internalinterfaces.TweakListOptionsFunc
26 | lock sync.Mutex
27 | defaultResync time.Duration
28 | customResync map[reflect.Type]time.Duration
29 |
30 | informers map[reflect.Type]cache.SharedIndexInformer
31 | // startedInformers is used for tracking which informers have been started.
32 | // This allows Start() to be called multiple times safely.
33 | startedInformers map[reflect.Type]bool
34 | }
35 |
36 | // WithCustomResyncConfig sets a custom resync period for the specified informer types.
37 | func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
38 | return func(factory *sharedInformerFactory) *sharedInformerFactory {
39 | for k, v := range resyncConfig {
40 | factory.customResync[reflect.TypeOf(k)] = v
41 | }
42 | return factory
43 | }
44 | }
45 |
46 | // WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
47 | func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
48 | return func(factory *sharedInformerFactory) *sharedInformerFactory {
49 | factory.tweakListOptions = tweakListOptions
50 | return factory
51 | }
52 | }
53 |
54 | // WithNamespace limits the SharedInformerFactory to the specified namespace.
55 | func WithNamespace(namespace string) SharedInformerOption {
56 | return func(factory *sharedInformerFactory) *sharedInformerFactory {
57 | factory.namespace = namespace
58 | return factory
59 | }
60 | }
61 |
62 | // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
63 | func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
64 | return NewSharedInformerFactoryWithOptions(client, defaultResync)
65 | }
66 |
67 | // NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
68 | // Listers obtained via this SharedInformerFactory will be subject to the same filters
69 | // as specified here.
70 | // Deprecated: Please use NewSharedInformerFactoryWithOptions instead
71 | func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
72 | return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
73 | }
74 |
75 | // NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
76 | func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
77 | factory := &sharedInformerFactory{
78 | client: client,
79 | namespace: v1.NamespaceAll,
80 | defaultResync: defaultResync,
81 | informers: make(map[reflect.Type]cache.SharedIndexInformer),
82 | startedInformers: make(map[reflect.Type]bool),
83 | customResync: make(map[reflect.Type]time.Duration),
84 | }
85 |
86 | // Apply all options
87 | for _, opt := range options {
88 | factory = opt(factory)
89 | }
90 |
91 | return factory
92 | }
93 |
94 | // Start initializes all requested informers.
95 | func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
96 | f.lock.Lock()
97 | defer f.lock.Unlock()
98 |
99 | for informerType, informer := range f.informers {
100 | if !f.startedInformers[informerType] {
101 | go informer.Run(stopCh)
102 | f.startedInformers[informerType] = true
103 | }
104 | }
105 | }
106 |
107 | // WaitForCacheSync waits until the caches of all started informers have synced.
108 | func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
109 | informers := func() map[reflect.Type]cache.SharedIndexInformer {
110 | f.lock.Lock()
111 | defer f.lock.Unlock()
112 |
113 | informers := map[reflect.Type]cache.SharedIndexInformer{}
114 | for informerType, informer := range f.informers {
115 | if f.startedInformers[informerType] {
116 | informers[informerType] = informer
117 | }
118 | }
119 | return informers
120 | }()
121 |
122 | res := map[reflect.Type]bool{}
123 | for informType, informer := range informers {
124 | res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
125 | }
126 | return res
127 | }
128 |
129 | // InformerFor returns the SharedIndexInformer for obj using an internal
130 | // client.
131 | func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
132 | f.lock.Lock()
133 | defer f.lock.Unlock()
134 |
135 | informerType := reflect.TypeOf(obj)
136 | informer, exists := f.informers[informerType]
137 | if exists {
138 | return informer
139 | }
140 |
141 | resyncPeriod, exists := f.customResync[informerType]
142 | if !exists {
143 | resyncPeriod = f.defaultResync
144 | }
145 |
146 | informer = newFunc(f.client, resyncPeriod)
147 | f.informers[informerType] = informer
148 |
149 | return informer
150 | }
151 |
152 | // SharedInformerFactory provides shared informers for resources in all known
153 | // API group versions.
154 | type SharedInformerFactory interface {
155 | internalinterfaces.SharedInformerFactory
156 | ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
157 | WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
158 |
159 | Placementpolicy() apis.Interface
160 | }
161 |
162 | func (f *sharedInformerFactory) Placementpolicy() apis.Interface {
163 | return apis.New(f, f.namespace, f.tweakListOptions)
164 | }
165 |
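A sketch of constructing the factory with the functional options defined above, scoping it to one namespace and filtering list/watch calls by label; the namespace, resync period, and selector are illustrative:

package example

import (
	"time"

	ppclientset "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
	"github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func newScopedFactory(ppClient ppclientset.Interface) externalversions.SharedInformerFactory {
	return externalversions.NewSharedInformerFactoryWithOptions(
		ppClient,
		30*time.Second, // default resync period
		externalversions.WithNamespace("team-a"),
		externalversions.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = "app=nginx"
		}),
	)
}

Start and WaitForCacheSync are then called exactly as in the previous sketch.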
--------------------------------------------------------------------------------
/pkg/client/informers/externalversions/generic.go:
--------------------------------------------------------------------------------
1 | // Code generated by informer-gen. DO NOT EDIT.
2 |
3 | package externalversions
4 |
5 | import (
6 | "fmt"
7 |
8 | v1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
9 | schema "k8s.io/apimachinery/pkg/runtime/schema"
10 | cache "k8s.io/client-go/tools/cache"
11 | )
12 |
13 | // GenericInformer is a type of SharedIndexInformer that locates and delegates to other
14 | // shared informers based on type.
15 | type GenericInformer interface {
16 | Informer() cache.SharedIndexInformer
17 | Lister() cache.GenericLister
18 | }
19 |
20 | type genericInformer struct {
21 | informer cache.SharedIndexInformer
22 | resource schema.GroupResource
23 | }
24 |
25 | // Informer returns the SharedIndexInformer.
26 | func (f *genericInformer) Informer() cache.SharedIndexInformer {
27 | return f.informer
28 | }
29 |
30 | // Lister returns the GenericLister.
31 | func (f *genericInformer) Lister() cache.GenericLister {
32 | return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
33 | }
34 |
35 | // ForResource gives generic access to a shared informer of the matching type
36 | // TODO extend this to unknown resources with a client pool
37 | func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
38 | switch resource {
39 | // Group=placement-policy.scheduling.x-k8s.io, Version=v1alpha1
40 | case v1alpha1.SchemeGroupVersion.WithResource("placementpolicies"):
41 | return &genericInformer{resource: resource.GroupResource(), informer: f.Placementpolicy().V1alpha1().PlacementPolicies().Informer()}, nil
42 |
43 | }
44 |
45 | return nil, fmt.Errorf("no informer found for %v", resource)
46 | }
47 |
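ForResource gives GVR-keyed access to the same shared informers, which is useful for generic controllers. A small sketch:

package example

import (
	"github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
	"github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
)

// listViaGenericInformer looks up the PlacementPolicy informer by its
// GroupVersionResource and lists from the generic lister.
func listViaGenericInformer(factory externalversions.SharedInformerFactory) ([]runtime.Object, error) {
	gvr := v1alpha1.SchemeGroupVersion.WithResource("placementpolicies")
	gi, err := factory.ForResource(gvr)
	if err != nil {
		return nil, err
	}
	return gi.Lister().List(labels.Everything())
}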
--------------------------------------------------------------------------------
/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go:
--------------------------------------------------------------------------------
1 | // Code generated by informer-gen. DO NOT EDIT.
2 |
3 | package internalinterfaces
4 |
5 | import (
6 | time "time"
7 |
8 | versioned "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
9 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | runtime "k8s.io/apimachinery/pkg/runtime"
11 | cache "k8s.io/client-go/tools/cache"
12 | )
13 |
14 | // NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
15 | type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
16 |
17 | // SharedInformerFactory is a small interface that allows adding an informer without an import cycle.
18 | type SharedInformerFactory interface {
19 | Start(stopCh <-chan struct{})
20 | InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
21 | }
22 |
23 | // TweakListOptionsFunc is a function that transforms a v1.ListOptions.
24 | type TweakListOptionsFunc func(*v1.ListOptions)
25 |
--------------------------------------------------------------------------------
/pkg/client/listers/apis/v1alpha1/expansion_generated.go:
--------------------------------------------------------------------------------
1 | // Code generated by lister-gen. DO NOT EDIT.
2 |
3 | package v1alpha1
4 |
5 | // PlacementPolicyListerExpansion allows custom methods to be added to
6 | // PlacementPolicyLister.
7 | type PlacementPolicyListerExpansion interface{}
8 |
9 | // PlacementPolicyNamespaceListerExpansion allows custom methods to be added to
10 | // PlacementPolicyNamespaceLister.
11 | type PlacementPolicyNamespaceListerExpansion interface{}
12 |
--------------------------------------------------------------------------------
/pkg/client/listers/apis/v1alpha1/placementpolicy.go:
--------------------------------------------------------------------------------
1 | // Code generated by lister-gen. DO NOT EDIT.
2 |
3 | package v1alpha1
4 |
5 | import (
6 | v1alpha1 "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
7 | "k8s.io/apimachinery/pkg/api/errors"
8 | "k8s.io/apimachinery/pkg/labels"
9 | "k8s.io/client-go/tools/cache"
10 | )
11 |
12 | // PlacementPolicyLister helps list PlacementPolicies.
13 | // All objects returned here must be treated as read-only.
14 | type PlacementPolicyLister interface {
15 | // List lists all PlacementPolicies in the indexer.
16 | // Objects returned here must be treated as read-only.
17 | List(selector labels.Selector) (ret []*v1alpha1.PlacementPolicy, err error)
18 | // PlacementPolicies returns an object that can list and get PlacementPolicies.
19 | PlacementPolicies(namespace string) PlacementPolicyNamespaceLister
20 | PlacementPolicyListerExpansion
21 | }
22 |
23 | // placementPolicyLister implements the PlacementPolicyLister interface.
24 | type placementPolicyLister struct {
25 | indexer cache.Indexer
26 | }
27 |
28 | // NewPlacementPolicyLister returns a new PlacementPolicyLister.
29 | func NewPlacementPolicyLister(indexer cache.Indexer) PlacementPolicyLister {
30 | return &placementPolicyLister{indexer: indexer}
31 | }
32 |
33 | // List lists all PlacementPolicies in the indexer.
34 | func (s *placementPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.PlacementPolicy, err error) {
35 | err = cache.ListAll(s.indexer, selector, func(m interface{}) {
36 | ret = append(ret, m.(*v1alpha1.PlacementPolicy))
37 | })
38 | return ret, err
39 | }
40 |
41 | // PlacementPolicies returns an object that can list and get PlacementPolicies.
42 | func (s *placementPolicyLister) PlacementPolicies(namespace string) PlacementPolicyNamespaceLister {
43 | return placementPolicyNamespaceLister{indexer: s.indexer, namespace: namespace}
44 | }
45 |
46 | // PlacementPolicyNamespaceLister helps list and get PlacementPolicies.
47 | // All objects returned here must be treated as read-only.
48 | type PlacementPolicyNamespaceLister interface {
49 | // List lists all PlacementPolicies in the indexer for a given namespace.
50 | // Objects returned here must be treated as read-only.
51 | List(selector labels.Selector) (ret []*v1alpha1.PlacementPolicy, err error)
52 | // Get retrieves the PlacementPolicy from the indexer for a given namespace and name.
53 | // Objects returned here must be treated as read-only.
54 | Get(name string) (*v1alpha1.PlacementPolicy, error)
55 | PlacementPolicyNamespaceListerExpansion
56 | }
57 |
58 | // placementPolicyNamespaceLister implements the PlacementPolicyNamespaceLister
59 | // interface.
60 | type placementPolicyNamespaceLister struct {
61 | indexer cache.Indexer
62 | namespace string
63 | }
64 |
65 | // List lists all PlacementPolicies in the indexer for a given namespace.
66 | func (s placementPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PlacementPolicy, err error) {
67 | err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
68 | ret = append(ret, m.(*v1alpha1.PlacementPolicy))
69 | })
70 | return ret, err
71 | }
72 |
73 | // Get retrieves the PlacementPolicy from the indexer for a given namespace and name.
74 | func (s placementPolicyNamespaceLister) Get(name string) (*v1alpha1.PlacementPolicy, error) {
75 | obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
76 | if err != nil {
77 | return nil, err
78 | }
79 | if !exists {
80 | return nil, errors.NewNotFound(v1alpha1.Resource("placementpolicy"), name)
81 | }
82 | return obj.(*v1alpha1.PlacementPolicy), nil
83 | }
84 |
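A short sketch of reading PlacementPolicies back from the lister's namespaced view; the namespace, name, and selector are illustrative, and the returned objects must be treated as read-only:

package example

import (
	"fmt"

	pplisters "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/listers/apis/v1alpha1"
	"k8s.io/apimachinery/pkg/labels"
)

func printPolicies(lister pplisters.PlacementPolicyLister) error {
	// List cached policies in "demo" that carry app=nginx.
	selector := labels.SelectorFromSet(labels.Set{"app": "nginx"})
	pps, err := lister.PlacementPolicies("demo").List(selector)
	if err != nil {
		return err
	}
	for _, pp := range pps {
		fmt.Printf("%s weight=%d\n", pp.Name, pp.Spec.Weight)
	}

	// Get returns a NotFound error when the policy is not in the cache.
	_, err = lister.PlacementPolicies("demo").Get("example-policy")
	return err
}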
--------------------------------------------------------------------------------
/pkg/plugins/placementpolicy/core/core.go:
--------------------------------------------------------------------------------
1 | package core
2 |
3 | import (
4 | "context"
5 | "sort"
6 |
7 | "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
8 | ppclientset "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/clientset/versioned"
9 | ppinformers "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/informers/externalversions/apis/v1alpha1"
10 | pplisters "github.com/Azure/placement-policy-scheduler-plugins/pkg/client/listers/apis/v1alpha1"
11 | "github.com/Azure/placement-policy-scheduler-plugins/pkg/utils"
12 |
13 | corev1 "k8s.io/api/core/v1"
14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
15 | "k8s.io/apimachinery/pkg/labels"
16 | "k8s.io/client-go/kubernetes"
17 | corelisters "k8s.io/client-go/listers/core/v1"
18 | "k8s.io/kubernetes/pkg/scheduler/framework"
19 | )
20 |
21 | // Manager defines the interfaces for PlacementPolicy management.
22 | type Manager interface {
23 | GetPlacementPolicyForPod(context.Context, *corev1.Pod) (*v1alpha1.PlacementPolicy, error)
24 | GetPodsWithLabels(context.Context, map[string]string) ([]*corev1.Pod, error)
25 | AnnotatePod(context.Context, *corev1.Pod, *v1alpha1.PlacementPolicy, bool) (*corev1.Pod, error)
26 | GetPlacementPolicy(context.Context, string, string) (*v1alpha1.PlacementPolicy, error)
27 | }
28 |
29 | type PlacementPolicyManager struct {
30 | // client is a clientset for the kube API server.
31 | client kubernetes.Interface
32 | // ppClient is a clientset for the PlacementPolicy API
33 | ppClient ppclientset.Interface
34 | // podLister is pod lister
35 | podLister corelisters.PodLister
36 | // snapshotSharedLister is the scheduler framework's shared lister over its node snapshot
37 | snapshotSharedLister framework.SharedLister
38 | // ppLister is placementPolicy lister
39 | ppLister pplisters.PlacementPolicyLister
40 | }
41 |
42 | func NewPlacementPolicyManager(
43 | client kubernetes.Interface,
44 | ppClient ppclientset.Interface,
45 | snapshotSharedLister framework.SharedLister,
46 | ppInformer ppinformers.PlacementPolicyInformer,
47 | podLister corelisters.PodLister) *PlacementPolicyManager {
48 | return &PlacementPolicyManager{
49 | client: client,
50 | ppClient: ppClient,
51 | snapshotSharedLister: snapshotSharedLister,
52 | ppLister: ppInformer.Lister(),
53 | podLister: podLister,
54 | }
55 | }
56 |
57 | // GetPlacementPolicyForPod returns the placement policy for the given pod
58 | func (m *PlacementPolicyManager) GetPlacementPolicyForPod(ctx context.Context, pod *corev1.Pod) (*v1alpha1.PlacementPolicy, error) {
59 | ppList, err := m.ppLister.PlacementPolicies(pod.Namespace).List(labels.Everything())
60 | if err != nil {
61 | return nil, err
62 | }
63 | // filter the placement policy list based on the pod's labels
64 | ppList = m.filterPlacementPolicyList(ppList, pod)
65 | if len(ppList) == 0 {
66 | return nil, nil
67 | }
68 | if len(ppList) > 1 {
69 | // if there are multiple placement policies, sort them by weight and return the first one
70 | sort.Sort(sort.Reverse(ByWeight(ppList)))
71 | }
72 |
73 | return ppList[0], nil
74 | }
75 |
76 | func (m *PlacementPolicyManager) GetPodsWithLabels(ctx context.Context, podLabels map[string]string) ([]*corev1.Pod, error) {
77 | return m.podLister.List(labels.Set(podLabels).AsSelector())
78 | }
79 |
80 | // AnnotatePod annotates the pod with the placement policy.
81 | func (m *PlacementPolicyManager) AnnotatePod(ctx context.Context, pod *corev1.Pod, pp *v1alpha1.PlacementPolicy, preferredNodeWithMatchingLabels bool) (*corev1.Pod, error) {
82 | annotations := map[string]string{}
83 | if pod.Annotations != nil {
84 | annotations = pod.Annotations
85 | }
86 |
87 | preference := "false"
88 | if preferredNodeWithMatchingLabels {
89 | preference = "true"
90 | }
91 | annotations[v1alpha1.PlacementPolicyAnnotationKey] = pp.Name
92 | annotations[v1alpha1.PlacementPolicyPreferenceAnnotationKey] = preference
93 | pod.Annotations = annotations
94 | return m.client.CoreV1().Pods(pod.Namespace).Update(ctx, pod, metav1.UpdateOptions{})
95 | }
96 |
97 | func (m *PlacementPolicyManager) GetPlacementPolicy(ctx context.Context, namespace, name string) (*v1alpha1.PlacementPolicy, error) {
98 | return m.ppLister.PlacementPolicies(namespace).Get(name)
99 | }
100 |
101 | func (m *PlacementPolicyManager) filterPlacementPolicyList(ppList []*v1alpha1.PlacementPolicy, pod *corev1.Pod) []*v1alpha1.PlacementPolicy {
102 | var filteredPPList []*v1alpha1.PlacementPolicy
103 | for _, pp := range ppList {
104 | labels := pp.Spec.PodSelector.MatchLabels
105 | if utils.HasMatchingLabels(pod.Labels, labels) {
106 | filteredPPList = append(filteredPPList, pp)
107 | }
108 | }
109 | return filteredPPList
110 | }
111 |
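A hedged sketch of how a scheduling plugin callback might drive the Manager above: look up the policy that matches the pod, bail out when none applies, and record the choice on the pod. The surrounding plugin wiring (how the manager was constructed and whether the node matched the policy's node selector) is assumed rather than shown:

package example

import (
	"context"

	"github.com/Azure/placement-policy-scheduler-plugins/pkg/plugins/placementpolicy/core"
	corev1 "k8s.io/api/core/v1"
)

// handlePod is an illustrative helper, not part of the plugin's real callbacks.
func handlePod(ctx context.Context, mgr core.Manager, pod *corev1.Pod, nodeMatchesPolicy bool) error {
	pp, err := mgr.GetPlacementPolicyForPod(ctx, pod)
	if err != nil {
		return err
	}
	if pp == nil {
		// No policy selects this pod; nothing to do.
		return nil
	}
	// Record on the pod which policy applied and whether it landed on a
	// preferred (label-matching) node.
	_, err = mgr.AnnotatePod(ctx, pod, pp, nodeMatchesPolicy)
	return err
}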
--------------------------------------------------------------------------------
/pkg/plugins/placementpolicy/core/sort.go:
--------------------------------------------------------------------------------
1 | package core
2 |
3 | import "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
4 |
5 | type ByWeight []*v1alpha1.PlacementPolicy
6 |
7 | func (a ByWeight) Len() int { return len(a) }
8 |
9 | func (a ByWeight) Swap(i, j int) {
10 | a[i], a[j] = a[j], a[i]
11 | }
12 |
13 | func (a ByWeight) Less(i, j int) bool {
14 | return a[i].Spec.Weight > a[j].Spec.Weight
15 | }
16 |
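ByWeight orders policies by descending Spec.Weight: Less reports i before j when i's weight is larger. A tiny sketch with illustrative weights:

package example

import (
	"sort"

	"github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
	"github.com/Azure/placement-policy-scheduler-plugins/pkg/plugins/placementpolicy/core"
)

func highestWeightFirst(pps []*v1alpha1.PlacementPolicy) *v1alpha1.PlacementPolicy {
	if len(pps) == 0 {
		return nil
	}
	// After sort.Sort, pps[0] holds the largest Spec.Weight,
	// e.g. weights {50, 100, 75} end up ordered {100, 75, 50}.
	sort.Sort(core.ByWeight(pps))
	return pps[0]
}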
--------------------------------------------------------------------------------
/pkg/plugins/placementpolicy/placementpolicy_test.go:
--------------------------------------------------------------------------------
1 | package placementpolicy
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 |
7 | "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
8 | corev1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/apimachinery/pkg/types"
11 | "k8s.io/kubernetes/pkg/scheduler/framework"
12 | )
13 |
14 | func TestGroupNodesWithLabels(t *testing.T) {
15 | tests := []struct {
16 | name string
17 | nodeList []*corev1.Node
18 | labels map[string]string
19 | want func() map[string]*framework.NodeInfo
20 | }{
21 | {
22 | name: "no nodes",
23 | nodeList: []*corev1.Node{},
24 | labels: map[string]string{"foo": "bar"},
25 | want: func() map[string]*framework.NodeInfo { return map[string]*framework.NodeInfo{} },
26 | },
27 | {
28 | name: "no matching nodes",
29 | nodeList: []*corev1.Node{
30 | {ObjectMeta: metav1.ObjectMeta{Name: "node1"}},
31 | {ObjectMeta: metav1.ObjectMeta{Name: "node2"}},
32 | },
33 | labels: map[string]string{"foo": "bar"},
34 | want: func() map[string]*framework.NodeInfo { return map[string]*framework.NodeInfo{} },
35 | },
36 | {
37 | name: "matching nodes found",
38 | nodeList: []*corev1.Node{
39 | {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"foo": "bar"}}},
40 | {ObjectMeta: metav1.ObjectMeta{Name: "node2"}},
41 | {ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"foo": "bar", "baz": "qux"}}},
42 | },
43 | labels: map[string]string{"foo": "bar"},
44 | want: func() map[string]*framework.NodeInfo {
45 | n1 := framework.NewNodeInfo()
46 | n1.SetNode(&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"foo": "bar"}}})
47 |
48 | n3 := framework.NewNodeInfo()
49 | n3.SetNode(&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node3", Labels: map[string]string{"foo": "bar", "baz": "qux"}}})
50 |
51 | return map[string]*framework.NodeInfo{
52 | "node1": n1,
53 | "node3": n3,
54 | }
55 | },
56 | },
57 | }
58 |
59 | for _, tt := range tests {
60 | t.Run(tt.name, func(t *testing.T) {
61 | got := groupNodesWithLabels(tt.nodeList, tt.labels)
62 | if len(got) != len(tt.want()) {
63 | t.Errorf("groupNodesWithLabels(%v, %v) = %v, want %v", tt.nodeList, tt.labels, got, tt.want())
64 | }
65 | for k := range tt.want() {
66 | if _, ok := got[k]; !ok {
67 | t.Errorf("groupNodesWithLabels(%v, %v) = %v, want %v", tt.nodeList, tt.labels, got, tt.want())
68 | }
69 | }
70 | })
71 | }
72 | }
73 |
74 | func TestGroupPodsBasedOnNodePreference(t *testing.T) {
75 | tests := []struct {
76 | name string
77 | podList []*corev1.Pod
78 | pod *corev1.Pod
79 | nodeWithMatchingLabels map[string]*corev1.Node
80 | want []*corev1.Pod
81 | }{
82 | {
83 | name: "no pods",
84 | podList: []*corev1.Pod{},
85 | pod: &corev1.Pod{},
86 | nodeWithMatchingLabels: map[string]*corev1.Node{},
87 | want: []*corev1.Pod{},
88 | },
89 | {
90 | name: "skip current pod",
91 | podList: []*corev1.Pod{
92 | {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
93 | },
94 | pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
95 | nodeWithMatchingLabels: map[string]*corev1.Node{},
96 | want: []*corev1.Pod{},
97 | },
98 | {
99 | name: "pod with node name exists",
100 | podList: []*corev1.Pod{
101 | {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
102 | {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: types.UID("pod2")}, Spec: corev1.PodSpec{NodeName: "node1"}},
103 | },
104 | pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
105 | nodeWithMatchingLabels: map[string]*corev1.Node{
106 | "node1": {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"foo": "bar"}}},
107 | },
108 | want: []*corev1.Pod{
109 | {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: types.UID("pod2")}, Spec: corev1.PodSpec{NodeName: "node1"}},
110 | },
111 | },
112 | {
113 | name: "no node name or annotation",
114 | podList: []*corev1.Pod{
115 | {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
116 | {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: types.UID("pod2")}},
117 | },
118 | pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
119 | nodeWithMatchingLabels: map[string]*corev1.Node{
120 | "node1": {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"foo": "bar"}}},
121 | },
122 | want: []*corev1.Pod{},
123 | },
124 | {
125 | name: "no node name but annotation exists",
126 | podList: []*corev1.Pod{
127 | {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
128 | {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: types.UID("pod2"), Annotations: map[string]string{v1alpha1.PlacementPolicyPreferenceAnnotationKey: "true"}}},
129 | },
130 | pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
131 | nodeWithMatchingLabels: map[string]*corev1.Node{
132 | "node1": {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"foo": "bar"}}},
133 | },
134 | want: []*corev1.Pod{
135 | {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: types.UID("pod2"), Annotations: map[string]string{v1alpha1.PlacementPolicyPreferenceAnnotationKey: "true"}}},
136 | },
137 | },
138 | {
139 | name: "annotation exists but no matching node",
140 | podList: []*corev1.Pod{
141 | {ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
142 | {ObjectMeta: metav1.ObjectMeta{Name: "pod2", UID: types.UID("pod2"), Annotations: map[string]string{v1alpha1.PlacementPolicyPreferenceAnnotationKey: "false"}}},
143 | },
144 | pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}},
145 | nodeWithMatchingLabels: map[string]*corev1.Node{
146 | "node1": {ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: map[string]string{"foo": "bar"}}},
147 | },
148 | want: []*corev1.Pod{},
149 | },
150 | }
151 |
152 | for _, tt := range tests {
153 | t.Run(tt.name, func(t *testing.T) {
154 | got := groupPodsBasedOnNodePreference(tt.podList, tt.pod, tt.nodeWithMatchingLabels)
155 | if len(got) != len(tt.want) {
156 | t.Errorf("groupPodsBasedOnNodePreference(%v, %v, %v) = %v, want %v", tt.podList, tt.pod, tt.nodeWithMatchingLabels, got, tt.want)
157 | }
158 | if !reflect.DeepEqual(got, tt.want) {
159 | t.Errorf("groupPodsBasedOnNodePreference(%v, %v, %v) = %v, want %v", tt.podList, tt.pod, tt.nodeWithMatchingLabels, got, tt.want)
160 | }
161 | })
162 | }
163 | }
164 |
--------------------------------------------------------------------------------
/pkg/plugins/placementpolicy/state.go:
--------------------------------------------------------------------------------
1 | package placementpolicy
2 |
3 | import (
4 | "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
5 |
6 | "k8s.io/kubernetes/pkg/scheduler/framework"
7 | )
8 |
9 | type stateData struct {
10 | name string
11 | pp *v1alpha1.PlacementPolicy
12 | }
13 |
14 | func NewStateData(name string, pp *v1alpha1.PlacementPolicy) framework.StateData {
15 | return &stateData{
16 | name: name,
17 | pp: pp,
18 | }
19 | }
20 |
21 | func (d *stateData) Clone() framework.StateData {
22 | return d
23 | }
24 |
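stateData is the payload a plugin stores in the framework's CycleState during a scheduling cycle; because it is treated as read-only once written, Clone can simply return the receiver. A brief sketch of writing it and reading it back (the state key is illustrative):

package example

import (
	"github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
	"github.com/Azure/placement-policy-scheduler-plugins/pkg/plugins/placementpolicy"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

const stateKey framework.StateKey = "PlacementPolicy" // illustrative key

func stashPolicy(cs *framework.CycleState, pp *v1alpha1.PlacementPolicy) (framework.StateData, error) {
	// Record the matched policy for later extension points in the same cycle.
	cs.Write(stateKey, placementpolicy.NewStateData(pp.Name, pp))

	// Within the plugin's own package the returned value would be asserted
	// back to *stateData to recover the policy.
	return cs.Read(stateKey)
}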
--------------------------------------------------------------------------------
/pkg/utils/labels.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | // HasMatchingLabels reports whether every key/value pair in wantLabels is present in l
4 | func HasMatchingLabels(l, wantLabels map[string]string) bool {
5 | if len(l) < len(wantLabels) {
6 | return false
7 | }
8 |
9 | for k, v := range wantLabels {
10 | if l[k] != v {
11 | return false
12 | }
13 | }
14 | return true
15 | }
16 |
--------------------------------------------------------------------------------
/pkg/utils/labels_test.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import "testing"
4 |
5 | func TestHasMatchingLabels(t *testing.T) {
6 | test := []struct {
7 | name string
8 | l, wantLabels map[string]string
9 | want bool
10 | }{
11 | {
12 | name: "no labels",
13 | l: map[string]string{},
14 | wantLabels: map[string]string{},
15 | want: true,
16 | },
17 | {
18 | name: "actual labels is less than want labels",
19 | l: map[string]string{
20 | "foo": "bar",
21 | },
22 | wantLabels: map[string]string{
23 | "foo": "bar",
24 | "baz": "qux",
25 | },
26 | want: false,
27 | },
28 | {
29 | name: "actual labels don't match want labels",
30 | l: map[string]string{
31 | "foo": "bar",
32 | },
33 | wantLabels: map[string]string{
34 | "baz": "qux",
35 | },
36 | want: false,
37 | },
38 | {
39 | name: "actual labels match want labels",
40 | l: map[string]string{
41 | "foo": "bar",
42 | "baz": "qux",
43 | },
44 | wantLabels: map[string]string{
45 | "foo": "bar",
46 | "baz": "qux",
47 | },
48 | want: true,
49 | },
50 | {
51 | name: "want labels is a subset of actual labels",
52 | l: map[string]string{
53 | "foo": "bar",
54 | "baz": "qux",
55 | },
56 | wantLabels: map[string]string{
57 | "foo": "bar",
58 | },
59 | want: true,
60 | },
61 | }
62 |
63 | for _, tt := range test {
64 | t.Run(tt.name, func(t *testing.T) {
65 | got := HasMatchingLabels(tt.l, tt.wantLabels)
66 | if got != tt.want {
67 | t.Errorf("HasMatchingLabels(%v, %v) = %v, want %v", tt.l, tt.wantLabels, got, tt.want)
68 | }
69 | })
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/test/e2e/kind-config.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | nodes:
4 | - role: control-plane
5 | - role: worker
6 | kubeadmConfigPatches:
7 | - |
8 | kind: JoinConfiguration
9 | nodeRegistration:
10 | kubeletExtraArgs:
11 | node-labels: "node=want"
12 | - role: worker
13 | kubeadmConfigPatches:
14 | - |
15 | kind: JoinConfiguration
16 | nodeRegistration:
17 | kubeletExtraArgs:
18 | node-labels: "node=want"
19 | - role: worker
20 | kubeadmConfigPatches:
21 | - |
22 | kind: JoinConfiguration
23 | nodeRegistration:
24 | kubeletExtraArgs:
25 | node-labels: "node=unwant"
26 |
--------------------------------------------------------------------------------
/test/e2e/kubectl.go:
--------------------------------------------------------------------------------
1 | //go:build e2e
2 | // +build e2e
3 |
4 | // https://raw.githubusercontent.com/Azure/secrets-store-csi-driver-provider-azure/master/test/e2e/framework/exec/kubectl.go
5 |
6 | package e2e
7 |
8 | import (
9 | "fmt"
10 | "os/exec"
11 | "strings"
12 |
13 | "k8s.io/klog/v2"
14 | )
15 |
16 | // KubectlApply executes "kubectl apply" given a list of arguments.
17 | func KubectlApply(kubeconfigPath, namespace string, args []string) error {
18 | args = append([]string{
19 | "apply",
20 | fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
21 | fmt.Sprintf("--namespace=%s", namespace),
22 | }, args...)
23 |
24 | _, err := kubectl(args)
25 | return err
26 | }
27 |
28 | // KubectlDelete executes "kubectl delete" given a list of arguments.
29 | func KubectlDelete(kubeconfigPath, namespace string, args []string) error {
30 | args = append([]string{
31 | "delete",
32 | fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
33 | fmt.Sprintf("--namespace=%s", namespace),
34 | }, args...)
35 |
36 | _, err := kubectl(args)
37 | return err
38 | }
39 |
40 | // KubectlExec executes "kubectl exec" given a list of arguments.
41 | func KubectlExec(kubeconfigPath, podName, namespace string, args []string) (string, error) {
42 | args = append([]string{
43 | "exec",
44 | fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
45 | fmt.Sprintf("--namespace=%s", namespace),
46 | "--request-timeout=5s",
47 | podName,
48 | "--",
49 | }, args...)
50 |
51 | return kubectl(args)
52 | }
53 |
54 | // KubectlLogs executes "kubectl logs" given a list of arguments.
55 | func KubectlLogs(kubeconfigPath, podName, containerName, namespace string) (string, error) {
56 | args := []string{
57 | "logs",
58 | fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
59 | fmt.Sprintf("--namespace=%s", namespace),
60 | podName,
61 | }
62 |
63 | if containerName != "" {
64 | args = append(args, fmt.Sprintf("-c=%s", containerName))
65 | }
66 |
67 | return kubectl(args)
68 | }
69 |
70 | // KubectlDescribe executes "kubectl describe" given a list of arguments.
71 | func KubectlDescribe(kubeconfigPath, podName, namespace string) (string, error) {
72 | args := []string{
73 | "describe",
74 | "pod",
75 | podName,
76 | fmt.Sprintf("--kubeconfig=%s", kubeconfigPath),
77 | fmt.Sprintf("--namespace=%s", namespace),
78 | }
79 | return kubectl(args)
80 | }
81 |
82 | func kubectl(args []string) (string, error) {
83 | klog.Infof("kubectl %s", strings.Join(args, " "))
84 |
85 | cmd := exec.Command("kubectl", args...)
86 | stdoutStderr, err := cmd.CombinedOutput()
87 |
88 | return strings.TrimSpace(string(stdoutStderr)), err
89 | }
90 |
--------------------------------------------------------------------------------
/test/e2e/main_test.go:
--------------------------------------------------------------------------------
1 | //go:build e2e
2 | // +build e2e
3 |
4 | package e2e
5 |
6 | import (
7 | "context"
8 | "fmt"
9 | "os"
10 | "path/filepath"
11 | "testing"
12 | "time"
13 |
14 | appsv1 "k8s.io/api/apps/v1"
15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
16 | "k8s.io/klog/v2"
17 | "sigs.k8s.io/e2e-framework/klient/k8s"
18 | "sigs.k8s.io/e2e-framework/klient/wait"
19 | "sigs.k8s.io/e2e-framework/klient/wait/conditions"
20 | "sigs.k8s.io/e2e-framework/pkg/env"
21 | "sigs.k8s.io/e2e-framework/pkg/envconf"
22 | "sigs.k8s.io/e2e-framework/pkg/envfuncs"
23 | )
24 |
25 | var (
26 | providerResourceDirectory = "manifest_staging/deploy"
27 | providerResource = "kube-scheduler-configuration.yml"
28 | testenv env.Environment
29 | registry = os.Getenv("REGISTRY")
30 | imageName = os.Getenv("IMAGE_NAME")
31 | imageVersion = os.Getenv("IMAGE_VERSION")
32 | )
33 |
34 | func TestMain(m *testing.M) {
35 | testenv = env.NewWithConfig(envconf.New())
36 | // Create KinD Cluster
37 | kindClusterName := envconf.RandomName("placement-policy", 16)
38 | namespace := envconf.RandomName("pp-ns", 16)
39 | testenv.Setup(
40 | envfuncs.CreateKindClusterWithConfig(kindClusterName, "kindest/node:v1.22.2", "kind-config.yaml"),
41 | envfuncs.CreateNamespace(namespace),
42 | envfuncs.LoadDockerImageToCluster(kindClusterName, fmt.Sprintf("%s/%s:%s", registry, imageName, imageVersion)),
43 | deploySchedulerManifest(),
44 | ).Finish( // Cleanup KinD Cluster
45 | envfuncs.DeleteNamespace(namespace),
46 | envfuncs.DestroyKindCluster(kindClusterName),
47 | )
48 | os.Exit(testenv.Run(m))
49 | }
50 |
51 | func deploySchedulerManifest() env.Func {
52 | return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) {
53 | wd, err := os.Getwd()
54 | if err != nil {
55 | return ctx, err
56 | }
57 | providerResourceAbsolutePath, err := filepath.Abs(filepath.Join(wd, "/../../", providerResourceDirectory))
58 | if err != nil {
59 | return ctx, err
60 | }
61 | // start a CRD deployment
62 | if err := KubectlApply(cfg.KubeconfigFile(), "kube-system", []string{"-f", fmt.Sprintf("%s/%s", providerResourceAbsolutePath, providerResource)}); err != nil {
63 | return ctx, err
64 | }
65 | // wait for the deployment to become available
66 | dep := appsv1.Deployment{
67 | ObjectMeta: metav1.ObjectMeta{Name: "pp-plugins-scheduler", Namespace: "kube-system"},
68 | }
69 |
70 | client, err := cfg.NewClient()
71 | if err != nil {
72 | klog.ErrorS(err, "Failed to create new Client")
73 | return ctx, err
74 | }
75 |
76 | if err := wait.For(conditions.New(client.Resources()).ResourceMatch(&dep, func(object k8s.Object) bool {
77 | d := object.(*appsv1.Deployment)
78 | return float64(d.Status.ReadyReplicas)/float64(*d.Spec.Replicas) >= 1
79 | }), wait.WithTimeout(time.Minute*1)); err != nil {
80 |
81 | klog.ErrorS(err, "Failed to deploy placement policy scheduler")
82 | return ctx, err
83 | }
84 |
85 | return ctx, nil
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/test/e2e/utils.go:
--------------------------------------------------------------------------------
1 | //go:build e2e
2 | // +build e2e
3 |
4 | package e2e
5 |
6 | import (
7 | appsv1 "k8s.io/api/apps/v1"
8 | corev1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
11 | )
12 |
13 | const (
14 | schedulerName = "placement-policy-plugins-scheduler"
15 | )
16 |
17 | func newDeployment(namespace, name string, replicas int32, labels map[string]string) *appsv1.Deployment {
18 | return &appsv1.Deployment{
19 | ObjectMeta: metav1.ObjectMeta{
20 | Name: name,
21 | Namespace: namespace,
22 | Labels: labels,
23 | },
24 | Spec: appsv1.DeploymentSpec{
25 | Replicas: &replicas,
26 | Selector: &metav1.LabelSelector{
27 | MatchLabels: labels,
28 | },
29 | Template: corev1.PodTemplateSpec{
30 | ObjectMeta: metav1.ObjectMeta{Labels: labels},
31 | Spec: corev1.PodSpec{
32 | SchedulerName: schedulerName,
33 | Containers: []corev1.Container{
34 | {
35 | Name: "test-deployment",
36 | Image: e2epod.GetDefaultTestImage(),
37 | ImagePullPolicy: corev1.PullIfNotPresent,
38 | Command: []string{"/bin/sleep", "10000"},
39 | },
40 | },
41 | },
42 | },
43 | },
44 | }
45 | }
46 |
47 | func newStatefulSet(namespace, name string, replicas int32, labels map[string]string) *appsv1.StatefulSet {
48 | return &appsv1.StatefulSet{
49 | ObjectMeta: metav1.ObjectMeta{
50 | Name: name,
51 | Namespace: namespace,
52 | Labels: labels,
53 | },
54 | Spec: appsv1.StatefulSetSpec{
55 | Replicas: &replicas,
56 | Selector: &metav1.LabelSelector{
57 | MatchLabels: labels,
58 | },
59 | Template: corev1.PodTemplateSpec{
60 | ObjectMeta: metav1.ObjectMeta{Labels: labels},
61 | Spec: corev1.PodSpec{
62 | SchedulerName: schedulerName,
63 | Containers: []corev1.Container{
64 | {
65 | Name: "test-statefulset",
66 | Image: e2epod.GetDefaultTestImage(),
67 | ImagePullPolicy: corev1.PullIfNotPresent,
68 | Command: []string{"/bin/sleep", "10000"},
69 | },
70 | },
71 | },
72 | },
73 | },
74 | }
75 | }
76 |
--------------------------------------------------------------------------------
/test/integration/main_test.go:
--------------------------------------------------------------------------------
1 | package integration
2 |
3 | import (
4 | "testing"
5 |
6 | "k8s.io/kubernetes/test/integration/framework"
7 | )
8 |
9 | func TestMain(m *testing.M) {
10 | framework.EtcdMain(m.Run)
11 | }
12 |
--------------------------------------------------------------------------------
/test/integration/scheduler.go:
--------------------------------------------------------------------------------
1 | // https://github.com/kubernetes-sigs/scheduler-plugins/blob/478a9cb0867c10821bfac3bf1a2be3434796af81/test/util/scheduler.go
2 |
3 | package integration
4 |
5 | import (
6 | "testing"
7 |
8 | "k8s.io/client-go/informers"
9 | "k8s.io/client-go/tools/events"
10 | "k8s.io/kubernetes/pkg/scheduler"
11 | "k8s.io/kubernetes/pkg/scheduler/profile"
12 | testutils "k8s.io/kubernetes/test/integration/util"
13 | )
14 |
15 | // InitTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
16 | // configuration and other options.
17 | // TODO(Huang-Wei): refactor the same function in the upstream, and remove here.
18 | func InitTestSchedulerWithOptions(
19 | t *testing.T,
20 | testCtx *testutils.TestContext,
21 | startScheduler bool,
22 | opts ...scheduler.Option,
23 | ) *testutils.TestContext {
24 | testCtx.InformerFactory = informers.NewSharedInformerFactory(testCtx.ClientSet, 0)
25 |
26 | var err error
27 | eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
28 | Interface: testCtx.ClientSet.EventsV1(),
29 | })
30 |
31 | testCtx.Scheduler, err = scheduler.New(
32 | testCtx.ClientSet,
33 | testCtx.InformerFactory,
34 | profile.NewRecorderFactory(eventBroadcaster),
35 | testCtx.Ctx.Done(),
36 | opts...,
37 | )
38 |
39 | if err != nil {
40 | t.Fatalf("Couldn't create scheduler: %v", err)
41 | }
42 |
43 | eventBroadcaster.StartRecordingToSink(testCtx.Ctx.Done())
44 |
45 | testCtx.InformerFactory.Start(testCtx.Scheduler.StopEverything)
46 | testCtx.InformerFactory.WaitForCacheSync(testCtx.Scheduler.StopEverything)
47 |
48 | if startScheduler {
49 | go testCtx.Scheduler.Run(testCtx.Ctx)
50 | }
51 |
52 | return testCtx
53 | }
54 |
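In practice InitTestSchedulerWithOptions is handed options that register the out-of-tree plugin and the profiles built from NewDefaultSchedulerComponentConfig (defined in util.go below). A hedged sketch; the test context is assumed to come from the upstream integration utilities, and placementpolicy.Name / placementpolicy.New are assumed to be the plugin's exported name constant and factory:

package integration

import (
	"testing"

	"github.com/Azure/placement-policy-scheduler-plugins/pkg/plugins/placementpolicy"
	"k8s.io/kubernetes/pkg/scheduler"
	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	testutils "k8s.io/kubernetes/test/integration/util"
)

func setUpScheduler(t *testing.T, testCtx *testutils.TestContext) *testutils.TestContext {
	// Default component config; a real test would also enable the plugin at
	// its extension points in the profile before passing it in.
	cfg, err := NewDefaultSchedulerComponentConfig()
	if err != nil {
		t.Fatal(err)
	}

	return InitTestSchedulerWithOptions(t, testCtx, true,
		scheduler.WithProfiles(cfg.Profiles...),
		// Register the out-of-tree plugin with the framework registry.
		scheduler.WithFrameworkOutOfTreeRegistry(frameworkruntime.Registry{
			placementpolicy.Name: placementpolicy.New,
		}),
	)
}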
--------------------------------------------------------------------------------
/test/integration/util.go:
--------------------------------------------------------------------------------
1 | package integration
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 | "k8s.io/apimachinery/pkg/util/intstr"
9 | "k8s.io/client-go/kubernetes"
10 | "k8s.io/klog/v2"
11 | "k8s.io/kube-scheduler/config/v1beta2"
12 | "k8s.io/kubernetes/pkg/scheduler/apis/config"
13 | schdscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
14 | )
15 |
16 | var (
17 | NodeSelectorLabels = map[string]string{"node": "want"}
18 | PodSelectorLabels = map[string]string{"app": "nginx"}
19 | )
20 |
21 | // PodScheduled returns true if a node is assigned to the given pod.
22 | func PodScheduled(c kubernetes.Interface, podNamespace, podName string) bool {
23 | pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
24 | if err != nil {
25 | // This could be a connection error so we want to retry.
26 | klog.ErrorS(err, "Failed to get pod", "pod", klog.KRef(podNamespace, podName))
27 | return false
28 | }
29 | return pod.Spec.NodeName != ""
30 | }
31 |
32 | // MakePlacementPolicy builds a PlacementPolicy with the given enforcement mode, target size, action, name, and namespace.
33 | func MakePlacementPolicy(mode v1alpha1.EnforcementMode, targetSize intstr.IntOrString, action v1alpha1.Action, name, namespace string) *v1alpha1.PlacementPolicy {
34 |
35 | return &v1alpha1.PlacementPolicy{
36 | ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
37 | Spec: v1alpha1.PlacementPolicySpec{
38 | Weight: 100,
39 | EnforcementMode: mode,
40 | PodSelector: &metav1.LabelSelector{
41 | MatchLabels: PodSelectorLabels,
42 | },
43 | NodeSelector: &metav1.LabelSelector{
44 | MatchLabels: NodeSelectorLabels,
45 | },
46 | Policy: &v1alpha1.Policy{Action: action, TargetSize: &targetSize},
47 | },
48 | Status: v1alpha1.PlacementPolicyStatus{},
49 | }
50 | }
51 |
52 | // https://github.com/kubernetes-sigs/scheduler-plugins/blob/478a9cb0867c10821bfac3bf1a2be3434796af81/test/util/framework.go
53 | // NewDefaultSchedulerComponentConfig returns a default scheduler component config object.
54 | // We need this function due to k/k#102796 - the default profile needs to be built manually.
55 | func NewDefaultSchedulerComponentConfig() (config.KubeSchedulerConfiguration, error) {
56 | var versionedCfg v1beta2.KubeSchedulerConfiguration
57 | schdscheme.Scheme.Default(&versionedCfg)
58 | cfg := config.KubeSchedulerConfiguration{}
59 | if err := schdscheme.Scheme.Convert(&versionedCfg, &cfg, nil); err != nil {
60 | return config.KubeSchedulerConfiguration{}, err
61 | }
62 | return cfg, nil
63 | }
64 |
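A short sketch of building a policy for a test with MakePlacementPolicy, assuming illustrative enforcement mode and action values (the canonical strings live in apis/v1alpha1/placementpolicy_types.go):

package integration

import (
	"github.com/Azure/placement-policy-scheduler-plugins/apis/v1alpha1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func examplePolicy() *v1alpha1.PlacementPolicy {
	// 40% of the matching pods should land on nodes labeled node=want.
	// "Strict" and "Must" are illustrative literals; real tests should use
	// the constants defined in placementpolicy_types.go.
	return MakePlacementPolicy(
		v1alpha1.EnforcementMode("Strict"),
		intstr.FromString("40%"),
		v1alpha1.Action("Must"),
		"strict-must", "default",
	)
}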
--------------------------------------------------------------------------------