├── .github ├── release-drafter.yml └── workflows │ ├── kind │ └── kind.yaml │ ├── main.yaml │ ├── push-nm-image.yaml │ ├── push-operator-image.yaml │ └── push-sidecar-image.yaml ├── .gitignore ├── CHANGLOG.md ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── RELEASE.md ├── VERSION ├── adapter ├── Dockerfile ├── Makefile ├── README.md ├── cmd │ └── main.go ├── deploy │ └── yaml │ │ └── adapter.yaml ├── go.mod ├── go.sum ├── pkg │ ├── common │ │ └── types.go │ └── export │ │ ├── interface.go │ │ ├── stdout │ │ └── types.go │ │ └── tivoli │ │ └── types.go └── test │ ├── alert.json │ ├── samples │ ├── Dockerfile │ ├── main.go │ └── socket.yaml │ └── send_alerts.sh ├── apis └── v2beta2 │ ├── common.go │ ├── config_types.go │ ├── config_webhook.go │ ├── groupversion_info.go │ ├── notificationmanager_types.go │ ├── receiver_types.go │ ├── receiver_webhook.go │ ├── router_types.go │ ├── router_webhook.go │ ├── silence_types.go │ ├── silence_webhook.go │ └── zz_generated.deepcopy.go ├── cmd ├── notification-manager │ ├── Dockerfile │ └── main.go └── operator │ ├── Dockerfile │ └── main.go ├── config ├── bundle.yaml ├── cert │ ├── kustomization.yaml │ └── webhook-server-cert.yaml ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── ci │ ├── alerts.json │ └── slack-pr.yaml ├── crd │ ├── bases │ │ ├── notification.kubesphere.io_configs.yaml │ │ ├── notification.kubesphere.io_notificationmanagers.yaml │ │ ├── notification.kubesphere.io_receivers.yaml │ │ ├── notification.kubesphere.io_routers.yaml │ │ └── notification.kubesphere.io_silences.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_configs.yaml │ │ ├── cainjection_in_notificationmanagers.yaml │ │ ├── cainjection_in_receivers.yaml │ │ ├── webhook_in_configs.yaml │ │ ├── webhook_in_notificationmanagers.yaml │ │ └── webhook_in_receivers.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── 
manager_webhook_patch.yaml │ └── webhookcainjection_patch.yaml ├── helm │ └── kustomization.yaml ├── i18n │ └── zh-cn.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── config_editor_role.yaml │ ├── config_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── notificationmanager_editor_role.yaml │ ├── notificationmanager_viewer_role.yaml │ ├── patches │ │ └── patch.yaml │ ├── receiver_editor_role.yaml │ ├── receiver_viewer_role.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml ├── samples │ ├── bundle.yaml │ ├── default_config.yaml │ ├── global_receiver.yaml │ ├── kustomization.yaml │ ├── notification_manager.yaml │ ├── sms.yaml │ ├── template.yaml │ └── tenant-sidecar-role.yaml ├── update │ └── update.sh └── webhook │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ ├── manifests.yaml │ └── service.yaml ├── controllers ├── notificationmanager_controller.go └── suite_test.go ├── docs ├── api │ └── _index.md ├── crds │ ├── config.md │ ├── credential.md │ ├── notification-manager.md │ ├── receiver.md │ ├── router.md │ └── silence.md ├── images │ ├── architecture.svg │ ├── logo.png │ ├── notification-manager.png │ ├── pipeline.svg │ └── receivers_configs.png ├── proposals │ └── Integrate-SMS-Service-Crd-For-Notification-Manager.md └── template.md ├── go.mod ├── go.sum ├── hack ├── boilerplate.go.txt ├── generate-cert.sh └── openssl.cnf ├── helm ├── .helmignore ├── Chart.yaml ├── README.md ├── crds │ └── bundle.yaml ├── templates │ ├── _helpers.tpl │ ├── clusterrolebindings.yaml │ ├── clusterroles.yaml │ ├── notificationmanagers.yaml │ ├── operator.yaml │ ├── rolebindings.yaml │ ├── roles.yaml │ ├── serviceaccount.yaml │ ├── services.yaml │ ├── template.yaml │ ├── 
validating.yaml │ └── zh-cn.yaml └── values.yaml ├── pkg ├── aggregation │ └── aggregation.go ├── async │ └── group.go ├── constants │ └── constants.go ├── controller │ ├── controller.go │ └── factories.go ├── dispatcher │ └── dispatcher.go ├── filter │ └── filter.go ├── history │ └── history.go ├── internal │ ├── common.go │ ├── dingtalk │ │ └── types.go │ ├── discord │ │ └── types.go │ ├── email │ │ └── types.go │ ├── feishu │ │ └── types.go │ ├── interface.go │ ├── pushover │ │ └── types.go │ ├── slack │ │ └── types.go │ ├── sms │ │ └── types.go │ ├── telegram │ │ └── types.go │ ├── webhook │ │ └── types.go │ └── wechat │ │ └── types.go ├── notify │ ├── notifier │ │ ├── dingtalk │ │ │ ├── dingtalk.go │ │ │ └── throttle.go │ │ ├── discord │ │ │ └── discord.go │ │ ├── email │ │ │ └── email.go │ │ ├── feishu │ │ │ └── feishu.go │ │ ├── interface.go │ │ ├── pushover │ │ │ └── pushover.go │ │ ├── slack │ │ │ └── slack.go │ │ ├── sms │ │ │ ├── aliyun.go │ │ │ ├── aws.go │ │ │ ├── huawei.go │ │ │ ├── interface.go │ │ │ ├── sms.go │ │ │ └── tencent.go │ │ ├── telegram │ │ │ └── telegram.go │ │ ├── token.go │ │ ├── webhook │ │ │ └── webhook.go │ │ └── wechat │ │ │ └── wechat.go │ └── notify.go ├── route │ └── router.go ├── silence │ └── silence.go ├── stage │ └── stage.go ├── store │ ├── provider │ │ ├── interface.go │ │ └── memory │ │ │ └── memory.go │ └── store.go ├── template │ ├── language.go │ ├── template.go │ └── types.go ├── utils │ ├── error.go │ ├── hash.go │ ├── http.go │ ├── json.go │ └── string.go └── webhook │ ├── v1 │ └── handler.go │ └── webhook.go ├── sidecar ├── kubernetes │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── cmd │ │ └── main.go │ └── test │ │ └── get-tenants.sh └── kubesphere │ ├── 3.1.0 │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── cmd │ │ └── main.go │ ├── go.mod │ ├── go.sum │ ├── pkg │ │ ├── controller │ │ │ └── controller.go │ │ ├── ks │ │ │ └── runtime.go │ │ └── tenant │ │ │ └── tenant.go │ └── test │ │ └── 
get-tenants.sh │ ├── 3.2.0 │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── cmd │ │ └── main.go │ ├── go.mod │ ├── go.sum │ ├── pkg │ │ ├── controller │ │ │ └── controller.go │ │ ├── ks │ │ │ └── runtime.go │ │ └── tenant │ │ │ └── tenant.go │ └── test │ │ └── get-tenants.sh │ └── 4.0.0 │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── backend.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── test │ └── get-tenants.sh └── test ├── send_alerts.sh └── testdata ├── alert-auditing.json ├── alert.json ├── alert1.json ├── alert10.json ├── alert11.json ├── alert12.json ├── alert13.json ├── alert14.json ├── alert15.json ├── alert2.json ├── alert3.json ├── alert4.json ├── alert5.json ├── alert6.json ├── alert7.json ├── alert8.json ├── alert9.json └── alerts-without-namespace.json /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: 'v$RESOLVED_VERSION' 2 | tag-template: 'v$RESOLVED_VERSION' 3 | template: | 4 | # What's Changed 5 | 6 | $CHANGES 7 | 8 | **Full Changelog**: https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION 9 | 10 | categories: 11 | - title: '💥 Breaking' 12 | label: 'type: breaking' 13 | - title: '✨ New' 14 | label: 'type: feature' 15 | - title: '🐛 Bug Fixes' 16 | label: 'type: bug' 17 | - title: '🏗️ Maintenance' 18 | label: 'type: maintenance' 19 | - title: '🔒 Security' 20 | label: 'type: security' 21 | - title: '👷 CI/CD' 22 | label: 'type: cicd' 23 | - title: '📝 Documentation' 24 | label: 'type: docs' 25 | - title: 'Other changes' 26 | - title: '🏷️ Dependency Updates' 27 | label: 'type: dependencies' 28 | collapse-after: 5 29 | 30 | version-resolver: 31 | major: 32 | labels: 33 | - 'type: breaking' 34 | minor: 35 | labels: 36 | - 'type: feature' 37 | patch: 38 | labels: 39 | - 'type: bug' 40 | - 'type: maintenance' 41 | - 'type: docs' 42 | - 'type: dependencies' 43 | - 'type: cicd' 44 | 45 | exclude-labels: 46 | - 'skip-changelog' 
-------------------------------------------------------------------------------- /.github/workflows/kind/kind.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | image: kindest/node:v1.21.2 6 | extraMounts: 7 | - hostPath: /etc/localtime 8 | containerPath: /etc/localtime 9 | # extraPortMappings: 10 | # - containerPort: 19093 11 | # hostPort: 19093 -------------------------------------------------------------------------------- /.github/workflows/push-nm-image.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2022 The Notification-Manager Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | 17 | name: WorkFlow for Building notification-manager image 18 | 19 | on: 20 | push: 21 | branches: 22 | - 'master' 23 | tags: 24 | - 'v*' 25 | paths: 26 | - '.github/workflows/**' 27 | - 'cmd/notification-manager/**' 28 | - 'pkg/**' 29 | - 'go.mod' 30 | - 'go.sum' 31 | 32 | env: 33 | REPO_NM: 'kubesphere' 34 | 35 | jobs: 36 | build: 37 | runs-on: ubuntu-latest 38 | timeout-minutes: 30 39 | name: Build Notification-Manager Image 40 | steps: 41 | - name: Install Go 42 | uses: actions/setup-go@v2 43 | with: 44 | go-version: 1.20.x 45 | 46 | - uses: actions/cache@v4 47 | with: 48 | path: ~/go/pkg/mod 49 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 50 | 51 | - name: Checkout code 52 | uses: actions/checkout@v2 53 | with: 54 | fetch-depth: 0 55 | 56 | - name: Set up QEMU 57 | id: qemu 58 | uses: docker/setup-qemu-action@v1 59 | with: 60 | image: tonistiigi/binfmt:latest 61 | platforms: all 62 | 63 | - name: Login to Docker Hub 64 | uses: docker/login-action@v1 65 | with: 66 | username: ${{ secrets.REGISTRY_USER }} 67 | password: ${{ secrets.REGISTRY_PASSWORD }} 68 | 69 | - name: Set up Docker Buildx 70 | id: buildx 71 | uses: docker/setup-buildx-action@v1 72 | 73 | - name: Build and Push image 74 | run: | 75 | tag=$(cat VERSION | tr -d " \t\n\r") 76 | make build-nm -e NM_IMG=${{ env.REPO_NM }}/notification-manager:$tag 77 | -------------------------------------------------------------------------------- /.github/workflows/push-operator-image.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2022 The Notification-Manager Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | name: WorkFlow for Building operator image 18 | 19 | on: 20 | push: 21 | branches: 22 | - 'master' 23 | tags: 24 | - 'v*' 25 | paths: 26 | - '.github/workflows/**' 27 | - 'controllers/**' 28 | - 'cmd/operator/**' 29 | - 'pkg/**' 30 | - 'go.sum' 31 | - 'go.mod' 32 | 33 | env: 34 | REPO_OP: 'kubesphere' 35 | 36 | jobs: 37 | build: 38 | runs-on: ubuntu-latest 39 | timeout-minutes: 30 40 | name: Build Operator Image 41 | steps: 42 | - name: Install Go 43 | uses: actions/setup-go@v2 44 | with: 45 | go-version: 1.20.x 46 | 47 | - uses: actions/cache@v4 48 | with: 49 | path: ~/go/pkg/mod 50 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 51 | 52 | - name: Checkout code 53 | uses: actions/checkout@v2 54 | with: 55 | fetch-depth: 0 56 | 57 | - name: Set up QEMU 58 | id: qemu 59 | uses: docker/setup-qemu-action@v1 60 | with: 61 | image: tonistiigi/binfmt:latest 62 | platforms: all 63 | 64 | - name: Login to Docker Hub 65 | uses: docker/login-action@v1 66 | with: 67 | username: ${{ secrets.REGISTRY_USER }} 68 | password: ${{ secrets.REGISTRY_PASSWORD }} 69 | 70 | - name: Set up Docker Buildx 71 | id: buildx 72 | uses: docker/setup-buildx-action@v1 73 | 74 | - name: Build and Push image 75 | run: | 76 | tag=$(cat VERSION | tr -d " \t\n\r") 77 | make build-op -e IMG=${{ env.REPO_OP }}/notification-manager-operator:$tag -------------------------------------------------------------------------------- /.github/workflows/push-sidecar-image.yaml: 
-------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2022 The Notification-Manager Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | name: WorkFlow for Building sidecar image 18 | 19 | on: 20 | push: 21 | branches: 22 | - 'master' 23 | paths: 24 | - '.github/workflows/push-sidecar-image.yaml' 25 | - 'sidecar/kubesphere/4.0.0/backend.go' 26 | - 'sidecar/kubesphere/4.0.0/Dockerfile' 27 | - 'sidecar/kubesphere/4.0.0/main.go' 28 | - 'sidecar/kubesphere/4.0.0/Makefile' 29 | - 'sidecar/kubesphere/4.0.0/go.sum' 30 | - 'sidecar/kubesphere/4.0.0/go.mod' 31 | 32 | env: 33 | REPO_OP: 'kubesphere' 34 | 35 | jobs: 36 | build: 37 | runs-on: ubuntu-latest 38 | timeout-minutes: 30 39 | name: Build Operator Image 40 | steps: 41 | - name: Install Go 42 | uses: actions/setup-go@v2 43 | with: 44 | go-version: 1.20.x 45 | 46 | - uses: actions/cache@v4 47 | with: 48 | path: ~/go/pkg/mod 49 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 50 | 51 | - name: Checkout code 52 | uses: actions/checkout@v2 53 | with: 54 | fetch-depth: 0 55 | 56 | - name: Set up QEMU 57 | id: qemu 58 | uses: docker/setup-qemu-action@v1 59 | with: 60 | image: tonistiigi/binfmt:latest 61 | platforms: all 62 | 63 | - name: Login to Docker Hub 64 | uses: docker/login-action@v1 65 | with: 66 | username: ${{ secrets.REGISTRY_USER }} 67 | password: ${{ secrets.REGISTRY_PASSWORD }} 68 | 69 | - name: 
Set up Docker Buildx 70 | id: buildx 71 | uses: docker/setup-buildx-action@v1 72 | 73 | - name: Build and Push image 74 | run: | 75 | cd sidecar/kubesphere/4.0.0 && make -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | 10 | # Test binary, build with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Kubernetes Generated files - skip generated files, except for vendored files 17 | 18 | !vendor/**/zz_generated.* 19 | 20 | # editor and IDE paraphernalia 21 | .idea 22 | *.swp 23 | *.swo 24 | *~ 25 | 26 | # text files 27 | *.log 28 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: kubesphere.io 2 | repo: github.com/kubesphere/notification-manager 3 | resources: 4 | - group: notification 5 | kind: NotificationManager 6 | version: v2beta2 7 | - group: notification 8 | kind: Config 9 | version: v2beta2 10 | - group: notification 11 | kind: Receiver 12 | version: v2beta2 13 | - group: notification 14 | kind: Router 15 | version: v2beta2 16 | - group: notification 17 | kind: Silence 18 | version: v2beta2 19 | version: "2" 20 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | latest -------------------------------------------------------------------------------- /adapter/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 
2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 6 | # Use of this source code is governed by a Apache license 7 | # that can be found in the LICENSE file. 8 | 9 | FROM golang:1.17 as notification-adapter 10 | 11 | COPY / / 12 | WORKDIR / 13 | ENV GOPROXY=https://goproxy.io 14 | RUN CGO_ENABLED=0 GO111MODULE=on go build -i -ldflags '-w -s' -o notification-adapter cmd/main.go 15 | 16 | # Use distroless as minimal base image to package the manager binary 17 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 18 | FROM kubesphere/distroless-static:nonroot 19 | WORKDIR / 20 | COPY --from=notification-adapter /notification-adapter . 21 | USER nonroot:nonroot 22 | 23 | ENTRYPOINT ["/notification-adapter"] 24 | -------------------------------------------------------------------------------- /adapter/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | IMG ?= kubespheredev/notification-adapter:v0.2.1 6 | AMD64 ?= -amd64 7 | 8 | all: docker-build 9 | 10 | # Build notification-adapter binary 11 | notification-adapter: 12 | go build -o notification-adapter cmd/main.go 13 | 14 | # Build the docker image 15 | docker-build: 16 | docker buildx build --platform linux/amd64,linux/arm64 --push -f Dockerfile -t ${IMG} . 17 | 18 | # Build the docker image for arm64 19 | docker-build-amd64: 20 | docker build -f Dockerfile -t ${IMG}${AMD64} . 
21 | 22 | # Push the docker image 23 | push-amd64: 24 | docker push ${IMG}${AMD64} 25 | -------------------------------------------------------------------------------- /adapter/README.md: -------------------------------------------------------------------------------- 1 | # notification-adapter 2 | 3 | Notification-adapter receive notifications from notification manager and export to the exporters. 4 | Now it supports to export notifications to tivoli and stdout. -------------------------------------------------------------------------------- /adapter/deploy/yaml/adapter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: notification-adapter 5 | namespace: kubesphere-monitoring-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: notification-adapter 11 | template: 12 | metadata: 13 | labels: 14 | name: notification-adapter 15 | spec: 16 | containers: 17 | - name: notification-adapter 18 | image: kubespheredev/notification-adapter:v0.2.0 19 | args: 20 | - --with-stdout=true 21 | imagePullPolicy: Always 22 | env: 23 | - name: TZ 24 | value: GMT 25 | lifecycle: 26 | preStop: 27 | httpGet: 28 | path: /preStop 29 | port: 8080 30 | scheme: HTTP 31 | livenessProbe: 32 | failureThreshold: 3 33 | httpGet: 34 | path: /readiness 35 | port: 8080 36 | scheme: HTTP 37 | readinessProbe: 38 | failureThreshold: 3 39 | httpGet: 40 | path: /readiness 41 | port: 8080 42 | scheme: HTTP 43 | resources: 44 | limits: 45 | cpu: 200m 46 | memory: 500Mi 47 | requests: 48 | cpu: 20m 49 | memory: 50Mi 50 | --- 51 | apiVersion: v1 52 | kind: Service 53 | metadata: 54 | name: notification-adapter 55 | namespace: kubesphere-monitoring-system 56 | spec: 57 | selector: 58 | name: notification-adapter 59 | ports: 60 | - name: http 61 | port: 8080 62 | targetPort: 8080 63 | -------------------------------------------------------------------------------- /adapter/go.mod: 
-------------------------------------------------------------------------------- 1 | module adapter 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/emicklei/go-restful v2.16.0+incompatible 7 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b 8 | github.com/json-iterator/go v1.1.12 9 | github.com/prometheus/alertmanager v0.25.1 10 | github.com/prometheus/common v0.38.0 11 | github.com/spf13/cobra v0.0.5 12 | github.com/spf13/pflag v1.0.5 13 | golang.org/x/text v0.5.0 14 | ) 15 | -------------------------------------------------------------------------------- /adapter/pkg/common/types.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/golang/glog" 7 | jsoniter "github.com/json-iterator/go" 8 | "github.com/prometheus/alertmanager/template" 9 | ) 10 | 11 | type Alert struct { 12 | *template.Alert 13 | NotificationTime time.Time `json:"notificationTime"` 14 | } 15 | 16 | func NewAlerts(data []byte) ([]*Alert, error) { 17 | 18 | var d template.Data 19 | 20 | err := jsoniter.Unmarshal(data, &d) 21 | if err != nil { 22 | glog.Errorf("unmarshal failed with:%v,body is: %s", err, string(data)) 23 | return nil, err 24 | } 25 | 26 | var as []*Alert 27 | for _, a := range d.Alerts { 28 | alert := a 29 | as = append(as, &Alert{ 30 | &alert, 31 | time.Now(), 32 | }) 33 | } 34 | 35 | return as, nil 36 | } 37 | -------------------------------------------------------------------------------- /adapter/pkg/export/interface.go: -------------------------------------------------------------------------------- 1 | package export 2 | 3 | import "adapter/pkg/common" 4 | 5 | type Exporter interface { 6 | Export(alerts []*common.Alert) error 7 | Close() error 8 | } 9 | -------------------------------------------------------------------------------- /adapter/pkg/export/stdout/types.go: -------------------------------------------------------------------------------- 1 | package stdout 2 
| 3 | import ( 4 | "adapter/pkg/common" 5 | "adapter/pkg/export" 6 | "fmt" 7 | 8 | jsoniter "github.com/json-iterator/go" 9 | ) 10 | 11 | const ( 12 | Status = "status" 13 | StartsAt = "startsAt" 14 | EndsAt = "endsAt" 15 | NotificationTime = "notificationTime" 16 | RunbookURL = "runbook_url" 17 | Message = "message" 18 | Summary = "summary" 19 | SummaryCn = "summaryCn" 20 | ) 21 | 22 | type exporter struct { 23 | } 24 | 25 | func NewExporter() export.Exporter { 26 | 27 | return &exporter{} 28 | } 29 | 30 | func (e *exporter) Export(alerts []*common.Alert) error { 31 | 32 | for _, alert := range alerts { 33 | fmt.Println(alertToString(alert)) 34 | } 35 | 36 | return nil 37 | } 38 | 39 | func (e *exporter) Close() error { 40 | return nil 41 | } 42 | 43 | func alertToString(a *common.Alert) string { 44 | 45 | m := make(map[string]interface{}) 46 | 47 | m[Status] = a.Status 48 | m[StartsAt] = a.StartsAt 49 | m[EndsAt] = a.EndsAt 50 | m[NotificationTime] = a.NotificationTime 51 | 52 | for k, v := range a.Labels { 53 | m[k] = v 54 | } 55 | 56 | for k, v := range a.Annotations { 57 | if k != RunbookURL && k != Message && k != Summary && k != SummaryCn { 58 | m[k] = v 59 | } 60 | } 61 | 62 | message := a.Annotations[Message] 63 | if message == "" { 64 | message = a.Annotations[Summary] 65 | if message == "" { 66 | message = a.Annotations[SummaryCn] 67 | } 68 | } 69 | m[Message] = message 70 | 71 | bs, err := jsoniter.Marshal(m) 72 | if err != nil { 73 | fmt.Println(err) 74 | return "" 75 | } 76 | 77 | return string(bs) 78 | } 79 | -------------------------------------------------------------------------------- /adapter/test/samples/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 
6 | # Use of this source code is governed by a Apache license 7 | # that can be found in the LICENSE file. 8 | 9 | FROM golang:1.13 as socket-server 10 | 11 | COPY / / 12 | WORKDIR / 13 | ENV GOPROXY=https://goproxy.io 14 | RUN CGO_ENABLED=0 GO111MODULE=on go build -i -ldflags '-w -s' -o socket-server main.go 15 | 16 | FROM alpine:3.9 17 | 18 | COPY --from=socket-server /socket-server /usr/local/bin/ 19 | 20 | RUN apk add --update ca-certificates && update-ca-certificates 21 | RUN apk add curl 22 | RUN adduser -D -g kubesphere -u 1002 kubesphere 23 | RUN chown -R kubesphere:kubesphere /usr/local/bin/socket-server 24 | RUN apk add libcap 25 | RUN setcap 'CAP_NET_BIND_SERVICE=+ep' /usr/local/bin/socket-server 26 | 27 | USER kubesphere 28 | CMD ["sh"] 29 | -------------------------------------------------------------------------------- /adapter/test/samples/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2020 The KubeSphere Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "bytes" 21 | "flag" 22 | "fmt" 23 | "github.com/golang/glog" 24 | "github.com/spf13/cobra" 25 | "github.com/spf13/pflag" 26 | "golang.org/x/text/encoding/simplifiedchinese" 27 | "golang.org/x/text/transform" 28 | "io/ioutil" 29 | "log" 30 | "net" 31 | ) 32 | 33 | var ( 34 | port int 35 | ) 36 | 37 | func AddFlags(fs *pflag.FlagSet) { 38 | fs.IntVar(&port, "port", 8080, "Socket port") 39 | } 40 | 41 | func main() { 42 | cmd := newServerCommand() 43 | 44 | if err := cmd.Execute(); err != nil { 45 | log.Fatalln(err) 46 | } 47 | } 48 | 49 | func newServerCommand() *cobra.Command { 50 | cmd := &cobra.Command{ 51 | Use: "notification-adapter", 52 | Long: `The webhook to receive alert from notification manager, and send to socket`, 53 | RunE: func(cmd *cobra.Command, args []string) error { 54 | return Run() 55 | }, 56 | } 57 | AddFlags(cmd.Flags()) 58 | cmd.Flags().AddGoFlagSet(flag.CommandLine) 59 | 60 | return cmd 61 | } 62 | 63 | func Run() error { 64 | 65 | pflag.VisitAll(func(flag *pflag.Flag) { 66 | glog.Errorf("FLAG: --%s=%q", flag.Name, flag.Value) 67 | }) 68 | 69 | l, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%d", port)) 70 | if err != nil { 71 | return err 72 | } 73 | 74 | for { 75 | conn, err := l.Accept() 76 | if err != nil { 77 | fmt.Printf("accept error, %s\n", err.Error()) 78 | continue 79 | } 80 | 81 | go func() { 82 | bs, err := ioutil.ReadAll(conn) 83 | if err != nil { 84 | fmt.Printf("read error, %s\n", err.Error()) 85 | return 86 | } 87 | 88 | reader := transform.NewReader(bytes.NewReader(bs), simplifiedchinese.GBK.NewDecoder()) 89 | d, err := ioutil.ReadAll(reader) 90 | if err != nil { 91 | fmt.Printf("transform error, %s\n", err.Error()) 92 | return 93 | } 94 | 95 | fmt.Println(string(d)) 96 | }() 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /adapter/test/samples/socket.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: socket-server 5 | namespace: kubesphere-monitoring-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: socket-server 11 | template: 12 | metadata: 13 | labels: 14 | name: socket-server 15 | spec: 16 | containers: 17 | - name: socket-server 18 | image: kubespheredev/socket-server:latest 19 | command: 20 | - socket-server 21 | imagePullPolicy: Always 22 | env: 23 | - name: NAMESPACE 24 | valueFrom: 25 | fieldRef: 26 | apiVersion: v1 27 | fieldPath: metadata.namespace 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | name: socket-server 33 | namespace: kubesphere-monitoring-system 34 | spec: 35 | selector: 36 | name: socket-server 37 | ports: 38 | - name: http 39 | port: 8080 40 | targetPort: 8080 41 | -------------------------------------------------------------------------------- /adapter/test/send_alerts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | curl -XPOST -H 'Content-type':'application/json' -d @alert.json http://127.0.0.1:8080/alerts 3 | -------------------------------------------------------------------------------- /apis/v2beta2/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Package v2beta2 contains API Schema definitions for the notification v1alpha1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=notification.kubesphere.io 20 | package v2beta2 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "notification.kubesphere.io", Version: "v2beta2"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /apis/v2beta2/router_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v2beta2 18 | 19 | import ( 20 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 21 | ) 22 | 23 | type Channel struct { 24 | Tenant string `json:"tenant"` 25 | // Receiver type, known values are dingtalk, email, slack, sms, pushover, webhook, wechat. 
26 | Type []string `json:"type,omitempty"` 27 | } 28 | 29 | type ReceiverSelector struct { 30 | Name []string `json:"name,omitempty"` 31 | RegexName string `json:"regexName,omitempty"` 32 | Selector *LabelSelector `json:"selector,omitempty"` 33 | Channels []Channel `json:"channels,omitempty"` 34 | // Receiver type, known values are dingtalk, email, slack, sms, pushover, webhook, wechat. 35 | Type string `json:"type,omitempty"` 36 | } 37 | 38 | // RouterSpec defines the desired state of Router 39 | type RouterSpec struct { 40 | // whether the router is enabled 41 | Enabled *bool `json:"enabled,omitempty"` 42 | AlertSelector *LabelSelector `json:"alertSelector"` 43 | // Receivers which need to receive the matched alert. 44 | Receivers ReceiverSelector `json:"receivers"` 45 | } 46 | 47 | // RouterStatus defines the observed state of Router 48 | type RouterStatus struct { 49 | } 50 | 51 | // +kubebuilder:object:root=true 52 | // +kubebuilder:resource:scope=Cluster,categories=notification-manager 53 | // +kubebuilder:subresource:status 54 | // +kubebuilder:storageversion 55 | 56 | // Router is the Schema for the router API 57 | type Router struct { 58 | metav1.TypeMeta `json:",inline"` 59 | metav1.ObjectMeta `json:"metadata,omitempty"` 60 | 61 | Spec RouterSpec `json:"spec,omitempty"` 62 | Status RouterStatus `json:"status,omitempty"` 63 | } 64 | 65 | // +kubebuilder:object:root=true 66 | 67 | // RouterList contains a list of Router 68 | type RouterList struct { 69 | metav1.TypeMeta `json:",inline"` 70 | metav1.ListMeta `json:"metadata,omitempty"` 71 | Items []Router `json:"items"` 72 | } 73 | 74 | func init() { 75 | SchemeBuilder.Register(&Router{}, &RouterList{}) 76 | } 77 | -------------------------------------------------------------------------------- /apis/v2beta2/router_webhook.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use 
this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v2beta2 18 | 19 | import ( 20 | "regexp" 21 | 22 | "k8s.io/apimachinery/pkg/api/errors" 23 | "k8s.io/apimachinery/pkg/runtime" 24 | "k8s.io/apimachinery/pkg/runtime/schema" 25 | "k8s.io/apimachinery/pkg/util/validation/field" 26 | ctrl "sigs.k8s.io/controller-runtime" 27 | "sigs.k8s.io/controller-runtime/pkg/webhook" 28 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 29 | ) 30 | 31 | func (r *Router) SetupWebhookWithManager(mgr ctrl.Manager) error { 32 | return ctrl.NewWebhookManagedBy(mgr). 33 | For(r). 
34 | Complete() 35 | } 36 | 37 | // +kubebuilder:webhook:verbs=create;update,mutating=false,failurePolicy=fail,groups=notification.kubesphere.io,resources=routers,versions=v2beta2 38 | var _ webhook.Validator = &Router{} 39 | 40 | // ValidateCreate implements webhook.Validator so a webhook will be registered for the type 41 | func (r *Router) ValidateCreate() (warnings admission.Warnings, err error) { 42 | 43 | return r.validateRouter() 44 | } 45 | 46 | // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type 47 | func (r *Router) ValidateUpdate(_ runtime.Object) (warnings admission.Warnings, err error) { 48 | return r.validateRouter() 49 | } 50 | 51 | // ValidateDelete implements webhook.Validator so a webhook will be registered for the type 52 | func (r *Router) ValidateDelete() (warnings admission.Warnings, err error) { 53 | return admission.Warnings{}, nil 54 | } 55 | 56 | func (r *Router) validateRouter() (warnings admission.Warnings, err error) { 57 | var allErrs field.ErrorList 58 | 59 | if err := validateSelector(r.Spec.AlertSelector); err != nil { 60 | allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "alertSelector"), r.Spec.AlertSelector, err.Error())) 61 | } 62 | 63 | if err := validateSelector(r.Spec.Receivers.Selector); err != nil { 64 | allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "receivers", "alertSelector"), r.Spec.Receivers.Selector, err.Error())) 65 | } 66 | 67 | if r.Spec.Receivers.RegexName != "" { 68 | if _, err := regexp.Compile(r.Spec.Receivers.RegexName); err != nil { 69 | allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "receivers", "regexName"), r.Spec.Receivers.RegexName, err.Error())) 70 | } 71 | } 72 | 73 | if allErrs == nil || len(allErrs) == 0 { 74 | return admission.Warnings{}, nil 75 | } 76 | 77 | return admission.Warnings{}, errors.NewInvalid( 78 | schema.GroupKind{Group: "notification.kubesphere.io", Kind: "Receiver"}, 79 | r.Name, allErrs) 80 | } 81 | 
-------------------------------------------------------------------------------- /apis/v2beta2/silence_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v2beta2 18 | 19 | import ( 20 | "time" 21 | 22 | "github.com/kubesphere/notification-manager/pkg/utils" 23 | "github.com/robfig/cron/v3" 24 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | ) 26 | 27 | // SilenceSpec defines the desired state of Silence 28 | type SilenceSpec struct { 29 | // whether the silence is enabled 30 | Enabled *bool `json:"enabled,omitempty"` 31 | Matcher *LabelSelector `json:"matcher"` 32 | // The start time during which the silence is active. 33 | // 34 | // +kubebuilder:validation:Format: date-time 35 | StartsAt *metav1.Time `json:"startsAt,omitempty"` 36 | // The schedule in Cron format. 37 | // If set the silence will be active periodicity, and the startsAt will be invalid. 38 | Schedule string `json:"schedule,omitempty"` 39 | // The time range during which the silence is active. 40 | // If not set, the silence will be active ever. 
41 | Duration *metav1.Duration `json:"duration,omitempty"` 42 | } 43 | 44 | // SilenceStatus defines the observed state of Silence 45 | type SilenceStatus struct { 46 | } 47 | 48 | // +kubebuilder:object:root=true 49 | // +kubebuilder:resource:scope=Cluster,categories=notification-manager 50 | // +kubebuilder:subresource:status 51 | // +kubebuilder:storageversion 52 | 53 | // Silence is the Schema for the Silence API 54 | type Silence struct { 55 | metav1.TypeMeta `json:",inline"` 56 | metav1.ObjectMeta `json:"metadata,omitempty"` 57 | 58 | Spec SilenceSpec `json:"spec,omitempty"` 59 | Status SilenceStatus `json:"status,omitempty"` 60 | } 61 | 62 | // +kubebuilder:object:root=true 63 | 64 | // SilenceList contains a list of Silence 65 | type SilenceList struct { 66 | metav1.TypeMeta `json:",inline"` 67 | metav1.ListMeta `json:"metadata,omitempty"` 68 | Items []Silence `json:"items"` 69 | } 70 | 71 | func init() { 72 | SchemeBuilder.Register(&Silence{}, &SilenceList{}) 73 | } 74 | 75 | func (s *Silence) IsActive() bool { 76 | 77 | if s.Spec.Enabled != nil && !*s.Spec.Enabled { 78 | return false 79 | } 80 | 81 | if !utils.StringIsNil(s.Spec.Schedule) { 82 | 83 | if s.Spec.Duration == nil { 84 | return true 85 | } 86 | 87 | schedule, _ := cron.ParseStandard(s.Spec.Schedule) 88 | if schedule.Next(time.Now()) == schedule.Next(time.Now().Add(-(*s.Spec.Duration).Duration)) { 89 | return false 90 | } else { 91 | return true 92 | } 93 | } else if s.Spec.StartsAt != nil { 94 | if s.Spec.StartsAt.After(time.Now()) { 95 | return false 96 | } 97 | 98 | if s.Spec.Duration == nil { 99 | return true 100 | } 101 | 102 | if s.Spec.StartsAt.Add((*s.Spec.Duration).Duration).After(time.Now()) { 103 | return true 104 | } 105 | 106 | return false 107 | } else { 108 | return true 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /apis/v2beta2/silence_webhook.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v2beta2 18 | 19 | import ( 20 | "github.com/robfig/cron/v3" 21 | "k8s.io/apimachinery/pkg/api/errors" 22 | "k8s.io/apimachinery/pkg/runtime" 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "k8s.io/apimachinery/pkg/util/validation/field" 25 | ctrl "sigs.k8s.io/controller-runtime" 26 | "sigs.k8s.io/controller-runtime/pkg/webhook" 27 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 28 | ) 29 | 30 | func (r *Silence) SetupWebhookWithManager(mgr ctrl.Manager) error { 31 | return ctrl.NewWebhookManagedBy(mgr). 32 | For(r). 
33 | Complete() 34 | } 35 | 36 | // +kubebuilder:webhook:verbs=create;update,mutating=false,failurePolicy=fail,groups=notification.kubesphere.io,resources=silences,versions=v2beta2 37 | var _ webhook.Validator = &Silence{} 38 | 39 | // ValidateCreate implements webhook.Validator so a webhook will be registered for the type 40 | func (s *Silence) ValidateCreate() (warnings admission.Warnings, err error) { 41 | 42 | return s.validateSilence() 43 | } 44 | 45 | // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type 46 | func (s *Silence) ValidateUpdate(_ runtime.Object) (warnings admission.Warnings, err error) { 47 | return s.validateSilence() 48 | } 49 | 50 | // ValidateDelete implements webhook.Validator so a webhook will be registered for the type 51 | func (s *Silence) ValidateDelete() (warnings admission.Warnings, err error) { 52 | return admission.Warnings{}, nil 53 | } 54 | 55 | func (s *Silence) validateSilence() (warnings admission.Warnings, err error) { 56 | var allErrs field.ErrorList 57 | 58 | if err := validateSelector(s.Spec.Matcher); err != nil { 59 | allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "matcher"), s.Spec.Matcher, err.Error())) 60 | } 61 | 62 | if s.Spec.Schedule != "" { 63 | if _, err := cron.ParseStandard(s.Spec.Schedule); err != nil { 64 | allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "schedule"), s.Spec.Schedule, err.Error())) 65 | } 66 | } 67 | 68 | if allErrs == nil || len(allErrs) == 0 { 69 | return admission.Warnings{}, nil 70 | } 71 | 72 | return admission.Warnings{}, errors.NewInvalid( 73 | schema.GroupKind{Group: "notification.kubesphere.io", Kind: "Receiver"}, 74 | s.Name, allErrs) 75 | } 76 | -------------------------------------------------------------------------------- /cmd/notification-manager/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GOPROXY="https://goproxy.io" 2 | # Build the manager binary 3 | FROM 
golang:1.20 as builder
ARG GOPROXY

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum

# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN GOPROXY=$GOPROXY go mod download

# Copy the go source
COPY cmd/notification-manager/main.go main.go
COPY pkg/ pkg/
COPY apis/ apis/

# Build
# Pass GOPROXY here as well so the build stage resolves modules through the same
# proxy as the download stage (consistent with cmd/operator/Dockerfile; without
# it, any module resolution during `go build` bypasses the configured proxy).
RUN GOPROXY=$GOPROXY CGO_ENABLED=0 GO111MODULE=on go build -a -o notification-manager main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM kubesphere/distroless-static:nonroot
WORKDIR /
COPY --from=builder /workspace/notification-manager .
USER nonroot:nonroot

ENTRYPOINT ["/notification-manager"]
--------------------------------------------------------------------------------
/cmd/operator/Dockerfile:
--------------------------------------------------------------------------------
ARG GOPROXY="https://goproxy.io"
# Build the manager binary
FROM golang:1.20 as builder
ARG GOPROXY

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum

# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN GOPROXY=$GOPROXY go mod download

# Copy the go source
COPY cmd/operator/main.go main.go
COPY controllers/ controllers/
COPY pkg/ pkg/
COPY apis/ apis/

# Build
RUN GOPROXY=$GOPROXY CGO_ENABLED=0 GO111MODULE=on go build -a -o notification-manager-operator main.go

# Use distroless as minimal base image to package the manager binary
# Refer to
https://github.com/GoogleContainerTools/distroless for more details 26 | FROM kubesphere/distroless-static:nonroot 27 | WORKDIR / 28 | COPY --from=builder /workspace/notification-manager-operator . 29 | USER nonroot:nonroot 30 | 31 | ENTRYPOINT ["/notification-manager-operator"] 32 | -------------------------------------------------------------------------------- /config/cert/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - webhook-server-cert.yaml 5 | 6 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 2 | # More document can be found at https://docs.cert-manager.io 3 | # WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for 4 | # breaking changes 5 | apiVersion: cert-manager.io/v1alpha2 6 | kind: Issuer 7 | metadata: 8 | name: selfsigned-issuer 9 | namespace: system 10 | spec: 11 | selfSigned: {} 12 | --- 13 | apiVersion: cert-manager.io/v1alpha2 14 | kind: Certificate 15 | metadata: 16 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml 17 | namespace: system 18 | spec: 19 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 20 | dnsNames: 21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 22 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 23 | issuerRef: 24 | kind: Issuer 25 | name: selfsigned-issuer 26 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 27 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/ci/alerts.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "PullRequestNotification", 9 | "container": "pull-request-notification", 10 | "namespace": "kubsphere-monitoring-system", 11 | "pod": "pull-request-notification-67f5689d7-sqg89", 12 | "prometheus": "kubesphere-monitoring-system/k8s", 13 | "severity": "warning" 14 | }, 15 | "annotations": { 16 | "message": "Congratulations! 
Pull request notification triggered, your pr has passed.", 17 | "runbook_url": "" 18 | }, 19 | "startsAt": "2021-10-21T05:45:44.782098546Z", 20 | "endsAt": "0001-01-01T00:00:00Z", 21 | "generatorURL": "", 22 | "fingerprint": "83fb3d34d52108b0" 23 | } 24 | ], 25 | "groupLabels": { 26 | "alertname": "PullRequestNotification", 27 | "namespace": "kubesphere-monitoring-system" 28 | }, 29 | "commonLabels": { 30 | "alertname": "PullRequestNotification", 31 | "namespace": "kubesphere-monitoring-system", 32 | "prometheus": "kubesphere-monitoring-system/k8s", 33 | "severity": "warning" 34 | }, 35 | "commonAnnotations": { 36 | "runbook_url": "" 37 | }, 38 | "externalURL": "http://alertmanager-main-1:9093" 39 | } -------------------------------------------------------------------------------- /config/ci/slack-pr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: notification.kubesphere.io/v2beta2 2 | kind: Config 3 | metadata: 4 | name: default-slack-config 5 | labels: 6 | app: notification-manager 7 | type: default 8 | spec: 9 | slack: 10 | slackTokenSecret: 11 | valueFrom: 12 | secretKeyRef: 13 | namespace: kubesphere-monitoring-system 14 | key: token 15 | name: slack-token-secret 16 | --- 17 | apiVersion: notification.kubesphere.io/v2beta2 18 | kind: Receiver 19 | metadata: 20 | name: global-slack-receiver 21 | labels: 22 | app: notification-manager 23 | type: global 24 | spec: 25 | slack: 26 | channels: 27 | - alert-manager 28 | --- 29 | apiVersion: v1 30 | data: 31 | token: SLACK_SECRET 32 | kind: Secret 33 | metadata: 34 | labels: 35 | app: notification-manager 36 | name: slack-token-secret 37 | namespace: kubesphere-monitoring-system 38 | type: Opaque -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since 
it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/notification.kubesphere.io_notificationmanagers.yaml 6 | - bases/notification.kubesphere.io_configs.yaml 7 | - bases/notification.kubesphere.io_receivers.yaml 8 | - bases/notification.kubesphere.io_silences.yaml 9 | - bases/notification.kubesphere.io_routers.yaml 10 | # +kubebuilder:scaffold:crdkustomizeresource 11 | 12 | patchesStrategicMerge: 13 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 14 | # patches here are for enabling the conversion webhook for each CRD 15 | #- patches/webhook_in_notificationmanagers.yaml 16 | #- patches/webhook_in_configs.yaml 17 | #- patches/webhook_in_receivers.yaml 18 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 19 | 20 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 21 | # patches here are for enabling the CA injection for each CRD 22 | #- patches/cainjection_in_notificationmanagers.yaml 23 | #- patches/cainjection_in_configs.yaml 24 | #- patches/cainjection_in_receivers.yaml 25 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 26 | 27 | # the following config is for teaching kustomize how to do kustomization for CRDs. 
28 | configurations: 29 | - kustomizeconfig.yaml 30 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | group: apiextensions.k8s.io 8 | path: spec/conversion/webhook/clientConfig/service/name 9 | 10 | namespace: 11 | - kind: CustomResourceDefinition 12 | group: apiextensions.k8s.io 13 | path: spec/conversion/webhook/clientConfig/service/namespace 14 | create: false 15 | 16 | varReference: 17 | - path: metadata/annotations 18 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_configs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: configs.notification.kubesphere.io 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_notificationmanagers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: notificationmanagers.notification.kubesphere.io 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_receivers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: receivers.notification.kubesphere.io 9 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_configs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: configs.notification.kubesphere.io 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhook: 11 | clientConfig: 12 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 13 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 14 | caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURXekNDQWtPZ0F3SUJBZ0lVT3hOb1NwMTlmNS9CNllvZGlRek53MFdJWGhrd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1BERUxNQWtHQTFVRUJoTUNRMDR4Q3pBSkJnTlZCQWdNQWtoQ01Rc3dDUVlEVlFRS0RBSlJRekVUTUJFRwpBMVVFQXd3S2QyVmlhRzl2YXkxallUQWdGdzB5TVRBM01qY3dPREV5TXpaYUdBOHlNVEl4TURjd016QTRNVEl6Ck5sb3dQREVMTUFrR0ExVUVCaE1DUTA0eEN6QUpCZ05WQkFnTUFraENNUXN3Q1FZRFZRUUtEQUpSUXpFVE1CRUcKQTFVRUF3d0tkMlZpYUc5dmF5MWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQgpBTlpzblhIZ21meFJYL2MvQy95S0QzY3hMaGdpSzZ5MkphMlh4OUtYeWRPUjNLSStVSzZ2dXM2V1YzTGl0eTZDCmtPVFlScjV6ZlV3aXZZYUMydHVGRnhTUE80L085dHhFVlBha1UwUGo1N0tVRDBiUnJZWEpDY1V5Ri9TZUlCY0EKMlFmbDZEem0rWjd4NHM1TnE1NFMvUUhpYzJFclVVbHEwbmd3MFQ3UVRieDB4M2Ria0ZNRko0VjlLSjVZdkhOSwpLeWdwR2szb2RpUWZ2Yi81b2hjUUhkTXpQV0Rmd25GTERHZjFUWGFHK0VYeDZodmVoK0RXV2grQzA5ZlI5R05yCmhzNnlZaUU1cmVmY29EUlhrRGVCSkZ3eWtPaVErRE5Fc0RaSU40VHlHTkhmeTRYaUp3QWgxNXBsZTkzQWNTTVQKTlEwNWRYK2FiQmg0djQ4NDRab1lUS2tDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRko5NzNRUUVSVi9DVlRCZQovbmFyQmhZcS9GV0pNQjhHQTFVZEl3UVlNQmFBRko5NzNRUUVSVi9DVlRCZS9uYXJCaFlxL0ZXSk1BOEdBMVVkCkV3RUIvd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQk9ZOWNDbFpTYm1scndFN0YvZVBMVm4Kenl2dW0yUUE3MU9rcGRtWjN6cnV1MW16VmZNNU1ORndkUkJMOGduS05IdjEzaGhFeGQ4enJmQ2hYQWIzaWl5aQpSZnBSTzJodDBWSi9HQklaYlM1ZjIvZ1hvNXpSRHk0cFV0ekozOWZUZG9pNzQxNlhJdU9ubHI0bDk3ZnlRRTI4Cno0NlAzYlhidlZKU1VEcytFL1g0NVNHVS8xdFNCaFNnaTg1NllVWGxybWJNMDlHN29kSmx4VGozTG1qQ1NlQWUKWG9lYWlTUEl6UlMrWjVUQ0tudDdvbjVBSGQydzdrT0Q1K2tTK3gvbWY4aU1iaVV5dCtXYWVzdjNIcHg1TmwvWgprb1dxVVc1cXZlQ0poQUM0SVc5V3hmc2JWRXlZST
R0Zm9SNzZoWVhCdVVReE9BVHlCdjNJMmZYUUkvU2ZrdW89Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K 15 | service: 16 | namespace: system 17 | name: notification-manager-webhook 18 | path: /convert 19 | conversionReviewVersions: 20 | - v1 21 | - v1beta1 22 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_notificationmanagers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: notificationmanagers.notification.kubesphere.io 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_receivers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: receivers.notification.kubesphere.io 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhook: 11 | clientConfig: 12 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 13 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 14 | caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURXekNDQWtPZ0F3SUJBZ0lVT3hOb1NwMTlmNS9CNllvZGlRek53MFdJWGhrd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1BERUxNQWtHQTFVRUJoTUNRMDR4Q3pBSkJnTlZCQWdNQWtoQ01Rc3dDUVlEVlFRS0RBSlJRekVUTUJFRwpBMVVFQXd3S2QyVmlhRzl2YXkxallUQWdGdzB5TVRBM01qY3dPREV5TXpaYUdBOHlNVEl4TURjd016QTRNVEl6Ck5sb3dQREVMTUFrR0ExVUVCaE1DUTA0eEN6QUpCZ05WQkFnTUFraENNUXN3Q1FZRFZRUUtEQUpSUXpFVE1CRUcKQTFVRUF3d0tkMlZpYUc5dmF5MWpZVENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQgpBTlpzblhIZ21meFJYL2MvQy95S0QzY3hMaGdpSzZ5MkphMlh4OUtYeWRPUjNLSStVSzZ2dXM2V1YzTGl0eTZDCmtPVFlScjV6ZlV3aXZZYUMydHVGRnhTUE80L085dHhFVlBha1UwUGo1N0tVRDBiUnJZWEpDY1V5Ri9TZUlCY0EKMlFmbDZEem0rWjd4NHM1TnE1NFMvUUhpYzJFclVVbHEwbmd3MFQ3UVRieDB4M2Ria0ZNRko0VjlLSjVZdkhOSwpLeWdwR2szb2RpUWZ2Yi81b2hjUUhkTXpQV0Rmd25GTERHZjFUWGFHK0VYeDZodmVoK0RXV2grQzA5ZlI5R05yCmhzNnlZaUU1cmVmY29EUlhrRGVCSkZ3eWtPaVErRE5Fc0RaSU40VHlHTkhmeTRYaUp3QWgxNXBsZTkzQWNTTVQKTlEwNWRYK2FiQmg0djQ4NDRab1lUS2tDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRko5NzNRUUVSVi9DVlRCZQovbmFyQmhZcS9GV0pNQjhHQTFVZEl3UVlNQmFBRko5NzNRUUVSVi9DVlRCZS9uYXJCaFlxL0ZXSk1BOEdBMVVkCkV3RUIvd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBQk9ZOWNDbFpTYm1scndFN0YvZVBMVm4Kenl2dW0yUUE3MU9rcGRtWjN6cnV1MW16VmZNNU1ORndkUkJMOGduS05IdjEzaGhFeGQ4enJmQ2hYQWIzaWl5aQpSZnBSTzJodDBWSi9HQklaYlM1ZjIvZ1hvNXpSRHk0cFV0ekozOWZUZG9pNzQxNlhJdU9ubHI0bDk3ZnlRRTI4Cno0NlAzYlhidlZKU1VEcytFL1g0NVNHVS8xdFNCaFNnaTg1NllVWGxybWJNMDlHN29kSmx4VGozTG1qQ1NlQWUKWG9lYWlTUEl6UlMrWjVUQ0tudDdvbjVBSGQydzdrT0Q1K2tTK3gvbWY4aU1iaVV5dCtXYWVzdjNIcHg1TmwvWgprb1dxVVc1cXZlQ0poQUM0SVc5V3hmc2JWRXlZ
STR0Zm9SNzZoWVhCdVVReE9BVHlCdjNJMmZYUUkvU2ZrdW89Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K 15 | service: 16 | namespace: system 17 | name: notification-manager-webhook 18 | path: /convert 19 | conversionReviewVersions: 20 | - v1 21 | - v1beta1 22 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | # Adds namespace to all resources. 5 | namespace: kubesphere-monitoring-system 6 | 7 | # Value of this field is prepended to the 8 | # names of all resources, e.g. a deployment named 9 | # "wordpress" becomes "alices-wordpress". 10 | # Note that it should also match with the prefix (text before '-') of the namespace 11 | # field above. 12 | namePrefix: notification-manager- 13 | 14 | # Labels to add to all resources and selectors. 15 | #commonLabels: 16 | # someName: someValue 17 | 18 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 19 | # crd/kustomization.yaml 20 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 21 | #- ../certmanager 22 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 23 | #- ../prometheus 24 | 25 | # Protect the /metrics endpoint by putting it behind auth. 26 | # If you want your controller-manager to expose the /metrics 27 | # endpoint w/o any authn/z, please comment the following line. 28 | 29 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 30 | # crd/kustomization.yaml 31 | 32 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 33 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
34 | # 'CERTMANAGER' needs to be enabled to use ca injection 35 | #- webhookcainjection_patch.yaml 36 | 37 | resources: 38 | - ../crd 39 | - ../rbac 40 | - ../manager 41 | - ../webhook 42 | - ../cert 43 | patches: 44 | - path: manager_auth_proxy_patch.yaml 45 | - path: manager_webhook_patch.yaml 46 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: operator 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: kubesphere/kube-rbac-proxy:v0.11.0 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--v=10" 19 | ports: 20 | - containerPort: 8443 21 | name: https 22 | - name: notification-manager-operator 23 | args: 24 | - "--metrics-addr=127.0.0.1:8080" 25 | - "--enable-leader-election" 26 | -------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: operator 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: notification-manager-operator 11 | ports: 12 | - containerPort: 9443 13 | name: webhook-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp/k8s-webhook-server/serving-certs 17 | name: cert 18 | readOnly: true 19 | volumes: 20 | - name: cert 21 | secret: 22 | defaultMode: 420 23 | secretName: webhook-server-cert 24 | 
-------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch add annotation to admission webhook config and 2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 3 | apiVersion: admissionregistration.k8s.io/v1beta1 4 | kind: MutatingWebhookConfiguration 5 | metadata: 6 | name: mutating-webhook-configuration 7 | annotations: 8 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 9 | --- 10 | apiVersion: admissionregistration.k8s.io/v1beta1 11 | kind: ValidatingWebhookConfiguration 12 | metadata: 13 | name: validating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 16 | -------------------------------------------------------------------------------- /config/helm/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | # Adds namespace to all resources. 4 | namespace: kubesphere-monitoring-system 5 | 6 | # Value of this field is prepended to the 7 | # names of all resources, e.g. a deployment named 8 | # "wordpress" becomes "alices-wordpress". 9 | # Note that it should also match with the prefix (text before '-') of the namespace 10 | # field above. 11 | namePrefix: notification-manager- 12 | 13 | # Labels to add to all resources and selectors. 
14 | #commonLabels: 15 | # someName: someValue 16 | 17 | resources: 18 | - ../crd 19 | -------------------------------------------------------------------------------- /config/i18n/zh-cn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | zh-cn: | 4 | - name: zh-cn 5 | dictionary: 6 | alert: "告警" 7 | alerts: "告警" 8 | firing: "触发中" 9 | resolved: "已解决" 10 | alertname: "告警名称" 11 | alerttype: "告警类型" 12 | cluster: "集群" 13 | namespace: "项目" 14 | severity: "告警级别" 15 | container: "容器" 16 | pod: "容器组" 17 | service: "服务" 18 | deployment: "部署" 19 | job: "任务" 20 | daemonset: "守护进程集" 21 | statefulset: "有状态副本集" 22 | instance: "实例" 23 | resource: "资源" 24 | user: "用户" 25 | verb: "操作" 26 | group: "用户组" 27 | requestReceivedTimestamp: "请求接收时间" 28 | role: "角色" 29 | host_ip: "主机IP" 30 | node: "节点" 31 | rule_id: "告警规则" 32 | owner_kind: "目标类型" 33 | workload: "工作负载" 34 | rule_group: "规则组" 35 | rule_level: "规则级别" 36 | name: "名称" 37 | kind: ConfigMap 38 | metadata: 39 | name: zh-cn 40 | namespace: kubesphere-monitoring-system 41 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | images: 6 | - name: controller 7 | newName: kubesphere/notification-manager-operator 8 | newTag: latest 9 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: operator 5 | namespace: system 6 | labels: 7 | control-plane: controller-manager 8 | spec: 9 | selector: 10 | matchLabels: 11 | control-plane: controller-manager 12 | replicas: 1 13 | template: 14 | metadata: 15 | labels: 16 | 
control-plane: controller-manager 17 | spec: 18 | serviceAccountName: notification-manager-sa 19 | containers: 20 | - command: 21 | - /notification-manager-operator 22 | args: 23 | - --enable-leader-election 24 | image: controller:latest 25 | imagePullPolicy: Always 26 | name: notification-manager-operator 27 | resources: 28 | limits: 29 | cpu: 100m 30 | memory: 30Mi 31 | requests: 32 | cpu: 100m 33 | memory: 20Mi 34 | env: 35 | - name: NAMESPACE 36 | valueFrom: 37 | fieldRef: 38 | apiVersion: v1 39 | fieldPath: metadata.namespace 40 | terminationGracePeriodSeconds: 10 41 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-metrics 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | selector: 15 | matchLabels: 16 | control-plane: controller-manager 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: ["/metrics"] 7 | verbs: ["get"] 8 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: sa 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-metrics 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | -------------------------------------------------------------------------------- /config/rbac/config_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit emailconfigs. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: config-editor-role 6 | rules: 7 | - apiGroups: 8 | - notification.kubesphere.io 9 | resources: 10 | - configs 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - notification.kubesphere.io 21 | resources: 22 | - configs/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/config_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view emailconfigs. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: config-viewer-role 6 | rules: 7 | - apiGroups: 8 | - notification.kubesphere.io 9 | resources: 10 | - configs 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - notification.kubesphere.io 17 | resources: 18 | - configs/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | # Comment the following 4 lines if you want to disable 5 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 6 | # which protects your /metrics endpoint. 7 | resources: 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | - service_account.yaml 13 | - auth_proxy_service.yaml 14 | - auth_proxy_role.yaml 15 | - auth_proxy_role_binding.yaml 16 | - auth_proxy_client_clusterrole.yaml 17 | 18 | # This is for tenant sidecar in kubesphere, if the notification manager not run in the kubesphere,comment out it. 
19 | patches: 20 | - path: patches/patch.yaml 21 | target: 22 | group: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: controller-role 25 | version: v1 26 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - coordination.k8s.io 29 | resources: 30 | - leases 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - create 36 | - update 37 | - patch 38 | - delete 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - events 43 | verbs: 44 | - create 45 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: sa 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/notificationmanager_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit notificationmanagers. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: notificationmanager-editor-role 6 | rules: 7 | - apiGroups: 8 | - notification.kubesphere.io 9 | resources: 10 | - notificationmanagers 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - notification.kubesphere.io 21 | resources: 22 | - notificationmanagers/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/notificationmanager_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view notificationmanagers. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: notificationmanager-viewer-role 6 | rules: 7 | - apiGroups: 8 | - notification.kubesphere.io 9 | resources: 10 | - notificationmanagers 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - notification.kubesphere.io 17 | resources: 18 | - notificationmanagers/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/patches/patch.yaml: -------------------------------------------------------------------------------- 1 | # The following role patch is for kubesphere tenant sidecar. 2 | - op: add 3 | path: /rules/- 4 | value: 5 | apiGroups: 6 | - "*" 7 | resources: 8 | - "*" 9 | verbs: 10 | - get 11 | - list 12 | - watch 13 | -------------------------------------------------------------------------------- /config/rbac/receiver_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit emailreceivers. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: receiver-editor-role 6 | rules: 7 | - apiGroups: 8 | - notification.kubesphere.io 9 | resources: 10 | - receivers 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - notification.kubesphere.io 21 | resources: 22 | - receivers/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/receiver_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view emailreceivers. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: receiver-viewer-role 6 | rules: 7 | - apiGroups: 8 | - notification.kubesphere.io 9 | resources: 10 | - receivers 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - notification.kubesphere.io 17 | resources: 18 | - receivers/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: controller-role 6 | rules: 7 | - apiGroups: 8 | - apps 9 | resources: 10 | - deployments 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - namespaces 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - secrets 39 | verbs: 40 | - get 41 | - list 42 | - watch 43 | - apiGroups: 44 | - "" 45 | resources: 46 | - services 47 | verbs: 48 | - create 49 | - delete 50 | - get 51 | - list 52 | - patch 
53 | - update 54 | - watch 55 | - apiGroups: 56 | - notification.kubesphere.io 57 | resources: 58 | - configs 59 | - notificationmanagers 60 | - receivers 61 | - routers 62 | - silences 63 | verbs: 64 | - create 65 | - delete 66 | - get 67 | - list 68 | - patch 69 | - update 70 | - watch 71 | - apiGroups: 72 | - notification.kubesphere.io 73 | resources: 74 | - notificationmanagers/finalizers 75 | verbs: 76 | - update 77 | - apiGroups: 78 | - notification.kubesphere.io 79 | resources: 80 | - notificationmanagers/status 81 | verbs: 82 | - get 83 | - patch 84 | - update 85 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: controller-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: controller-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: sa 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: sa 5 | namespace: system 6 | -------------------------------------------------------------------------------- /config/samples/default_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: notification.kubesphere.io/v2beta1 2 | kind: Config 3 | metadata: 4 | name: default-config 5 | labels: 6 | app: notification-manager 7 | type: default 8 | spec: 9 | dingtalk: 10 | conversation: 11 | appkey: 12 | key: appkey 13 | name: defalut-config-secret 14 | appsecret: 15 | key: appsecret 16 | name: defalut-config-secret 17 | email: 18 | authPassword: 19 | key: password 20 | name: default-email-secret 21 
| authUsername: sender1 22 | from: sender1@xyz.com 23 | requireTLS: true 24 | smartHost: 25 | host: imap.xyz.com 26 | port: 25 27 | slack: 28 | slackTokenSecret: 29 | key: token 30 | name: defalut-config-secret 31 | wechat: 32 | wechatApiUrl: https://qyapi.weixin.qq.com/cgi-bin/ 33 | wechatApiCorpId: wechat-api-corp-id 34 | wechatApiAgentId: wechat-api-agent-id 35 | wechatApiSecret: 36 | key: wechat 37 | name: defalut-config-secret 38 | -------------------------------------------------------------------------------- /config/samples/global_receiver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: notification.kubesphere.io/v2beta1 2 | kind: Receiver 3 | metadata: 4 | name: global-receiver 5 | labels: 6 | app: notification-manager 7 | type: global 8 | spec: 9 | dingtalk: 10 | conversation: 11 | chatids: 12 | - chat894f9f4d634eb283933af6c7102977b2 13 | chatbot: 14 | webhook: 15 | key: webhook 16 | name: global-receiver-secret 17 | keywords: 18 | - kubesphere 19 | secret: 20 | key: secret 21 | name: global-receiver-secret 22 | email: 23 | to: 24 | - receiver1@xyz.com 25 | - receiver2@xyz.com 26 | slack: 27 | channels: 28 | - global 29 | webhook: 30 | url: https://sample-webhook.svc:443/ 31 | httpConfig: 32 | basicAuth: 33 | username: user 34 | password: 35 | key: password 36 | name: global-receiver-secret 37 | tlsConfig: 38 | serverName: "*" 39 | rootCA: 40 | key: ca 41 | name: default-webhook-secret 42 | clientCertificate: 43 | cert: 44 | key: cert 45 | name: global-receiver-secret 46 | key: 47 | key: key 48 | name: global-receiver-secret 49 | insecureSkipVerify: false 50 | wechat: 51 | toUser: 52 | - user1 53 | - user2 54 | toParty: 55 | - party1 56 | - party2 57 | toTag: 58 | - tag1 59 | - tag2 -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - 
default_config.yaml 3 | - global_receiver.yaml 4 | - template.yaml 5 | - notification_manager.yaml 6 | 7 | namespace: kubesphere-monitoring-system 8 | 9 | commonLabels: 10 | app: notification-manager 11 | -------------------------------------------------------------------------------- /config/samples/notification_manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: notification.kubesphere.io/v2beta2 2 | kind: NotificationManager 3 | metadata: 4 | name: notification-manager 5 | spec: 6 | replicas: 1 7 | resources: 8 | limits: 9 | cpu: 500m 10 | memory: 1Gi 11 | requests: 12 | cpu: 100m 13 | memory: 20Mi 14 | image: kubesphere/notification-manager:latest 15 | imagePullPolicy: Always 16 | serviceAccountName: notification-manager-sa 17 | portName: webhook 18 | defaultConfigSelector: 19 | matchLabels: 20 | type: default 21 | receivers: 22 | tenantKey: user 23 | globalReceiverSelector: 24 | matchLabels: 25 | type: global 26 | tenantReceiverSelector: 27 | matchLabels: 28 | type: tenant 29 | options: 30 | email: 31 | notificationTimeout: 5 32 | slack: 33 | notificationTimeout: 5 34 | wechat: 35 | notificationTimeout: 5 36 | webhook: 37 | notificationTimeout: 5 38 | dingtalk: 39 | notificationTimeout: 5 40 | groupLabels: 41 | - alertname 42 | - namespace 43 | template: 44 | text: 45 | name: notification-manager-template 46 | namespace: kubesphere-monitoring-system 47 | -------------------------------------------------------------------------------- /config/samples/sms.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: notification.kubesphere.io/v2beta2 2 | kind: Receiver 3 | metadata: 4 | labels: 5 | app: notification-manager 6 | type: global 7 | name: global-sms-receiver 8 | spec: 9 | sms: 10 | enabled: true 11 | phoneNumbers: ["13612344321"] 12 | --- 13 | apiVersion: notification.kubesphere.io/v2beta2 14 | kind: Config 15 | metadata: 16 | labels: 17 | app: 
notification-manager 18 | type: default 19 | name: default-sms-config 20 | spec: 21 | sms: 22 | defaultProvider: huawei 23 | providers: 24 | huawei: 25 | url: https://rtcsms.cn-north-1.myhuaweicloud.com:10743/sms/batchSendSms/v1 26 | signature: xxx 27 | templateId: xxx 28 | templateParas: xxx 29 | sender: kubesphere 30 | appSecret: 31 | valueFrom: 32 | secretKeyRef: 33 | namespace: "default" 34 | key: huawei.appSecret 35 | name: default-sms-secret 36 | appKey: 37 | valueFrom: 38 | secretKeyRef: 39 | namespace: "default" 40 | key: huawei.appKey 41 | name: default-sms-secret 42 | aliyun: 43 | signName: xxxx 44 | templateCode: xxx 45 | accessKeyId: 46 | valueFrom: 47 | secretKeyRef: 48 | namespace: "default" 49 | key: aliyun.accessKeyId 50 | name: default-sms-secret 51 | accessKeySecret: 52 | valueFrom: 53 | secretKeyRef: 54 | namespace: "default" 55 | key: aliyun.accessKeySecret 56 | name: default-sms-secret 57 | tencent: 58 | templateID: xxx 59 | smsSdkAppid: xxx 60 | sign: xxxx 61 | secretId: 62 | valueFrom: 63 | secretKeyRef: 64 | namespace: "default" 65 | key: tencent.secretId 66 | name: default-sms-secret 67 | secretKey: 68 | valueFrom: 69 | secretKeyRef: 70 | namespace: "default" 71 | key: tencent.secretKey 72 | name: default-sms-secret 73 | 74 | --- 75 | apiVersion: v1 76 | data: 77 | aliyun.accessKeyId: eHh4eA== 78 | aliyun.accessKeySecret: eHh4eA== 79 | tencent.secretId: eHh4eA== 80 | tencent.secretKey: eHh4eA== 81 | huawei.appKey: eHh4eA== 82 | huawei.appSecret: eHh4eA== 83 | kind: Secret 84 | metadata: 85 | labels: 86 | app: notification-manager 87 | name: default-sms-secret 88 | type: Opaque 89 | -------------------------------------------------------------------------------- /config/samples/tenant-sidecar-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: notification-manager-tenant-sidecar-role 5 | rules: 6 | - apiGroups: 7 
| - "" 8 | resources: 9 | - namespaces 10 | verbs: 11 | - get 12 | - list 13 | - watch 14 | - apiGroups: 15 | - rbac.authorization.k8s.io 16 | resources: 17 | - roles 18 | - rolebindings 19 | - clusterroles 20 | - clusterrolebindings 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | - apiGroups: 26 | - iam.kubesphere.io 27 | resources: 28 | - users 29 | - globalroles 30 | - globalrolebindings 31 | - groups 32 | - groupbindings 33 | - workspaceroles 34 | - workspacerolebindings 35 | verbs: 36 | - get 37 | - list 38 | - watch 39 | - apiGroups: 40 | - tenant.kubesphere.io 41 | resources: 42 | - workspaces 43 | - workspacetemplates 44 | verbs: 45 | - get 46 | - list 47 | - watch 48 | --- 49 | apiVersion: rbac.authorization.k8s.io/v1 50 | kind: ClusterRoleBinding 51 | metadata: 52 | name: notification-manager-tenant-sidecar-rolebinding 53 | roleRef: 54 | apiGroup: rbac.authorization.k8s.io 55 | kind: ClusterRole 56 | name: notification-manager-tenant-sidecar-role 57 | subjects: 58 | - kind: ServiceAccount 59 | name: notification-manager-sa 60 | namespace: kubesphere-monitoring-system 61 | -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - manifests.yaml 6 | - service.yaml 7 | 8 | configurations: 9 | - kustomizeconfig.yaml 10 | 11 | -------------------------------------------------------------------------------- /config/webhook/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # the following config is for teaching kustomize where to look at when substituting vars. 2 | # It requires kustomize v2.1.0 or newer to work properly. 
3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: MutatingWebhookConfiguration 8 | group: admissionregistration.k8s.io 9 | path: webhooks/clientConfig/service/name 10 | - kind: ValidatingWebhookConfiguration 11 | group: admissionregistration.k8s.io 12 | path: webhooks/clientConfig/service/name 13 | 14 | namespace: 15 | - kind: MutatingWebhookConfiguration 16 | group: admissionregistration.k8s.io 17 | path: webhooks/clientConfig/service/namespace 18 | create: true 19 | - kind: ValidatingWebhookConfiguration 20 | group: admissionregistration.k8s.io 21 | path: webhooks/clientConfig/service/namespace 22 | create: true 23 | 24 | varReference: 25 | - path: metadata/annotations 26 | -------------------------------------------------------------------------------- /config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webhook 6 | namespace: system 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 9443 11 | selector: 12 | control-plane: controller-manager 13 | -------------------------------------------------------------------------------- /controllers/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "path/filepath" 21 | "testing" 22 | 23 | "github.com/kubesphere/notification-manager/apis/v2beta2" 24 | . "github.com/onsi/ginkgo" 25 | "github.com/onsi/ginkgo/reporters" 26 | . "github.com/onsi/gomega" 27 | "k8s.io/client-go/kubernetes/scheme" 28 | "k8s.io/client-go/rest" 29 | "sigs.k8s.io/controller-runtime/pkg/client" 30 | "sigs.k8s.io/controller-runtime/pkg/envtest" 31 | logf "sigs.k8s.io/controller-runtime/pkg/log" 32 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 33 | // +kubebuilder:scaffold:imports 34 | ) 35 | 36 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 37 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 38 | 39 | var cfg *rest.Config 40 | var k8sClient client.Client 41 | var testEnv *envtest.Environment 42 | 43 | func TestAPIs(t *testing.T) { 44 | RegisterFailHandler(Fail) 45 | 46 | RunSpecsWithDefaultAndCustomReporters(t, 47 | "Controller Suite", 48 | []Reporter{reporters.NewJUnitReporter("results.xml")}) 49 | } 50 | 51 | var _ = BeforeSuite(func(done Done) { 52 | logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) 53 | 54 | By("bootstrapping test environment") 55 | testEnv = &envtest.Environment{ 56 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, 57 | } 58 | 59 | var err error 60 | cfg, err = testEnv.Start() 61 | Expect(err).ToNot(HaveOccurred()) 62 | Expect(cfg).ToNot(BeNil()) 63 | 64 | err = v2beta2.AddToScheme(scheme.Scheme) 65 | Expect(err).NotTo(HaveOccurred()) 66 | 67 | // +kubebuilder:scaffold:scheme 68 | 69 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 70 | Expect(err).ToNot(HaveOccurred()) 71 | Expect(k8sClient).ToNot(BeNil()) 72 | 73 | close(done) 74 | }, 60) 75 | 76 | var _ = AfterSuite(func() { 77 | By("tearing down the test environment") 78 | err := testEnv.Stop() 79 | Expect(err).ToNot(HaveOccurred()) 80 | }) 81 | 
-------------------------------------------------------------------------------- /docs/crds/credential.md: -------------------------------------------------------------------------------- 1 | # Credential 2 | 3 | ## Overview 4 | 5 | The `credential` is used to store user credentials, like password, token, app secret, etc. 6 | 7 | ```yaml 8 | password: 9 | valueFrom: 10 | secretKeyRef: 11 | key: secret 12 | name: global-receiver-secret 13 | namespace: kubesphere-monitoring-system 14 | ``` 15 | 16 | ```yaml 17 | password: 18 | value: 123456 19 | ``` 20 | 21 | A `credential` allows the user to define: 22 | 23 | - `value` - The value saved in plaintext, not recommended for storing confidential information. 24 | - `valueFrom` - The object used to store user credentials, now only support `secret`. 25 | - `valueFrom.secretKeyRef` - The secret used to store user credentials. 26 | - `valueFrom.secretKeyRef.name` - The name of secret used to store user credentials. 27 | - `valueFrom.secretKeyRef.namespace` - The namespace of secret used to store user credentials. 28 | - `valueFrom.secretKeyRef.key` - The key of secret used to store user credentials. 29 | 30 | > If the `valueFrom.secretKeyRef.namespace` is not specified, Notification Manager will get the secret in the [`defaultSecretNamespace`](./notification-manager.md#DefaultSecretNamespace). 31 | > If the `defaultSecretNamespace` is not set, Notification Manager will get the secret in the namespace where the notification manager webhook is located. 32 | -------------------------------------------------------------------------------- /docs/crds/router.md: -------------------------------------------------------------------------------- 1 | # Router 2 | 3 | ## Overview 4 | 5 | `Router` CRD is used to send the specified notifications to the specified receivers. 
6 | 7 | ```yaml 8 | apiVersion: notification.kubesphere.io/v2beta2 9 | kind: Router 10 | metadata: 11 | name: router1 12 | spec: 13 | alertSelector: 14 | matchExpressions: 15 | - key: alertname 16 | operator: In 17 | values: 18 | - CPUThrottlingHigh 19 | receivers: 20 | name: 21 | - user1 22 | regexName: "user1.*?" 23 | selector: [] 24 | type: email 25 | ``` 26 | 27 | A router resource allows user to define: 28 | 29 | - `alertSelector` - A label selector used to match alert. The matched alert will send to the `receivers`. 30 | - `receivers.name` - The name of receivers which notifications will send to. 31 | - `receivers.regexName` - A regular expression to match the receiver name. 32 | - `receivers.selector` - A label selector used to select receivers. 33 | - `type` - The type of receiver, known values are dingtalk, email, feishu, pushover, sms, slack, webhook, WeChat. 34 | 35 | ## Examples 36 | 37 | A router that routes all notifications to the all receivers of tenant `user1`. 38 | 39 | ```yaml 40 | apiVersion: notification.kubesphere.io/v2beta2 41 | kind: Router 42 | metadata: 43 | name: router1 44 | spec: 45 | receivers: 46 | selector: 47 | matchLabels: 48 | tenant: user1 49 | ``` 50 | 51 | A router that routes cluster-level notifications to the email receivers `user1`. 52 | 53 | ```yaml 54 | apiVersion: notification.kubesphere.io/v2beta2 55 | kind: Router 56 | metadata: 57 | name: router1 58 | spec: 59 | alertSelector: 60 | matchExpressions: 61 | - key: namespace 62 | operator: DoesNotExist 63 | receivers: 64 | name: 65 | - user1 66 | type: email 67 | ``` 68 | -------------------------------------------------------------------------------- /docs/crds/silence.md: -------------------------------------------------------------------------------- 1 | # Silence 2 | 3 | ## Overview 4 | 5 | `Silence` CRD is used to define policies to mute notifications for a given time. A silence is configured based on a label selector. 
6 | If the incoming alert matches the label selector of an active silence, no notifications will be sent out for that alert. 7 | 8 | `Silence` can be categorized into 2 types `global` and `tenant` by label like `type = global`, `type = tenant`: 9 | - A global silence will mute all notifications that match the label selector. The global silence will take effect in the [silence](../../README.md#silence) step. 10 | - A tenant silence only mutes the notifications that will be sent to receivers of this tenant. The tenant silence will take effect in the [filter](../../README.md#filter) step. 11 | 12 | A silence resource allows the user to define: 13 | 14 | - `enabled` - whether the silence is enabled. 15 | - `matcher` - The label selector used to match alerts. 16 | - `startsAt` - The start time during which the silence is active. 17 | - `schedule` - The schedule in Cron format. If set, the silence will be active periodically, and the startsAt will be ignored. 18 | - `duration` - The time range during which the silence is active. If not set, the silence will be active forever. 19 | 20 | > If the `startsAt` and `schedule` are not set, the silence will be active forever. 21 | 22 | ### Examples 23 | 24 | A silence that mutes all notifications in namespace `test` and is active forever. 25 | 26 | ```yaml 27 | apiVersion: notification.kubesphere.io/v2beta2 28 | kind: Silence 29 | metadata: 30 | name: silence1 31 | labels: 32 | type: global 33 | spec: 34 | matcher: 35 | matchExpressions: 36 | - key: namespace 37 | operator: In 38 | values: 39 | - test 40 | ``` 41 | 42 | A silence that mutes all notifications in namespace `test` and is activated at `2022-02-28T00:00:00Z` for 24 hours. 
43 | 44 | ```yaml 45 | apiVersion: notification.kubesphere.io/v2beta2 46 | kind: Silence 47 | metadata: 48 | name: silence1 49 | labels: 50 | type: global 51 | spec: 52 | matcher: 53 | matchExpressions: 54 | - key: namespace 55 | operator: In 56 | values: 57 | - test 58 | startsAt: "2022-02-28T00:00:00Z" 59 | duration: 24h 60 | ``` 61 | 62 | A silence that mutes all notifications in namespace `test` and is activated at 10 PM for 8 hours every day. 63 | 64 | ```yaml 65 | apiVersion: notification.kubesphere.io/v2beta2 66 | kind: Silence 67 | metadata: 68 | name: silence1 69 | labels: 70 | type: global 71 | spec: 72 | matcher: 73 | matchExpressions: 74 | - key: namespace 75 | operator: In 76 | values: 77 | - test 78 | schedule: "0 22 * * *" 79 | duration: 8h 80 | ``` 81 | -------------------------------------------------------------------------------- /docs/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/notification-manager/27d61a692d28aa84719fc734b668f2570fc7a38a/docs/images/logo.png -------------------------------------------------------------------------------- /docs/images/notification-manager.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/notification-manager/27d61a692d28aa84719fc734b668f2570fc7a38a/docs/images/notification-manager.png -------------------------------------------------------------------------------- /docs/images/receivers_configs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/notification-manager/27d61a692d28aa84719fc734b668f2570fc7a38a/docs/images/receivers_configs.png -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache 
License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /hack/generate-cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script create a new cert for conversion webhook. 4 | 5 | cd hack/ 6 | 7 | openssl genrsa -out ca.key 2048 8 | openssl req -x509 -new -nodes -key ca.key -subj "/C=CN/ST=HB/O=QC/CN=webhook-ca" -sha256 -days 36500 -out ca.crt 9 | openssl genrsa -out server.key 2048 10 | openssl req -new -nodes -keyout server.key -out server.csr -subj "/C=CN/ST=HB/O=QC/CN=notification-manager-webhook" -config openssl.cnf 11 | openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -extfile openssl.cnf -out server.crt -days 36500 -sha256 -extensions v3_req 12 | 13 | key=$(cat server.key | base64 -w 0) 14 | crt=$(cat server.crt | base64 -w 0) 15 | ca=$(cat ca.crt | base64 -w 0) 16 | 17 | sed -ri "s/(tls.crt: )[^\n]*/\1${crt}/" ../config/cert/webhook-server-cert.yaml 18 | sed -ri "s/(tls.key: )[^\n]*/\1${key}/" ../config/cert/webhook-server-cert.yaml 19 | sed -ri "s/(caBundle: )[^\n]*/\1${ca}/" ../config/crd/patches/webhook_in_configs.yaml 20 | sed -ri "s/(caBundle: )[^\n]*/\1${ca}/" ../config/crd/patches/webhook_in_receivers.yaml 21 | sed -ri "s/(caBundle: )[^\n]*/\1${ca}/" ../config/webhook/manifests.yaml 22 | 23 | rm -rf ca.* ca.srt server.* 24 | 
-------------------------------------------------------------------------------- /helm/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: 2.5.0 3 | description: Notification Manager manages notifications in multi-tenant K8s environment. It receives alerts or notifications from different senders and then send notifications to various tenant receivers based on alerts/notifications' tenant label like "namespace". 4 | name: notification-manager 5 | version: 2.5.0 6 | -------------------------------------------------------------------------------- /helm/README.md: -------------------------------------------------------------------------------- 1 | # notification-manager 2 | 3 | ```console 4 | helm install notification-manager 5 | ``` 6 | 7 | ## Requirements 8 | 9 | - Kubernetes v1.13+. 10 | - helm v3. 11 | 12 | ## Installing 13 | 14 | To install the chart with the release name `my-release`: 15 | 16 | ```console 17 | helm install my-release ./helm -n ${namespace} 18 | ``` 19 | 20 | The command deploys the notification-manager chart to the namespace ${namespace} of the Kubernetes cluster with the default configuration. The configuration section lists the parameters that can be configured during installation. 
21 | 22 | ## Uninstalling 23 | 24 | To uninstall/delete the `my-release` deployment: 25 | 26 | ```console 27 | helm delete my-release 28 | ``` 29 | 30 | The command removes all the Kubernetes components associated with the chart and deletes the release. 31 | 32 | ## Configuration 33 | 34 | The following table lists the configurable parameters of the notification-manager chart and their default values. 35 | 36 | Parameter | Description | Default 37 | --- | --- | --- 38 | `operator.containers.proxy.image.tag` | The image tag of container kube-rbac-proxy | `v0.4.1` 39 | `operator.containers.proxy.image.pullPolicy` | The image pull policy of container kube-rbac-proxy | `IfNotPresent` 40 | `operator.containers.proxy.resources` | The resource quota of container kube-rbac-proxy | {} 41 | `operator.containers.operator.image.tag` | The image tag of container notification-manager-operator | `latest` 42 | `operator.containers.operator.image.pullPolicy` | The image pull policy of the container notification-manager-operator | `IfNotPresent` 43 | `operator.containers.operator.resources` | The resource quota of container notification-manager-operator | {} 44 | `operator.nodeSelector` | The nodeSelector of notification-manager-operator | {} 45 | `operator.tolerations` | The tolerations of notification-manager-operator | [] 46 | `operator.affinity` | The affinity of notification-manager-operator | {} 47 | `notificationmanager.replicas` | The replicas of notification-manager | 1 48 | `notificationmanager.image.tag` | The image tag of notification-manager | `latest` 49 | `notificationmanager.image.pullPolicy` | The image pull policy of notification-manager | `IfNotPresent` 50 | `notificationmanager.resources` | The resource quota of notification-manager | {} 51 | `notificationmanager.nodeSelector` | The nodeSelector of notification-manager | {} 52 | `notificationmanager.tolerations` | The tolerations of notification-manager | [] 53 | `notificationmanager.affinity` | The affinity of 
notification-manager | {} 54 | `notificationmanager.receivers` | The receivers configure of notification-manager | {} 55 | `notificationmanager.defaultConfigSelector` | The default config selector of notification-manager | {} 56 | `notificationmanager.notificationManagerNamespaces` | The namespace which will be watched by notification-manager | [] 57 | `notificationmanager.volumes` | List of volumes that can be mounted by containers belonging to the pod | [] 58 | `notificationmanager.volumeMounts` | Pod volumes to mount into the container's filesystem | [] 59 | 60 | -------------------------------------------------------------------------------- /helm/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/*namespace*/}} 2 | {{- define "nm.namespaceOverride" -}} 3 | {{- if .Values.namespaceOverride -}} 4 | {{- .Values.namespaceOverride -}} 5 | {{- else -}} 6 | {{- .Release.Namespace -}} 7 | {{- end -}} 8 | {{- end -}} 9 | 10 | 11 | {{- define "global.imageRegistry" -}} 12 | {{- $registry := default .Values.global.imageRegistry .Values.imageRegistryOverride }} 13 | {{- if $registry -}} 14 | {{- printf "%s/" $registry -}} 15 | {{- end -}} 16 | {{- end -}} 17 | 18 | 19 | {{- define "common.notificationmanager.nodeSelectors" -}} 20 | {{- $selector := default .Values.global.nodeSelector .Values.notificationmanager.nodeSelector }} 21 | {{- toYaml $selector | nindent 4 }} 22 | {{- end -}} 23 | 24 | {{- define "common.operator.nodeSelectors" -}} 25 | {{- $selector := default .Values.global.nodeSelector .Values.operator.nodeSelector }} 26 | {{- toYaml $selector | nindent 8 }} 27 | {{- end -}} -------------------------------------------------------------------------------- /helm/templates/clusterrolebindings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: 
notification-manager-controller-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: notification-manager-controller-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: notification-manager-sa 12 | namespace: {{ include "nm.namespaceOverride" . }} 13 | 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: notification-manager-proxy-rolebinding 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: notification-manager-proxy-role 23 | subjects: 24 | - kind: ServiceAccount 25 | name: notification-manager-sa 26 | namespace: {{ include "nm.namespaceOverride" . }} 27 | 28 | {{- if .Values.kubesphere.enabled }} 29 | {{- if or (eq .Values.kubesphere.version "v3.1.0") (eq .Values.kubesphere.version "v3.2.0") }} 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRoleBinding 33 | metadata: 34 | name: notification-manager-tenant-sidecar-rolebinding 35 | roleRef: 36 | apiGroup: rbac.authorization.k8s.io 37 | kind: ClusterRole 38 | name: notification-manager-tenant-sidecar-role 39 | subjects: 40 | - kind: ServiceAccount 41 | name: notification-manager-sa 42 | namespace: {{ include "nm.namespaceOverride" . 
}} 43 | {{- end}} 44 | {{- end}} 45 | -------------------------------------------------------------------------------- /helm/templates/clusterroles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: notification-manager-controller-role 5 | rules: 6 | - apiGroups: 7 | - apps 8 | resources: 9 | - deployments 10 | verbs: 11 | - create 12 | - delete 13 | - get 14 | - list 15 | - patch 16 | - update 17 | - watch 18 | - apiGroups: 19 | - "" 20 | resources: 21 | - secrets 22 | - configmaps 23 | - namespaces 24 | verbs: 25 | - get 26 | - list 27 | - watch 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - services 32 | verbs: 33 | - create 34 | - delete 35 | - get 36 | - list 37 | - patch 38 | - update 39 | - watch 40 | - apiGroups: 41 | - notification.kubesphere.io 42 | resources: 43 | - configs 44 | - notificationmanagers 45 | - receivers 46 | - routers 47 | - silences 48 | verbs: 49 | - create 50 | - delete 51 | - get 52 | - list 53 | - patch 54 | - update 55 | - watch 56 | - apiGroups: 57 | - notification.kubesphere.io 58 | resources: 59 | - notificationmanagers/finalizers 60 | verbs: 61 | - update 62 | - apiGroups: 63 | - notification.kubesphere.io 64 | resources: 65 | - notificationmanagers/status 66 | verbs: 67 | - get 68 | - patch 69 | - update 70 | 71 | --- 72 | apiVersion: rbac.authorization.k8s.io/v1 73 | kind: ClusterRole 74 | metadata: 75 | name: notification-manager-metrics-reader 76 | rules: 77 | - nonResourceURLs: 78 | - /metrics 79 | verbs: 80 | - get 81 | 82 | --- 83 | apiVersion: rbac.authorization.k8s.io/v1 84 | kind: ClusterRole 85 | metadata: 86 | name: notification-manager-proxy-role 87 | rules: 88 | - apiGroups: 89 | - authentication.k8s.io 90 | resources: 91 | - tokenreviews 92 | verbs: 93 | - create 94 | - apiGroups: 95 | - authorization.k8s.io 96 | resources: 97 | - subjectaccessreviews 98 | verbs: 99 | - create 100 | 101 
| {{- if .Values.kubesphere.enabled }} 102 | {{- if or (eq .Values.kubesphere.version "v3.1.0") (eq .Values.kubesphere.version "v3.2.0") }} 103 | --- 104 | apiVersion: rbac.authorization.k8s.io/v1 105 | kind: ClusterRole 106 | metadata: 107 | name: notification-manager-tenant-sidecar-role 108 | rules: 109 | - apiGroups: 110 | - "" 111 | resources: 112 | - namespaces 113 | verbs: 114 | - get 115 | - list 116 | - watch 117 | - apiGroups: 118 | - rbac.authorization.k8s.io 119 | resources: 120 | - roles 121 | - rolebindings 122 | - clusterroles 123 | - clusterrolebindings 124 | verbs: 125 | - get 126 | - list 127 | - watch 128 | - apiGroups: 129 | - iam.kubesphere.io 130 | resources: 131 | - users 132 | - globalroles 133 | - globalrolebindings 134 | - groups 135 | - groupbindings 136 | - workspaceroles 137 | - workspacerolebindings 138 | verbs: 139 | - get 140 | - list 141 | - watch 142 | - apiGroups: 143 | - tenant.kubesphere.io 144 | resources: 145 | - workspaces 146 | - workspacetemplates 147 | verbs: 148 | - get 149 | - list 150 | - watch 151 | {{- end}} 152 | {{- end}} 153 | 154 | -------------------------------------------------------------------------------- /helm/templates/notificationmanagers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: notification.kubesphere.io/v2beta2 2 | kind: NotificationManager 3 | metadata: 4 | labels: 5 | app: notification-manager 6 | name: notification-manager 7 | spec: 8 | {{- if .Values.notificationmanager.replicas }} 9 | replicas: {{ .Values.notificationmanager.replicas }} 10 | {{- end}} 11 | image: {{ include "global.imageRegistry" .}}{{ .Values.notificationmanager.image.repo }}:{{ .Values.notificationmanager.image.tag }} 12 | imagePullPolicy: {{ .Values.notificationmanager.image.pullPolicy }} 13 | serviceAccountName: notification-manager-sa 14 | portName: webhook 15 | nodeSelector: 16 | {{- include "common.notificationmanager.nodeSelectors" . 
}} 17 | affinity: 18 | {{- toYaml .Values.notificationmanager.affinity | nindent 4 }} 19 | tolerations: 20 | {{- toYaml .Values.notificationmanager.tolerations | nindent 4 }} 21 | resources: 22 | {{- toYaml .Values.notificationmanager.resources | nindent 4 }} 23 | receivers: 24 | {{- toYaml .Values.notificationmanager.receivers | nindent 4 }} 25 | defaultConfigSelector: 26 | {{- toYaml .Values.notificationmanager.defaultConfigSelector | nindent 4 }} 27 | volumeMounts: 28 | {{- toYaml .Values.notificationmanager.volumeMounts | nindent 4 }} 29 | volumes: 30 | {{- toYaml .Values.notificationmanager.volumes | nindent 4 }} 31 | defaultSecretNamespace: 32 | {{- toYaml .Values.notificationmanager.defaultSecretNamespace | nindent 4 }} 33 | {{- if .Values.kubesphere.enabled }} 34 | sidecars: 35 | tenant: 36 | {{- if .Values.kubesphere.image }} 37 | image: {{ .Values.kubesphere.image }} 38 | {{- else }} 39 | image: {{ include "global.imageRegistry" . }}{{ .Values.notificationmanager.sidecar.image.repo }}:{{ .Values.kubesphere.version }} 40 | {{- end }} 41 | name: tenant 42 | type: kubesphere 43 | {{- end }} 44 | template: 45 | {{- toYaml .Values.notificationmanager.template | nindent 4 }} 46 | groupLabels: 47 | {{- toYaml .Values.notificationmanager.groupLabels | nindent 4 }} 48 | annotations: 49 | {{- toYaml .Values.notificationmanager.annotations | nindent 4 }} 50 | labels: 51 | {{- toYaml .Values.notificationmanager.labels | nindent 4 }} 52 | env: 53 | {{- toYaml .Values.notificationmanager.env | nindent 4 }} -------------------------------------------------------------------------------- /helm/templates/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: notification-manager-operator 7 | namespace: {{ include "nm.namespaceOverride" . 
}} 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | control-plane: controller-manager 13 | template: 14 | metadata: 15 | labels: 16 | control-plane: controller-manager 17 | spec: 18 | containers: 19 | - args: 20 | - --secure-listen-address=0.0.0.0:8443 21 | - --upstream=http://127.0.0.1:8080/ 22 | - --logtostderr=true 23 | - --v=10 24 | image: {{ include "global.imageRegistry" . }}{{ .Values.operator.containers.proxy.image.repo }}:{{ .Values.operator.containers.proxy.image.tag }} 25 | imagePullPolicy: {{ .Values.operator.containers.proxy.image.pullPolicy }} 26 | name: kube-rbac-proxy 27 | ports: 28 | - containerPort: 8443 29 | name: https 30 | protocol: TCP 31 | resources: 32 | {{- toYaml .Values.operator.containers.proxy.resources | nindent 10 }} 33 | terminationMessagePath: /dev/termination-log 34 | terminationMessagePolicy: File 35 | - args: 36 | - --metrics-addr=127.0.0.1:8080 37 | - --enable-leader-election 38 | command: 39 | - /notification-manager-operator 40 | env: 41 | - name: NAMESPACE 42 | valueFrom: 43 | fieldRef: 44 | apiVersion: v1 45 | fieldPath: metadata.namespace 46 | {{- if .Values.timezone }} 47 | - name: TZ 48 | value: {{ .Values.timezone }} 49 | {{- end }} 50 | image: {{ include "global.imageRegistry" . 
}}{{ .Values.operator.containers.operator.image.repo }}:{{ .Values.operator.containers.operator.image.tag }} 51 | imagePullPolicy: {{ .Values.operator.containers.operator.image.pullPolicy }} 52 | name: notification-manager-operator 53 | ports: 54 | - containerPort: 9443 55 | name: webhook-server 56 | protocol: TCP 57 | resources: 58 | {{- toYaml .Values.operator.containers.operator.resources | nindent 10 }} 59 | volumeMounts: 60 | - mountPath: /tmp/k8s-webhook-server/serving-certs 61 | name: cert 62 | readOnly: true 63 | volumes: 64 | - name: cert 65 | secret: 66 | defaultMode: 420 67 | secretName: notification-manager-webhook-server-cert 68 | serviceAccount: notification-manager-sa 69 | serviceAccountName: notification-manager-sa 70 | nodeSelector: 71 | {{- include "common.operator.nodeSelectors" . }} 72 | affinity: 73 | {{- toYaml .Values.operator.affinity | nindent 8 }} 74 | tolerations: 75 | {{- toYaml .Values.operator.tolerations | nindent 8 }} 76 | -------------------------------------------------------------------------------- /helm/templates/rolebindings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: notification-manager-leader-election-rolebinding 5 | namespace: {{ include "nm.namespaceOverride" . }} 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: notification-manager-leader-election-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: notification-manager-sa 13 | namespace: {{ include "nm.namespaceOverride" . }} 14 | 15 | -------------------------------------------------------------------------------- /helm/templates/roles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: notification-manager-leader-election-role 5 | namespace: {{ include "nm.namespaceOverride" . 
}} 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - coordination.k8s.io 29 | resources: 30 | - leases 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - create 36 | - update 37 | - patch 38 | - delete 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - events 43 | verbs: 44 | - create 45 | 46 | -------------------------------------------------------------------------------- /helm/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: notification-manager-sa 5 | namespace: {{ include "nm.namespaceOverride" . }} 6 | 7 | -------------------------------------------------------------------------------- /helm/templates/services.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: notification-manager-controller-metrics 7 | namespace: {{ include "nm.namespaceOverride" . }} 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | protocol: TCP 13 | targetPort: https 14 | selector: 15 | control-plane: controller-manager 16 | sessionAffinity: None 17 | type: ClusterIP 18 | --- 19 | apiVersion: v1 20 | kind: Service 21 | metadata: 22 | name: notification-manager-webhook 23 | namespace: {{ include "nm.namespaceOverride" . 
}} 24 | spec: 25 | ports: 26 | - port: 443 27 | targetPort: 9443 28 | selector: 29 | control-plane: controller-manager 30 | 31 | 32 | -------------------------------------------------------------------------------- /helm/templates/validating.yaml: -------------------------------------------------------------------------------- 1 | {{- $name := include "nm.namespaceOverride" . }} 2 | {{- $ca := genCA "webhook-ca" 36500 -}} 3 | {{- $dns := cat "notification-manager-webhook." $name ".svc" | nospace -}} 4 | {{- $cert := genSignedCert "notification-manager-webhook" (list) (list $dns) 36500 $ca -}} 5 | apiVersion: v1 6 | data: 7 | tls.crt: {{ $cert.Cert | b64enc }} 8 | tls.key: {{ $cert.Key | b64enc }} 9 | kind: Secret 10 | metadata: 11 | name: notification-manager-webhook-server-cert 12 | namespace: {{ include "nm.namespaceOverride" . }} 13 | type: kubernetes.io/tls 14 | 15 | --- 16 | apiVersion: admissionregistration.k8s.io/v1 17 | kind: ValidatingWebhookConfiguration 18 | metadata: 19 | name: notification-manager-validating-webhook 20 | webhooks: 21 | - admissionReviewVersions: 22 | - v1beta1 23 | clientConfig: 24 | caBundle: {{ $ca.Cert | b64enc }} 25 | service: 26 | name: notification-manager-webhook 27 | namespace: {{ include "nm.namespaceOverride" . }} 28 | path: /validate-notification-kubesphere-io-v2beta2-config 29 | failurePolicy: Fail 30 | name: vconfig.notification.kubesphere.io 31 | rules: 32 | - apiGroups: 33 | - notification.kubesphere.io 34 | apiVersions: 35 | - v2beta2 36 | operations: 37 | - CREATE 38 | - UPDATE 39 | resources: 40 | - configs 41 | sideEffects: None 42 | - admissionReviewVersions: 43 | - v1beta1 44 | clientConfig: 45 | caBundle: {{ $ca.Cert | b64enc }} 46 | service: 47 | name: notification-manager-webhook 48 | namespace: {{ include "nm.namespaceOverride" . 
}} 49 | path: /validate-notification-kubesphere-io-v2beta2-receiver 50 | failurePolicy: Fail 51 | name: vreceiver.notification.kubesphere.io 52 | rules: 53 | - apiGroups: 54 | - notification.kubesphere.io 55 | apiVersions: 56 | - v2beta2 57 | operations: 58 | - CREATE 59 | - UPDATE 60 | resources: 61 | - receivers 62 | sideEffects: None 63 | - admissionReviewVersions: 64 | - v1beta1 65 | clientConfig: 66 | caBundle: {{ $ca.Cert | b64enc }} 67 | service: 68 | name: notification-manager-webhook 69 | namespace: {{ include "nm.namespaceOverride" . }} 70 | path: /validate-notification-kubesphere-io-v2beta2-router 71 | failurePolicy: Fail 72 | name: vrouter.notification.kubesphere.io 73 | rules: 74 | - apiGroups: 75 | - notification.kubesphere.io 76 | apiVersions: 77 | - v2beta2 78 | operations: 79 | - CREATE 80 | - UPDATE 81 | resources: 82 | - routers 83 | sideEffects: None 84 | - admissionReviewVersions: 85 | - v1beta1 86 | clientConfig: 87 | caBundle: {{ $ca.Cert | b64enc }} 88 | service: 89 | name: notification-manager-webhook 90 | namespace: {{ include "nm.namespaceOverride" . 
}} 91 | path: /validate-notification-kubesphere-io-v2beta2-silence 92 | failurePolicy: Fail 93 | name: vsilence.notification.kubesphere.io 94 | rules: 95 | - apiGroups: 96 | - notification.kubesphere.io 97 | apiVersions: 98 | - v2beta2 99 | operations: 100 | - CREATE 101 | - UPDATE 102 | resources: 103 | - silences 104 | sideEffects: None 105 | -------------------------------------------------------------------------------- /helm/templates/zh-cn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | zh-cn: | 4 | - name: zh-cn 5 | dictionary: 6 | alert: "告警" 7 | alerts: "告警" 8 | firing: "触发中" 9 | resolved: "已解决" 10 | alertname: "告警名称" 11 | alerttype: "告警类型" 12 | cluster: "集群" 13 | namespace: "项目" 14 | severity: "告警级别" 15 | container: "容器" 16 | pod: "容器组" 17 | service: "服务" 18 | deployment: "部署" 19 | job: "任务" 20 | daemonset: "守护进程集" 21 | statefulset: "有状态副本集" 22 | instance: "实例" 23 | resource: "资源" 24 | user: "用户" 25 | verb: "操作" 26 | group: "用户组" 27 | requestReceivedTimestamp: "请求接收时间" 28 | role: "角色" 29 | host_ip: "主机IP" 30 | node: "节点" 31 | rule_id: "告警规则" 32 | owner_kind: "目标类型" 33 | workload: "工作负载" 34 | rule_group: "规则组" 35 | rule_level: "规则级别" 36 | name: "名称" 37 | receiver: "接收者" 38 | receiver_type: "接收者类型" 39 | workload_type: "工作负载类型" 40 | rule_type: "规则类型" 41 | reason: "原因" 42 | kind: ConfigMap 43 | metadata: 44 | name: zh-cn 45 | namespace: {{ include "nm.namespaceOverride" . 
}} -------------------------------------------------------------------------------- /helm/values.yaml: -------------------------------------------------------------------------------- 1 | namespaceOverride: "" 2 | 3 | kubesphere: 4 | enabled: false 5 | # supported version: v3.1.0, v3.2.0 6 | version: v3.2.0 7 | 8 | global: 9 | imageRegistry: "" 10 | nodeSelector: {} 11 | imageRegistryOverride: "" 12 | # value of notification-manager-operator 13 | operator: 14 | containers: 15 | proxy: 16 | image: 17 | repo: kubesphere/kube-rbac-proxy 18 | tag: v0.11.0 19 | pullPolicy: IfNotPresent 20 | resources: 21 | limits: 22 | cpu: 50m 23 | memory: 50Mi 24 | requests: 25 | cpu: 5m 26 | memory: 10Mi 27 | operator: 28 | image: 29 | repo: kubesphere/notification-manager-operator 30 | tag: v2.5.0 31 | pullPolicy: IfNotPresent 32 | resources: 33 | limits: 34 | cpu: 50m 35 | memory: 50Mi 36 | requests: 37 | cpu: 5m 38 | memory: 20Mi 39 | nodeSelector: {} 40 | tolerations: [] 41 | affinity: {} 42 | 43 | 44 | # value of notification-manager 45 | notificationmanager: 46 | image: 47 | repo: kubesphere/notification-manager 48 | tag: v2.5.0 49 | pullPolicy: IfNotPresent 50 | sidecar: 51 | image: 52 | repo: kubesphere/notification-tenant-sidecar 53 | replicas: 1 54 | resources: 55 | limits: 56 | cpu: 500m 57 | memory: 500Mi 58 | requests: 59 | cpu: 5m 60 | memory: 20Mi 61 | nodeSelector: {} 62 | tolerations: [] 63 | affinity: {} 64 | defaultConfigSelector: 65 | matchLabels: 66 | type: default 67 | receivers: 68 | tenantKey: user 69 | globalReceiverSelector: 70 | matchLabels: 71 | type: global 72 | tenantReceiverSelector: 73 | matchLabels: 74 | type: tenant 75 | options: 76 | dingtalk: 77 | notificationTimeout: 5 78 | email: 79 | notificationTimeout: 5 80 | slack: 81 | notificationTimeout: 5 82 | webhook: 83 | notificationTimeout: 5 84 | wechat: 85 | notificationTimeout: 5 86 | groupLabels: 87 | - alertname 88 | - namespace 89 | - cluster 90 | - alerttype 91 | template: 92 | language: 
English 93 | languagePack: 94 | - name: zh-cn 95 | namespace: kubesphere-monitoring-system 96 | text: 97 | name: notification-manager-template 98 | namespace: kubesphere-monitoring-system 99 | env: 100 | - name: TZ 101 | value: Asia/Shanghai 102 | -------------------------------------------------------------------------------- /pkg/aggregation/aggregation.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | 7 | "github.com/go-kit/kit/log" 8 | "github.com/go-kit/kit/log/level" 9 | "github.com/kubesphere/notification-manager/pkg/controller" 10 | "github.com/kubesphere/notification-manager/pkg/internal" 11 | "github.com/kubesphere/notification-manager/pkg/stage" 12 | "github.com/kubesphere/notification-manager/pkg/template" 13 | "github.com/kubesphere/notification-manager/pkg/utils" 14 | "github.com/modern-go/reflect2" 15 | ) 16 | 17 | type aggregationStage struct { 18 | notifierCtl *controller.Controller 19 | } 20 | 21 | func NewStage(notifierCtl *controller.Controller) stage.Stage { 22 | return &aggregationStage{ 23 | notifierCtl: notifierCtl, 24 | } 25 | } 26 | 27 | func (s *aggregationStage) Exec(ctx context.Context, l log.Logger, data interface{}) (context.Context, interface{}, error) { 28 | 29 | if reflect2.IsNil(data) { 30 | return ctx, nil, nil 31 | } 32 | 33 | groupLabel := s.notifierCtl.GetGroupLabels() 34 | _ = level.Debug(l).Log("msg", "Start aggregation stage", "seq", ctx.Value("seq"), "group by", utils.ArrayToString(groupLabel, ",")) 35 | 36 | alertMap := data.(map[internal.Receiver][]*template.Alert) 37 | 38 | res := make(map[internal.Receiver][]*template.Data) 39 | for receiver, alerts := range alertMap { 40 | m := make(map[string][]*template.Alert) 41 | for _, alert := range alerts { 42 | group := labelToGroupKey(groupLabel, alert) 43 | as := m[group] 44 | as = append(as, alert) 45 | m[group] = as 46 | } 47 | 48 | var ds []*template.Data 49 | 
for k, v := range m { 50 | d := &template.Data{ 51 | GroupLabels: groupKeyToLabel(k), 52 | Alerts: v, 53 | } 54 | ds = append(ds, d.Format()) 55 | } 56 | 57 | res[receiver] = ds 58 | } 59 | 60 | return ctx, res, nil 61 | } 62 | 63 | func labelToGroupKey(groupLabel []string, alert *template.Alert) string { 64 | 65 | m := make(map[string]string) 66 | for _, k := range groupLabel { 67 | m[k] = alert.Labels[k] 68 | } 69 | 70 | bs, _ := json.Marshal(m) 71 | 72 | return string(bs) 73 | } 74 | 75 | func groupKeyToLabel(groupKey string) template.KV { 76 | 77 | label := template.KV{} 78 | _ = utils.JsonUnmarshal([]byte(groupKey), &label) 79 | for k, v := range label { 80 | if v == "" { 81 | delete(label, k) 82 | } 83 | } 84 | return label 85 | } 86 | -------------------------------------------------------------------------------- /pkg/async/group.go: -------------------------------------------------------------------------------- 1 | package async 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/kubesphere/notification-manager/pkg/utils" 9 | ) 10 | 11 | // Group has several workers, and the group can execute these workers concurrently, 12 | // wait for the workers to finish within a specified time, and receive the results returned by these workers. 13 | type Group struct { 14 | workers []func(stopCh chan interface{}) 15 | stopCh chan interface{} 16 | ctx context.Context 17 | } 18 | 19 | func NewGroup(ctx context.Context) *Group { 20 | return &Group{ 21 | ctx: ctx, 22 | } 23 | } 24 | 25 | // Add a worker to group 26 | func (g *Group) Add(w func(stopCh chan interface{})) { 27 | g.workers = append(g.workers, w) 28 | } 29 | 30 | // Wait execute all workers concurrently, and wait for all workers to end. 
31 | func (g *Group) Wait() error { 32 | 33 | if len(g.workers) == 0 { 34 | return nil 35 | } 36 | 37 | g.stopCh = make(chan interface{}, len(g.workers)) 38 | 39 | for _, worker := range g.workers { 40 | go worker(g.stopCh) 41 | } 42 | 43 | var errs []error 44 | res := 0 45 | for { 46 | select { 47 | case <-g.ctx.Done(): 48 | return utils.Error("timeout") 49 | case val := <-g.stopCh: 50 | switch val.(type) { 51 | case error: 52 | errs = append(errs, val.(error)) 53 | case []error: 54 | errs = append(errs, val.([]error)...) 55 | default: 56 | } 57 | 58 | res = res + 1 59 | if res == len(g.workers) { 60 | if len(errs) == 0 { 61 | return nil 62 | } 63 | 64 | s := "" 65 | for _, err := range errs { 66 | s = fmt.Sprintf("%s%s,", s, err.Error()) 67 | } 68 | return utils.Error(strings.TrimSuffix(s, ",")) 69 | } 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /pkg/constants/constants.go: -------------------------------------------------------------------------------- 1 | package constants 2 | 3 | const ( 4 | HTML = "html" 5 | Text = "text" 6 | Markdown = "markdown" 7 | // Post message is Rich Text Format(RTF). RTF is a file format that lets you exchange text 8 | // files between different word processors in different operating systems. 
9 | // More info: https://open.feishu.cn/document/uAjLw4CM/ukTMukTMukTM/im-v1/message/create_json#45e0953e 10 | Post = "post" 11 | Aliyun = "aliyun" 12 | Tencent = "tencent" 13 | AWS = "aws" 14 | 15 | DingTalk = "dingtalk" 16 | Email = "email" 17 | Feishu = "feishu" 18 | Pushover = "pushover" 19 | Slack = "slack" 20 | SMS = "sms" 21 | Webhook = "webhook" 22 | WeChat = "wechat" 23 | Discord = "discord" 24 | Telegram = "telegram" 25 | 26 | DiscordContent = "content" 27 | DiscordEmbed = "embed" 28 | 29 | Cluster = "cluster" 30 | Namespace = "namespace" 31 | 32 | AlertFiring = "firing" 33 | AlertResolved = "resolved" 34 | 35 | AlertName = "alertname" 36 | AlertType = "alerttype" 37 | AlertTime = "alerttime" 38 | AlertMessage = "message" 39 | AlertSummary = "summary" 40 | AlertSummaryCN = "summaryCn" 41 | 42 | ReceiverName = "receiver" 43 | 44 | Verify = "verify" 45 | Notification = "notification" 46 | 47 | DefaultWebhookTemplate = `{{ template "webhook.default.message" . }}` 48 | DefaultHistoryTemplate = `{{ template "nm.default.history" . 
}}` 49 | 50 | DefaultClusterName = "default" 51 | 52 | KubesphereConfigNamespace = "kubesphere-system" 53 | KubesphereConfigName = "kubesphere-config" 54 | KubesphereConfigKey = "kubesphere.yaml" 55 | ) 56 | -------------------------------------------------------------------------------- /pkg/controller/factories.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/kubesphere/notification-manager/apis/v2beta2" 7 | "github.com/kubesphere/notification-manager/pkg/constants" 8 | "github.com/kubesphere/notification-manager/pkg/internal" 9 | "github.com/kubesphere/notification-manager/pkg/internal/dingtalk" 10 | "github.com/kubesphere/notification-manager/pkg/internal/discord" 11 | "github.com/kubesphere/notification-manager/pkg/internal/email" 12 | "github.com/kubesphere/notification-manager/pkg/internal/feishu" 13 | "github.com/kubesphere/notification-manager/pkg/internal/pushover" 14 | "github.com/kubesphere/notification-manager/pkg/internal/slack" 15 | "github.com/kubesphere/notification-manager/pkg/internal/sms" 16 | "github.com/kubesphere/notification-manager/pkg/internal/telegram" 17 | "github.com/kubesphere/notification-manager/pkg/internal/webhook" 18 | "github.com/kubesphere/notification-manager/pkg/internal/wechat" 19 | "github.com/kubesphere/notification-manager/pkg/utils" 20 | "github.com/modern-go/reflect2" 21 | ) 22 | 23 | type receiverFactory = func(tenantID string, obj *v2beta2.Receiver) internal.Receiver 24 | type configFactory = func(obj *v2beta2.Config) internal.Config 25 | 26 | var receiverFactories map[string]receiverFactory 27 | var configFactories map[string]configFactory 28 | 29 | func init() { 30 | receiverFactories = make(map[string]receiverFactory) 31 | receiverFactories[constants.DingTalk] = dingtalk.NewReceiver 32 | receiverFactories[constants.Email] = email.NewReceiver 33 | receiverFactories[constants.Feishu] = feishu.NewReceiver 34 | 
receiverFactories[constants.Pushover] = pushover.NewReceiver 35 | receiverFactories[constants.Slack] = slack.NewReceiver 36 | receiverFactories[constants.SMS] = sms.NewReceiver 37 | receiverFactories[constants.Webhook] = webhook.NewReceiver 38 | receiverFactories[constants.WeChat] = wechat.NewReceiver 39 | receiverFactories[constants.Discord] = discord.NewReceiver 40 | receiverFactories[constants.Telegram] = telegram.NewReceiver 41 | 42 | configFactories = make(map[string]configFactory) 43 | configFactories[constants.DingTalk] = dingtalk.NewConfig 44 | configFactories[constants.Email] = email.NewConfig 45 | configFactories[constants.Feishu] = feishu.NewConfig 46 | configFactories[constants.Pushover] = pushover.NewConfig 47 | configFactories[constants.Slack] = slack.NewConfig 48 | configFactories[constants.SMS] = sms.NewConfig 49 | configFactories[constants.Webhook] = webhook.NewConfig 50 | configFactories[constants.WeChat] = wechat.NewConfig 51 | configFactories[constants.Discord] = discord.NewConfig 52 | configFactories[constants.Telegram] = telegram.NewConfig 53 | } 54 | 55 | func NewReceivers(tenantID string, obj *v2beta2.Receiver) map[string]internal.Receiver { 56 | 57 | if obj == nil { 58 | return nil 59 | } 60 | 61 | m := make(map[string]internal.Receiver) 62 | for k, fn := range receiverFactories { 63 | if r := fn(tenantID, obj); !reflect2.IsNil(r) { 64 | r.SetHash(utils.Hash(r)) 65 | m[fmt.Sprintf("%s/%s", k, obj.Name)] = r 66 | } 67 | } 68 | 69 | return m 70 | } 71 | 72 | func NewConfigs(obj *v2beta2.Config) map[string]internal.Config { 73 | 74 | if obj == nil { 75 | return nil 76 | } 77 | 78 | m := make(map[string]internal.Config) 79 | for k, fn := range configFactories { 80 | if c := fn(obj); !reflect2.IsNil(c) { 81 | m[fmt.Sprintf("%s/%s", k, obj.Name)] = c 82 | } 83 | } 84 | 85 | return m 86 | } 87 | -------------------------------------------------------------------------------- /pkg/filter/filter.go: 
-------------------------------------------------------------------------------- 1 | package filter 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-kit/kit/log" 7 | "github.com/go-kit/kit/log/level" 8 | "github.com/kubesphere/notification-manager/apis/v2beta2" 9 | "github.com/kubesphere/notification-manager/pkg/controller" 10 | "github.com/kubesphere/notification-manager/pkg/internal" 11 | "github.com/kubesphere/notification-manager/pkg/stage" 12 | "github.com/kubesphere/notification-manager/pkg/template" 13 | "github.com/modern-go/reflect2" 14 | ) 15 | 16 | type filterStage struct { 17 | notifierCtl *controller.Controller 18 | } 19 | 20 | func NewStage(notifierCtl *controller.Controller) stage.Stage { 21 | return &filterStage{ 22 | notifierCtl, 23 | } 24 | } 25 | 26 | func (s *filterStage) Exec(ctx context.Context, l log.Logger, data interface{}) (context.Context, interface{}, error) { 27 | 28 | if reflect2.IsNil(data) { 29 | return ctx, nil, nil 30 | } 31 | 32 | _ = level.Debug(l).Log("msg", "Start filter stage", "seq", ctx.Value("seq")) 33 | 34 | alertMap := data.(map[internal.Receiver][]*template.Alert) 35 | res := make(map[internal.Receiver][]*template.Alert) 36 | for receiver, alerts := range alertMap { 37 | as, err := s.mute(ctx, alerts, receiver) 38 | if err != nil { 39 | _ = level.Error(l).Log("msg", "Mute failed", "stage", "Filter", "seq", ctx.Value("seq"), "tenant", receiver.GetTenantID(), "error", err.Error()) 40 | return ctx, data, err 41 | } 42 | 43 | as, err = filter(as, receiver.GetAlertSelector()) 44 | if err != nil { 45 | _ = level.Error(l).Log("msg", "Filter failed", "stage", "Filter", "seq", ctx.Value("seq"), "error", err.Error(), "receiver", receiver.GetName()) 46 | return ctx, nil, err 47 | } 48 | 49 | res[receiver] = as 50 | } 51 | 52 | return ctx, res, nil 53 | } 54 | 55 | func (s *filterStage) mute(ctx context.Context, alerts []*template.Alert, receiver internal.Receiver) ([]*template.Alert, error) { 56 | 57 | silences, err := 
s.notifierCtl.GetActiveSilences(ctx, receiver.GetTenantID()) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | if len(silences) == 0 { 63 | return alerts, nil 64 | } 65 | 66 | var as []*template.Alert 67 | for _, alert := range alerts { 68 | flag := false 69 | for _, silence := range silences { 70 | if !silence.IsActive() { 71 | continue 72 | } 73 | 74 | if v2beta2.LabelMatchSelector(alert.Labels, silence.Spec.Matcher) { 75 | flag = true 76 | break 77 | } 78 | } 79 | 80 | if !flag { 81 | as = append(as, alert) 82 | } 83 | } 84 | 85 | return as, err 86 | } 87 | 88 | // FilterAlerts filter the alerts with label selector,if the selector is not correct, return all of the alerts. 89 | func filter(alerts []*template.Alert, selector *v2beta2.LabelSelector) ([]*template.Alert, error) { 90 | 91 | if selector == nil { 92 | return alerts, nil 93 | } 94 | 95 | var as []*template.Alert 96 | for _, alert := range alerts { 97 | ok, err := selector.Matches(alert.Labels) 98 | if err != nil { 99 | return nil, err 100 | } 101 | if ok { 102 | as = append(as, alert) 103 | } 104 | } 105 | 106 | return as, nil 107 | } 108 | -------------------------------------------------------------------------------- /pkg/history/history.go: -------------------------------------------------------------------------------- 1 | package history 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/go-kit/kit/log" 8 | "github.com/go-kit/kit/log/level" 9 | "github.com/kubesphere/notification-manager/pkg/controller" 10 | "github.com/kubesphere/notification-manager/pkg/internal" 11 | "github.com/kubesphere/notification-manager/pkg/notify" 12 | "github.com/kubesphere/notification-manager/pkg/stage" 13 | "github.com/kubesphere/notification-manager/pkg/template" 14 | "github.com/modern-go/reflect2" 15 | ) 16 | 17 | const ( 18 | historyRetryMax = 3 19 | historyRetryDelay = time.Second * 5 20 | ) 21 | 22 | type historyStage struct { 23 | notifierCtl *controller.Controller 24 | } 25 | 26 | func 
NewStage(notifierCtl *controller.Controller) stage.Stage { 27 | return &historyStage{ 28 | notifierCtl: notifierCtl, 29 | } 30 | } 31 | 32 | func (s *historyStage) Exec(ctx context.Context, l log.Logger, data interface{}) (context.Context, interface{}, error) { 33 | if reflect2.IsNil(data) { 34 | return ctx, nil, nil 35 | } 36 | 37 | receivers := s.notifierCtl.GetHistoryReceivers() 38 | if len(receivers) == 0 { 39 | return ctx, nil, nil 40 | } 41 | 42 | _ = level.Debug(l).Log("msg", "Start history stage", "seq", ctx.Value("seq")) 43 | 44 | input := data.(map[string]*template.Alert) 45 | d := &template.Data{} 46 | for _, alert := range input { 47 | if alert.NotifySuccessful { 48 | d.Alerts = append(d.Alerts, alert) 49 | } 50 | } 51 | 52 | if len(d.Alerts) == 0 { 53 | return ctx, nil, nil 54 | } 55 | 56 | alertMap := make(map[internal.Receiver][]*template.Data) 57 | for _, receiver := range receivers { 58 | alertMap[receiver] = []*template.Data{d} 59 | } 60 | 61 | for retry := 0; retry <= historyRetryMax; retry++ { 62 | notifyStage := notify.NewStage(s.notifierCtl) 63 | if _, _, err := notifyStage.Exec(ctx, l, alertMap); err == nil { 64 | return ctx, nil, nil 65 | } 66 | 67 | _ = level.Error(l).Log("msg", "Export history error", "seq", ctx.Value("seq"), "retry", retry) 68 | time.Sleep(historyRetryDelay) 69 | } 70 | 71 | _ = level.Error(l).Log("msg", "Export history error, all retry failed", "seq", ctx.Value("seq")) 72 | return ctx, data, nil 73 | } 74 | -------------------------------------------------------------------------------- /pkg/internal/discord/types.go: -------------------------------------------------------------------------------- 1 | package discord 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/kubesphere/notification-manager/apis/v2beta2" 7 | "github.com/kubesphere/notification-manager/pkg/constants" 8 | "github.com/kubesphere/notification-manager/pkg/internal" 9 | ) 10 | 11 | type Receiver struct { 12 | *internal.Common 13 | Webhook 
*v2beta2.Credential `json:"webhook"` 14 | Type *string `json:"type,omitempty"` 15 | MentionedUsers []string `json:"mentionedUsers,omitempty"` 16 | MentionedRoles []string `json:"mentionedRoles,omitempty"` 17 | } 18 | 19 | func NewReceiver(tenantID string, obj *v2beta2.Receiver) internal.Receiver { 20 | if obj.Spec.Discord == nil { 21 | return nil 22 | } 23 | discord := obj.Spec.Discord 24 | r := &Receiver{ 25 | Common: &internal.Common{ 26 | Name: obj.Name, 27 | TenantID: tenantID, 28 | Type: constants.Discord, 29 | Labels: obj.Labels, 30 | Enable: discord.Enabled, 31 | AlertSelector: discord.AlertSelector, 32 | Template: internal.Template{ 33 | TmplName: *discord.Template, 34 | TmplText: discord.TmplText, 35 | }, 36 | }, 37 | MentionedRoles: discord.MentionedRoles, 38 | MentionedUsers: discord.MentionedUsers, 39 | Type: discord.Type, 40 | } 41 | 42 | if discord.Webhook != nil { 43 | r.Webhook = discord.Webhook 44 | } 45 | 46 | return r 47 | } 48 | 49 | func (r *Receiver) SetConfig(_ internal.Config) { 50 | return 51 | } 52 | 53 | func (r *Receiver) Validate() error { 54 | if r.Type != nil { 55 | if *r.Type != constants.DiscordContent && *r.Type != constants.DiscordEmbed { 56 | return fmt.Errorf("discord receiver: type must be one of: `content` or `embed`") 57 | } 58 | } 59 | return nil 60 | } 61 | 62 | func (r *Receiver) Clone() internal.Receiver { 63 | 64 | out := &Receiver{ 65 | Common: r.Common.Clone(), 66 | Webhook: r.Webhook, 67 | } 68 | 69 | out.Type = r.Type 70 | out.MentionedUsers = append(out.MentionedUsers, r.MentionedUsers...) 71 | out.MentionedRoles = append(out.MentionedRoles, r.MentionedRoles...) 
72 | return out 73 | } 74 | 75 | func (r *Receiver) GetChannels() (string, interface{}) { 76 | if r.Webhook == nil { 77 | return "", nil 78 | } else { 79 | return r.GetType(), r.Webhook.ToString() 80 | } 81 | } 82 | 83 | type Config struct { 84 | *internal.Common 85 | } 86 | 87 | func NewConfig(_ *v2beta2.Config) internal.Config { 88 | return nil 89 | } 90 | 91 | func (c *Config) Validate() error { 92 | return nil 93 | } 94 | 95 | func (c *Config) Clone() internal.Config { 96 | return nil 97 | } 98 | -------------------------------------------------------------------------------- /pkg/internal/interface.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "github.com/kubesphere/notification-manager/apis/v2beta2" 5 | ) 6 | 7 | type Receiver interface { 8 | GetTenantID() string 9 | GetName() string 10 | GetResourceVersion() uint64 11 | Enabled() bool 12 | GetType() string 13 | GetLabels() map[string]string 14 | GetAlertSelector() *v2beta2.LabelSelector 15 | GetConfigSelector() *v2beta2.LabelSelector 16 | SetConfig(c Config) 17 | Validate() error 18 | Clone() Receiver 19 | GetHash() string 20 | SetHash(h string) 21 | GetChannels() (string, interface{}) 22 | } 23 | 24 | type Config interface { 25 | GetResourceVersion() uint64 26 | GetLabels() map[string]string 27 | GetPriority() int 28 | Validate() error 29 | Clone() Config 30 | } 31 | -------------------------------------------------------------------------------- /pkg/internal/slack/types.go: -------------------------------------------------------------------------------- 1 | package slack 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/kubesphere/notification-manager/apis/v2beta2" 8 | "github.com/kubesphere/notification-manager/pkg/constants" 9 | "github.com/kubesphere/notification-manager/pkg/internal" 10 | "github.com/modern-go/reflect2" 11 | ) 12 | 13 | type Receiver struct { 14 | *internal.Common 15 | // The channel or user to 
send notifications to. 16 | Channels []string `json:"channels,omitempty"` 17 | *Config 18 | } 19 | 20 | func NewReceiver(tenantID string, obj *v2beta2.Receiver) internal.Receiver { 21 | if obj.Spec.Slack == nil { 22 | return nil 23 | } 24 | s := obj.Spec.Slack 25 | r := &Receiver{ 26 | Common: &internal.Common{ 27 | Name: obj.Name, 28 | TenantID: tenantID, 29 | Type: constants.Slack, 30 | Labels: obj.Labels, 31 | Enable: s.Enabled, 32 | AlertSelector: s.AlertSelector, 33 | ConfigSelector: s.SlackConfigSelector, 34 | Template: internal.Template{ 35 | TmplText: s.TmplText, 36 | }, 37 | }, 38 | Channels: s.Channels, 39 | } 40 | 41 | r.ResourceVersion, _ = strconv.ParseUint(obj.ResourceVersion, 10, 64) 42 | 43 | if s.Template != nil { 44 | r.TmplName = *s.Template 45 | } 46 | 47 | return r 48 | } 49 | 50 | func (r *Receiver) SetConfig(c internal.Config) { 51 | if reflect2.IsNil(c) { 52 | return 53 | } 54 | 55 | if nc, ok := c.(*Config); ok { 56 | r.Config = nc 57 | } 58 | } 59 | 60 | func (r *Receiver) Validate() error { 61 | 62 | if len(r.Channels) == 0 { 63 | return fmt.Errorf("slack receiver: channel must be specified") 64 | } 65 | 66 | if r.Config == nil { 67 | return fmt.Errorf("slack receiver: Config is nil") 68 | } 69 | 70 | return nil 71 | } 72 | 73 | func (r *Receiver) Clone() internal.Receiver { 74 | 75 | return &Receiver{ 76 | Common: r.Common.Clone(), 77 | Channels: r.Channels, 78 | Config: r.Config, 79 | } 80 | } 81 | 82 | func (r *Receiver) GetChannels() (string, interface{}) { 83 | return r.Type, r.Channels 84 | } 85 | 86 | type Config struct { 87 | *internal.Common 88 | // The token of user or bot. 
89 | Token *v2beta2.Credential `json:"token,omitempty"` 90 | } 91 | 92 | func NewConfig(obj *v2beta2.Config) internal.Config { 93 | if obj.Spec.Slack == nil { 94 | return nil 95 | } 96 | 97 | c := &Config{ 98 | Common: &internal.Common{ 99 | Name: obj.Name, 100 | Labels: obj.Labels, 101 | Type: constants.Slack, 102 | }, 103 | Token: obj.Spec.Slack.SlackTokenSecret, 104 | } 105 | 106 | c.ResourceVersion, _ = strconv.ParseUint(obj.ResourceVersion, 10, 64) 107 | 108 | return c 109 | } 110 | 111 | func (c *Config) Validate() error { 112 | 113 | if err := internal.ValidateCredential(c.Token); err != nil { 114 | return fmt.Errorf("slack config: token error, %s", err.Error()) 115 | } 116 | 117 | return nil 118 | } 119 | 120 | func (c *Config) Clone() internal.Config { 121 | 122 | return &Config{ 123 | Common: c.Common.Clone(), 124 | Token: c.Token, 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /pkg/internal/telegram/types.go: -------------------------------------------------------------------------------- 1 | package telegram 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/kubesphere/notification-manager/apis/v2beta2" 8 | "github.com/kubesphere/notification-manager/pkg/constants" 9 | "github.com/kubesphere/notification-manager/pkg/internal" 10 | "github.com/modern-go/reflect2" 11 | ) 12 | 13 | type Receiver struct { 14 | *internal.Common 15 | *Config 16 | 17 | // The channel or user to send notifications to. 
18 | Channels []string `json:"channels,omitempty"` 19 | MentionedUsers []string `json:"mentionedUsers,omitempty"` 20 | } 21 | 22 | func NewReceiver(tenantID string, obj *v2beta2.Receiver) internal.Receiver { 23 | if obj.Spec.Telegram == nil { 24 | return nil 25 | } 26 | telegram := obj.Spec.Telegram 27 | r := &Receiver{ 28 | Common: &internal.Common{ 29 | Name: obj.Name, 30 | TenantID: tenantID, 31 | Type: constants.Telegram, 32 | Labels: obj.Labels, 33 | Enable: telegram.Enabled, 34 | AlertSelector: telegram.AlertSelector, 35 | ConfigSelector: telegram.TelegramConfigSelector, 36 | Template: internal.Template{ 37 | TmplName: *telegram.Template, 38 | TmplText: telegram.TmplText, 39 | }, 40 | }, 41 | Channels: telegram.Channels, 42 | MentionedUsers: telegram.MentionedUsers, 43 | } 44 | 45 | r.ResourceVersion, _ = strconv.ParseUint(obj.ResourceVersion, 10, 64) 46 | 47 | return r 48 | } 49 | 50 | func (r *Receiver) SetConfig(c internal.Config) { 51 | if reflect2.IsNil(c) { 52 | return 53 | } 54 | 55 | if nc, ok := c.(*Config); ok { 56 | r.Config = nc 57 | } 58 | } 59 | 60 | func (r *Receiver) Validate() error { 61 | 62 | if len(r.Channels) == 0 { 63 | return fmt.Errorf("telegram receiver: channel must be specified") 64 | } 65 | 66 | if r.Config == nil { 67 | return fmt.Errorf("telegram receiver: Config is nil") 68 | } 69 | 70 | return nil 71 | } 72 | 73 | func (r *Receiver) Clone() internal.Receiver { 74 | 75 | return &Receiver{ 76 | Common: r.Common.Clone(), 77 | Channels: r.Channels, 78 | MentionedUsers: r.MentionedUsers, 79 | Config: r.Config, 80 | } 81 | } 82 | 83 | func (r *Receiver) GetChannels() (string, interface{}) { 84 | return r.Type, r.Channels 85 | } 86 | 87 | type Config struct { 88 | *internal.Common 89 | // The token of user or bot. 
90 | Token *v2beta2.Credential `json:"token,omitempty"` 91 | } 92 | 93 | func NewConfig(obj *v2beta2.Config) internal.Config { 94 | if obj.Spec.Telegram == nil { 95 | return nil 96 | } 97 | 98 | c := &Config{ 99 | Common: &internal.Common{ 100 | Name: obj.Name, 101 | Labels: obj.Labels, 102 | Type: constants.Telegram, 103 | }, 104 | Token: obj.Spec.Telegram.TelegramTokenSecret, 105 | } 106 | 107 | c.ResourceVersion, _ = strconv.ParseUint(obj.ResourceVersion, 10, 64) 108 | 109 | return c 110 | } 111 | 112 | func (c *Config) Validate() error { 113 | 114 | if err := internal.ValidateCredential(c.Token); err != nil { 115 | return fmt.Errorf("telegram config: token error, %s", err.Error()) 116 | } 117 | 118 | return nil 119 | } 120 | 121 | func (c *Config) Clone() internal.Config { 122 | 123 | return &Config{ 124 | Common: c.Common.Clone(), 125 | Token: c.Token, 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /pkg/internal/webhook/types.go: -------------------------------------------------------------------------------- 1 | package webhook 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | 8 | "github.com/kubesphere/notification-manager/apis/v2beta2" 9 | "github.com/kubesphere/notification-manager/pkg/constants" 10 | "github.com/kubesphere/notification-manager/pkg/internal" 11 | ) 12 | 13 | type Receiver struct { 14 | *internal.Common 15 | // `url` gives the location of the webhook, in standard URL form. 
16 | URL string `json:"url,omitempty"` 17 | HttpConfig *v2beta2.HTTPClientConfig `json:"httpConfig,omitempty"` 18 | *Config 19 | } 20 | 21 | func NewReceiver(tenantID string, obj *v2beta2.Receiver) internal.Receiver { 22 | if obj.Spec.Webhook == nil { 23 | return nil 24 | } 25 | w := obj.Spec.Webhook 26 | r := &Receiver{ 27 | Common: &internal.Common{ 28 | Name: obj.Name, 29 | TenantID: tenantID, 30 | Type: constants.Webhook, 31 | Labels: obj.Labels, 32 | Enable: w.Enabled, 33 | AlertSelector: w.AlertSelector, 34 | Template: internal.Template{ 35 | TmplText: w.TmplText, 36 | }, 37 | }, 38 | HttpConfig: w.HTTPConfig, 39 | } 40 | 41 | r.ResourceVersion, _ = strconv.ParseUint(obj.ResourceVersion, 10, 64) 42 | 43 | if w.Template != nil { 44 | r.TmplName = *w.Template 45 | } 46 | 47 | if w.URL != nil { 48 | r.URL = *w.URL 49 | } else if w.Service != nil { 50 | service := w.Service 51 | if service.Scheme == nil || len(*service.Scheme) == 0 { 52 | r.URL = fmt.Sprintf("http://%s.%s", service.Name, service.Namespace) 53 | } else { 54 | r.URL = fmt.Sprintf("%s://%s.%s", *service.Scheme, service.Name, service.Namespace) 55 | } 56 | 57 | if service.Port != nil { 58 | r.URL = fmt.Sprintf("%s:%d/", r.URL, *service.Port) 59 | } 60 | 61 | if service.Path != nil { 62 | r.URL = fmt.Sprintf("%s%s", r.URL, strings.TrimPrefix(*service.Path, "/")) 63 | } 64 | } 65 | 66 | return r 67 | } 68 | 69 | func (r *Receiver) SetConfig(_ internal.Config) { 70 | return 71 | } 72 | 73 | func (r *Receiver) Validate() error { 74 | 75 | if len(r.URL) == 0 { 76 | return fmt.Errorf("webhook rceiver: url is nil") 77 | } 78 | 79 | return nil 80 | } 81 | 82 | func (r *Receiver) Clone() internal.Receiver { 83 | 84 | return &Receiver{ 85 | Common: r.Common.Clone(), 86 | URL: r.URL, 87 | HttpConfig: r.HttpConfig, 88 | Config: r.Config, 89 | } 90 | } 91 | 92 | func (r *Receiver) GetChannels() (string, interface{}) { 93 | return r.Type, r.URL 94 | } 95 | 96 | type Config struct { 97 | *internal.Common 98 | } 99 
| 100 | func NewConfig(_ *v2beta2.Config) internal.Config { 101 | return nil 102 | } 103 | 104 | func (c *Config) Validate() error { 105 | return nil 106 | } 107 | 108 | func (c *Config) Clone() internal.Config { 109 | 110 | return nil 111 | } 112 | -------------------------------------------------------------------------------- /pkg/notify/notifier/interface.go: -------------------------------------------------------------------------------- 1 | package notifier 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/kubesphere/notification-manager/pkg/template" 7 | ) 8 | 9 | type Notifier interface { 10 | Notify(ctx context.Context, data *template.Data) error 11 | SetSentSuccessfulHandler(*func([]*template.Alert)) 12 | } 13 | -------------------------------------------------------------------------------- /pkg/notify/notifier/sms/aliyun.go: -------------------------------------------------------------------------------- 1 | package sms 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | openapi "github.com/alibabacloud-go/darabonba-openapi/client" 8 | dysmsapi "github.com/alibabacloud-go/dysmsapi-20170525/v2/client" 9 | "github.com/kubesphere/notification-manager/apis/v2beta2" 10 | "github.com/kubesphere/notification-manager/pkg/controller" 11 | "github.com/kubesphere/notification-manager/pkg/utils" 12 | ) 13 | 14 | const ( 15 | aliyunMaxPhoneNums = 1000 16 | ) 17 | 18 | type AliyunNotifier struct { 19 | SignName string 20 | notifierCtl *controller.Controller 21 | TemplateCode string 22 | AccessKeyId *v2beta2.Credential 23 | AccessKeySecret *v2beta2.Credential 24 | PhoneNums string 25 | } 26 | 27 | func NewAliyunProvider(c *controller.Controller, providers *v2beta2.Providers, phoneNumbers []string) Provider { 28 | phoneNums := handleAliyunPhoneNums(phoneNumbers) 29 | return &AliyunNotifier{ 30 | SignName: providers.Aliyun.SignName, 31 | notifierCtl: c, 32 | TemplateCode: providers.Aliyun.TemplateCode, 33 | AccessKeyId: providers.Aliyun.AccessKeyId, 34 | 
AccessKeySecret: providers.Aliyun.AccessKeySecret, 35 | PhoneNums: phoneNums, 36 | } 37 | } 38 | 39 | func (a *AliyunNotifier) MakeRequest(_ context.Context, messages string) error { 40 | accessKeyId, err := a.notifierCtl.GetCredential(a.AccessKeyId) 41 | if err != nil { 42 | return utils.Errorf("[Aliyun SendSms] cannot get accessKeyId: %s", err.Error()) 43 | } 44 | accessKeySecret, err := a.notifierCtl.GetCredential(a.AccessKeySecret) 45 | if err != nil { 46 | return utils.Errorf("[Aliyun SendSms] cannot get accessKeySecret: %s", err.Error()) 47 | } 48 | c := &openapi.Config{} 49 | c.AccessKeyId = &accessKeyId 50 | c.AccessKeySecret = &accessKeySecret 51 | client, err := dysmsapi.NewClient(c) 52 | if err != nil { 53 | return utils.Errorf("[Aliyun SendSms] cannot make a client with accessKeyId:%s,accessKeySecret:%s", 54 | a.AccessKeyId.ValueFrom.SecretKeyRef.Name, a.AccessKeySecret.ValueFrom.SecretKeyRef.Name) 55 | } 56 | 57 | templateParam := `{"code":"` + messages + `"}` 58 | req := &dysmsapi.SendSmsRequest{ 59 | PhoneNumbers: &a.PhoneNums, 60 | SignName: &a.SignName, 61 | TemplateCode: &a.TemplateCode, 62 | TemplateParam: &templateParam, 63 | } 64 | resp, err := client.SendSms(req) 65 | if err != nil { 66 | return utils.Errorf("[Aliyun SendSms] An API error occurs: %s", err.Error()) 67 | } 68 | 69 | if stringValue(resp.Body.Code) != "OK" { 70 | return utils.Errorf("[Aliyun SendSms] Send failed: %s", stringValue(resp.Body.Message)) 71 | } 72 | 73 | return nil 74 | } 75 | 76 | func handleAliyunPhoneNums(phoneNumbers []string) string { 77 | if len(phoneNumbers) > aliyunMaxPhoneNums { 78 | phoneNumbers = phoneNumbers[:aliyunMaxPhoneNums] 79 | } 80 | return strings.Join(phoneNumbers, ",") 81 | } 82 | -------------------------------------------------------------------------------- /pkg/notify/notifier/sms/aws.go: -------------------------------------------------------------------------------- 1 | package sms 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | 
"github.com/aws/aws-sdk-go-v2/aws" 8 | "github.com/aws/aws-sdk-go-v2/config" 9 | "github.com/aws/aws-sdk-go-v2/credentials" 10 | "github.com/aws/aws-sdk-go-v2/service/sns" 11 | "github.com/kubesphere/notification-manager/apis/v2beta2" 12 | "github.com/kubesphere/notification-manager/pkg/controller" 13 | "github.com/kubesphere/notification-manager/pkg/utils" 14 | ) 15 | 16 | const ( 17 | awsMaxPhoneNums = 1000 18 | ) 19 | 20 | type AWSNotifier struct { 21 | notifierCtl *controller.Controller 22 | AccessKeyId *v2beta2.Credential 23 | SecretAccessKey *v2beta2.Credential 24 | PhoneNums string 25 | Region string 26 | } 27 | 28 | func NewAWSProvider(c *controller.Controller, providers *v2beta2.Providers, phoneNumbers []string) Provider { 29 | phoneNums := handleAWSPhoneNums(phoneNumbers) 30 | return &AWSNotifier{ 31 | notifierCtl: c, 32 | AccessKeyId: providers.AWS.AccessKeyId, 33 | SecretAccessKey: providers.AWS.SecretAccessKey, 34 | PhoneNums: phoneNums, 35 | Region: providers.AWS.Region, 36 | } 37 | } 38 | 39 | type SNSPublishAPI interface { 40 | Publish(ctx context.Context, 41 | params *sns.PublishInput, 42 | optFns ...func(*sns.Options)) (*sns.PublishOutput, error) 43 | } 44 | 45 | func PublishMessage(c context.Context, api SNSPublishAPI, input *sns.PublishInput) (*sns.PublishOutput, error) { 46 | return api.Publish(c, input) 47 | } 48 | 49 | func (a *AWSNotifier) MakeRequest(ctx context.Context, messages string) error { 50 | accessKeyId, err := a.notifierCtl.GetCredential(a.AccessKeyId) 51 | if err != nil { 52 | return utils.Errorf("[AWS SendSms] cannot get accessKeyId: %s", err.Error()) 53 | } 54 | secretAccessKey, err := a.notifierCtl.GetCredential(a.SecretAccessKey) 55 | if err != nil { 56 | return utils.Errorf("[AWS SendSms] cannot get secretAccessKey: %s", err.Error()) 57 | } 58 | 59 | cfg, err := config.LoadDefaultConfig(ctx, 60 | config.WithCredentialsProvider(credentials.StaticCredentialsProvider{ 61 | Value: aws.Credentials{ 62 | AccessKeyID: accessKeyId, 
SecretAccessKey: secretAccessKey, 63 | }, 64 | }), 65 | config.WithRegion(a.Region)) 66 | if err != nil { 67 | return utils.Errorf("[AWS SendSms]configuration error: %s", err.Error()) 68 | } 69 | 70 | client := sns.NewFromConfig(cfg) 71 | 72 | input := &sns.PublishInput{ 73 | Message: &messages, 74 | PhoneNumber: &a.PhoneNums, 75 | } 76 | 77 | _, err = PublishMessage(ctx, client, input) 78 | if err != nil { 79 | return utils.Errorf("[AWS SendSms] Send failed: %s", err.Error()) 80 | } 81 | 82 | return nil 83 | } 84 | 85 | func handleAWSPhoneNums(phoneNumbers []string) string { 86 | if len(phoneNumbers) > awsMaxPhoneNums { 87 | phoneNumbers = phoneNumbers[:awsMaxPhoneNums] 88 | } 89 | return strings.Join(phoneNumbers, ",") 90 | } 91 | -------------------------------------------------------------------------------- /pkg/notify/notifier/sms/interface.go: -------------------------------------------------------------------------------- 1 | package sms 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/kubesphere/notification-manager/apis/v2beta2" 7 | "github.com/kubesphere/notification-manager/pkg/controller" 8 | "github.com/kubesphere/notification-manager/pkg/utils" 9 | ) 10 | 11 | type Provider interface { 12 | MakeRequest(ctx context.Context, messages string) error 13 | } 14 | 15 | type ProviderFactory func(c *controller.Controller, providers *v2beta2.Providers, phoneNumbers []string) Provider 16 | 17 | var availableFactoryFuncs = map[string]ProviderFactory{} 18 | 19 | // register providers here 20 | func init() { 21 | Register("aliyun", NewAliyunProvider) 22 | Register("tencent", NewTencentProvider) 23 | Register("huawei", NewHuaweiProvider) 24 | Register("aws", NewAWSProvider) 25 | } 26 | 27 | func Register(name string, p ProviderFactory) { 28 | if len(availableFactoryFuncs) == 0 { 29 | availableFactoryFuncs = make(map[string]ProviderFactory) 30 | } 31 | availableFactoryFuncs[name] = p 32 | } 33 | 34 | func GetProviderFunc(name string) (ProviderFactory, error) { 35 
| if name != "" { 36 | // check whether the default provider is registered 37 | p, ok := availableFactoryFuncs[name] 38 | if !ok { 39 | return nil, utils.Error("the given default sms provider not registered") 40 | } 41 | return p, nil 42 | } else { 43 | // use the first available provider func if the default provider not given 44 | var providerFunc ProviderFactory 45 | for _, p := range availableFactoryFuncs { 46 | if p != nil { 47 | providerFunc = p 48 | break 49 | } 50 | } 51 | if providerFunc != nil { 52 | return providerFunc, nil 53 | } 54 | return nil, utils.Error("cannot find a registered provider") 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /pkg/notify/notifier/sms/tencent.go: -------------------------------------------------------------------------------- 1 | package sms 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/kubesphere/notification-manager/apis/v2beta2" 9 | "github.com/kubesphere/notification-manager/pkg/controller" 10 | "github.com/kubesphere/notification-manager/pkg/utils" 11 | "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" 12 | "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" 13 | "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/regions" 14 | smsApi "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/sms/v20190711" 15 | ) 16 | 17 | const ( 18 | tencentMaxPhoneNums = 200 19 | ) 20 | 21 | type TencentNotifier struct { 22 | Sign string 23 | notifierCtl *controller.Controller 24 | TemplateID string 25 | SecretId *v2beta2.Credential 26 | SecretKey *v2beta2.Credential 27 | PhoneNums []string 28 | SmsSdkAppid string 29 | } 30 | 31 | func NewTencentProvider(c *controller.Controller, providers *v2beta2.Providers, phoneNumbers []string) Provider { 32 | phoneNum := handleTencentPhoneNum(phoneNumbers) 33 | return &TencentNotifier{ 34 | Sign: providers.Tencent.Sign, 35 | notifierCtl: c, 36 | TemplateID: 
providers.Tencent.TemplateID, 37 | SecretId: providers.Tencent.SecretId, 38 | SecretKey: providers.Tencent.SecretKey, 39 | PhoneNums: phoneNum, 40 | SmsSdkAppid: providers.Tencent.SmsSdkAppid, 41 | } 42 | } 43 | 44 | func (t *TencentNotifier) MakeRequest(_ context.Context, messages string) error { 45 | secretId, err := t.notifierCtl.GetCredential(t.SecretId) 46 | if err != nil { 47 | return utils.Errorf("[Tencent SendSms]cannot get accessKeyId: %s", err.Error()) 48 | } 49 | secretKey, err := t.notifierCtl.GetCredential(t.SecretKey) 50 | if err != nil { 51 | return utils.Errorf("[Tencent SendSms] cannot get secretKey: %s", err.Error()) 52 | } 53 | credential := common.NewCredential(secretId, secretKey) 54 | client, _ := smsApi.NewClient(credential, regions.Guangzhou, profile.NewClientProfile()) 55 | 56 | req := smsApi.NewSendSmsRequest() 57 | req.SmsSdkAppid = common.StringPtr(t.SmsSdkAppid) 58 | req.Sign = common.StringPtr(t.Sign) 59 | // req.SenderId = common.StringPtr("xxx") 60 | // req.SessionContext = common.StringPtr("xxx") 61 | // req.ExtendCode = common.StringPtr("0") 62 | req.TemplateParamSet = common.StringPtrs([]string{messages}) 63 | req.TemplateID = common.StringPtr(t.TemplateID) 64 | req.PhoneNumberSet = common.StringPtrs(t.PhoneNums) 65 | 66 | resp, err := client.SendSms(req) 67 | 68 | if err != nil { 69 | return utils.Errorf("[Tencent SendSms] An API error occurs: %s", err.Error()) 70 | } 71 | 72 | sendStatusSet := resp.Response.SendStatusSet 73 | failedPhoneNums := make([]string, 0) 74 | if len(sendStatusSet) != 0 { 75 | for _, sendStatus := range sendStatusSet { 76 | if sendStatus != nil && stringValue(sendStatus.Code) != "OK" { 77 | failedPhoneNums = append(failedPhoneNums, stringValue(sendStatus.PhoneNumber)) 78 | } 79 | } 80 | } 81 | 82 | if len(failedPhoneNums) != 0 { 83 | return utils.Errorf("[Tencent SendSms] Some phonenums send failed: %s", strings.Join(failedPhoneNums, ",")) 84 | } 85 | 86 | return nil 87 | 88 | } 89 | 90 | func 
// tencentMaxPhoneNums is the per-request phone number cap of the Tencent
// SendSms API (numbers beyond it are silently dropped).
const tencentMaxPhoneNums = 200

// handleTencentPhoneNum normalizes phone numbers for the Tencent SMS API:
// the list is capped at tencentMaxPhoneNums entries and every number gets a
// "+86" country prefix when it does not already carry one (the API expects
// E.164-style mainland-China numbers).
func handleTencentPhoneNum(phoneNumbers []string) []string {
	capped := phoneNumbers
	if len(capped) > tencentMaxPhoneNums {
		capped = capped[:tencentMaxPhoneNums]
	}
	normalized := make([]string, 0, len(capped))
	for _, num := range capped {
		if !strings.HasPrefix(num, "+86") {
			num = "+86" + num
		}
		normalized = append(normalized, num)
	}
	return normalized
}
ats.mutex.Lock() 64 | defer ats.mutex.Unlock() 65 | 66 | ch := make(chan interface{}) 67 | 68 | go func() { 69 | t, ok := ats.tokens[key] 70 | if ok && time.Since(t.accessTokenAt) < t.expires { 71 | ch <- t.accessToken 72 | return 73 | } 74 | 75 | accessToken, expires, err := getToken(ctx) 76 | if err != nil { 77 | ch <- err 78 | return 79 | } else { 80 | ats.tokens[key] = &token{ 81 | accessToken: accessToken, 82 | accessTokenAt: time.Now(), 83 | expires: expires, 84 | } 85 | ch <- accessToken 86 | return 87 | } 88 | }() 89 | 90 | select { 91 | case <-ctx.Done(): 92 | return "", utils.Error("get token timeout") 93 | case val := <-ch: 94 | switch val.(type) { 95 | case error: 96 | return "", val.(error) 97 | case string: 98 | return val.(string), nil 99 | default: 100 | return "", utils.Error("wrong token type") 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /pkg/silence/silence.go: -------------------------------------------------------------------------------- 1 | package silence 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-kit/kit/log" 7 | "github.com/go-kit/kit/log/level" 8 | "github.com/kubesphere/notification-manager/pkg/controller" 9 | "github.com/kubesphere/notification-manager/pkg/stage" 10 | "github.com/kubesphere/notification-manager/pkg/template" 11 | "github.com/modern-go/reflect2" 12 | ) 13 | 14 | type silenceStage struct { 15 | notifierCtl *controller.Controller 16 | } 17 | 18 | func NewStage(notifierCtl *controller.Controller) stage.Stage { 19 | return &silenceStage{ 20 | notifierCtl, 21 | } 22 | } 23 | 24 | func (s *silenceStage) Exec(ctx context.Context, l log.Logger, data interface{}) (context.Context, interface{}, error) { 25 | if reflect2.IsNil(data) { 26 | return ctx, nil, nil 27 | } 28 | 29 | input := data.([]*template.Alert) 30 | 31 | _ = level.Debug(l).Log("msg", "Start silence stage", "seq", ctx.Value("seq"), "alert", len(input)) 32 | 33 | ss, err := 
s.notifierCtl.GetActiveSilences(ctx, "") 34 | if err != nil { 35 | _ = level.Error(l).Log("msg", "Get silence failed", "stage", "Silence", "seq", ctx.Value("seq"), "error", err.Error()) 36 | return ctx, nil, err 37 | } 38 | 39 | if len(ss) == 0 { 40 | return ctx, input, nil 41 | } 42 | 43 | var output []*template.Alert 44 | for _, alert := range input { 45 | mute := false 46 | for _, silence := range ss { 47 | ok, err := silence.Spec.Matcher.Matches(alert.Labels) 48 | if err != nil { 49 | return nil, nil, err 50 | } 51 | if ok { 52 | mute = true 53 | break 54 | } 55 | } 56 | 57 | if !mute { 58 | output = append(output, alert) 59 | } 60 | } 61 | 62 | return ctx, output, nil 63 | } 64 | -------------------------------------------------------------------------------- /pkg/stage/stage.go: -------------------------------------------------------------------------------- 1 | package stage 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-kit/kit/log" 7 | "github.com/modern-go/reflect2" 8 | ) 9 | 10 | // A Stage processes alerts under the constraints of the given context. 11 | type Stage interface { 12 | Exec(ctx context.Context, l log.Logger, data interface{}) (context.Context, interface{}, error) 13 | } 14 | 15 | // A MultiStage executes a series of stages sequentially. 16 | type MultiStage []Stage 17 | 18 | // Exec implements the Stage interface. 
19 | func (ms MultiStage) Exec(ctx context.Context, l log.Logger, data interface{}) (context.Context, interface{}, error) { 20 | var err error 21 | for _, s := range ms { 22 | if reflect2.IsNil(data) { 23 | return ctx, nil, nil 24 | } 25 | 26 | ctx, data, err = s.Exec(ctx, l, data) 27 | if err != nil { 28 | return ctx, data, err 29 | } 30 | } 31 | return ctx, data, nil 32 | } 33 | -------------------------------------------------------------------------------- /pkg/store/provider/interface.go: -------------------------------------------------------------------------------- 1 | package provider 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/kubesphere/notification-manager/pkg/template" 7 | ) 8 | 9 | type Provider interface { 10 | Push(alert *template.Alert) error 11 | Pull(batchSize int, batchWait time.Duration) ([]*template.Alert, error) 12 | Close() error 13 | } 14 | -------------------------------------------------------------------------------- /pkg/store/provider/memory/memory.go: -------------------------------------------------------------------------------- 1 | package memory 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/kubesphere/notification-manager/pkg/store/provider" 8 | "github.com/kubesphere/notification-manager/pkg/template" 9 | "github.com/kubesphere/notification-manager/pkg/utils" 10 | "gopkg.in/alecthomas/kingpin.v2" 11 | ) 12 | 13 | var ( 14 | queueLen *int 15 | pushTimeout *time.Duration 16 | ) 17 | 18 | type memProvider struct { 19 | ch chan *template.Alert 20 | } 21 | 22 | func init() { 23 | queueLen = kingpin.Flag( 24 | "store.memory.queue", 25 | "Memory cache queue capacity", 26 | ).Default("10000").Int() 27 | pushTimeout = kingpin.Flag( 28 | "store.memory.pushTimeout", 29 | "Push timeout", 30 | ).Default("3s").Duration() 31 | } 32 | 33 | func NewProvider() provider.Provider { 34 | 35 | return &memProvider{ 36 | ch: make(chan *template.Alert, *queueLen), 37 | } 38 | } 39 | 40 | func (p *memProvider) Push(alert 
*template.Alert) error { 41 | ctx, cancel := context.WithTimeout(context.Background(), *pushTimeout) 42 | defer cancel() 43 | 44 | select { 45 | case p.ch <- alert: 46 | return nil 47 | case <-ctx.Done(): 48 | return utils.Error("Time out") 49 | } 50 | } 51 | 52 | func (p *memProvider) Pull(batchSize int, batchWait time.Duration) ([]*template.Alert, error) { 53 | 54 | ctx, cancel := context.WithTimeout(context.Background(), batchWait) 55 | defer cancel() 56 | 57 | var as []*template.Alert 58 | for { 59 | select { 60 | case <-ctx.Done(): 61 | return as, nil 62 | case alert := <-p.ch: 63 | if alert == nil { 64 | return as, utils.Error("Store closed") 65 | } 66 | as = append(as, alert) 67 | if len(as) >= batchSize { 68 | return as, nil 69 | } 70 | } 71 | } 72 | } 73 | 74 | func (p *memProvider) Close() error { 75 | close(p.ch) 76 | return nil 77 | } 78 | -------------------------------------------------------------------------------- /pkg/store/store.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/kubesphere/notification-manager/pkg/store/provider" 5 | "github.com/kubesphere/notification-manager/pkg/store/provider/memory" 6 | ) 7 | 8 | const ( 9 | providerMemory = "memory" 10 | ) 11 | 12 | type AlertStore struct { 13 | provider.Provider 14 | } 15 | 16 | func NewAlertStore(provider string) *AlertStore { 17 | 18 | as := &AlertStore{} 19 | 20 | if provider == providerMemory { 21 | as.Provider = memory.NewProvider() 22 | } 23 | 24 | return as 25 | } 26 | -------------------------------------------------------------------------------- /pkg/template/language.go: -------------------------------------------------------------------------------- 1 | package template 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/kubesphere/notification-manager/pkg/constants" 7 | "sigs.k8s.io/yaml" 8 | ) 9 | 10 | type languagePack struct { 11 | Name string `json:"name,omitempty"` 12 | Dictionary 
map[string]string `json:"dictionary,omitempty"` 13 | } 14 | 15 | func ParserDictionary(pack []string) (map[string]map[string]string, error) { 16 | 17 | dictionary := make(map[string]map[string]string) 18 | for _, p := range pack { 19 | var lps []languagePack 20 | if err := yaml.Unmarshal([]byte(p), &lps); err != nil { 21 | return nil, err 22 | } 23 | 24 | for _, lp := range lps { 25 | m := dictionary[lp.Name] 26 | if m == nil { 27 | m = make(map[string]string) 28 | } 29 | for k, v := range lp.Dictionary { 30 | m[strings.ToLower(k)] = v 31 | } 32 | dictionary[lp.Name] = m 33 | } 34 | } 35 | 36 | dictionary[DefaultLanguage] = map[string]string{ 37 | constants.AlertFiring: strings.ToUpper(constants.AlertFiring), 38 | constants.AlertResolved: strings.ToUpper(constants.AlertResolved), 39 | } 40 | 41 | return dictionary, nil 42 | } 43 | -------------------------------------------------------------------------------- /pkg/utils/error.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "fmt" 4 | 5 | func Error(msg string) error { 6 | return Errorf("%s", msg) 7 | } 8 | 9 | func Errorf(format string, a ...interface{}) error { 10 | return fmt.Errorf(format, a...) 
// UrlWithPath returns u with path appended to u's existing URL path.
// An unparsable u yields ("", err).
func UrlWithPath(u, path string) (string, error) {

	postMessageURL, err := url.Parse(u)
	if err != nil {
		return "", err
	}

	postMessageURL.Path += path
	return postMessageURL.String(), nil
}

// UrlWithParameters returns u with the given parameters merged into its query
// string; an existing value for the same key is replaced. Keys are encoded in
// sorted order by url.Values.Encode.
func UrlWithParameters(u string, parameters map[string]string) (string, error) {

	postMessageURL, err := url.Parse(u)
	if err != nil {
		return "", err
	}

	// Fix: removed a stray postMessageURL.Query() call whose result was
	// discarded — only this assignment is needed.
	values := postMessageURL.Query()
	for k, v := range parameters {
		values.Set(k, v)
	}

	postMessageURL.RawQuery = values.Encode()
	return postMessageURL.String(), nil
}
// StringIsNil reports whether s is the empty string.
func StringIsNil(s string) bool {
	return s == ""
}

// ArrayToString joins array's elements with sep between them; a nil or empty
// slice yields "".
func ArrayToString(array []string, sep string) string {

	if len(array) == 0 {
		return ""
	}

	// strings.Join replaces the previous O(n^2) append-then-trim loop with
	// identical output.
	return strings.Join(array, sep)
}
// StringInList reports whether s equals any element of list.
func StringInList(s string, list []string) bool {

	for _, v := range list {
		if s == v {
			return true
		}
	}

	return false
}

// RegularMatch reports whether s matches the regular expression expr.
// An empty or invalid expression matches nothing.
func RegularMatch(expr, s string) bool {

	if expr == "" {
		return false
	}

	// Fix: the compile error used to be discarded, leaving regex nil and
	// panicking on the Match call below whenever expr was invalid.
	regex, err := regexp.Compile(expr)
	if err != nil {
		return false
	}
	return regex.MatchString(s)
}
h.handler.Notification) 50 | h.router.Get("/metrics", h.handler.ServeMetrics) 51 | h.router.Get("/-/reload", h.handler.ServeReload) 52 | h.router.Get("/-/ready", h.handler.ServeHealthCheck) 53 | h.router.Get("/-/live", h.handler.ServeReadinessCheck) 54 | h.router.Get("/status", h.handler.ServeStatus) 55 | 56 | return h 57 | } 58 | 59 | func (h *Webhook) Run(ctx context.Context) error { 60 | var err error 61 | httpSrv := &http.Server{ 62 | Addr: h.ListenAddress, 63 | Handler: h.router, 64 | } 65 | 66 | srvClosed := make(chan struct{}) 67 | go func() { 68 | select { 69 | case <-ctx.Done(): 70 | // We received an interrupt signal, shut down. 71 | if err := httpSrv.Shutdown(ctx); err != nil { 72 | // Error from closing listeners, or context timeout: 73 | _ = level.Error(h.logger).Log("msg", "Shutdown HTTP server", "err", err) 74 | } 75 | _ = level.Info(h.logger).Log("msg", "Shutdown HTTP server") 76 | close(srvClosed) 77 | } 78 | }() 79 | 80 | if err = httpSrv.ListenAndServe(); err != http.ErrServerClosed { 81 | // Error starting or closing listener: 82 | _ = level.Error(h.logger).Log("msg", "HTTP server ListenAndServe", "err", err) 83 | } 84 | 85 | _ = level.Error(h.logger).Log("msg", "HTTP server exit", "err", err) 86 | <-srvClosed 87 | 88 | return err 89 | } 90 | -------------------------------------------------------------------------------- /sidecar/kubernetes/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 6 | # Use of this source code is governed by a Apache license 7 | # that can be found in the LICENSE file. 
8 | 9 | FROM golang:1.13 as tenant-sidecar 10 | 11 | COPY cmd/main.go / 12 | WORKDIR / 13 | ENV GOPROXY=https://goproxy.io 14 | RUN CGO_ENABLED=0 GO111MODULE=on go build -i -ldflags '-w -s' -o tenant-sidecar main.go 15 | 16 | FROM kubespheredev/distroless-static:nonroot 17 | WORKDIR / 18 | COPY --from=tenant-sidecar /tenant-sidecar . 19 | USER nonroot:nonroot 20 | 21 | ENTRYPOINT ["/tenant-sidecar"] 22 | 23 | 24 | -------------------------------------------------------------------------------- /sidecar/kubernetes/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | IMG ?= kubesphere/notification-tenant-sidecar-kubernetes:v1.0.0 6 | AMD64 ?= -amd64 7 | 8 | all: docker-build 9 | 10 | # Build tenant sidecar binary 11 | tenant-sidecar: 12 | go build -o tenant-sidecar cmd/main.go 13 | 14 | # Build the docker image 15 | docker-build: 16 | docker buildx build --platform linux/amd64,linux/arm64 --push -f Dockerfile -t ${IMG} . 17 | 18 | # Build the docker image for arm64 19 | docker-build-amd64: 20 | docker build -f Dockerfile -t ${IMG}${AMD64} . 21 | 22 | # Push the docker image 23 | push-amd64: 24 | docker push ${IMG}${AMD64} 25 | -------------------------------------------------------------------------------- /sidecar/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Notification tenant sidecar 2 | 3 | It is a tenant sidecar for kubernetes, just a sample. -------------------------------------------------------------------------------- /sidecar/kubernetes/cmd/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2020 The KubeSphere Authors. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "flag" 21 | "fmt" 22 | "github.com/golang/glog" 23 | "log" 24 | "net/http" 25 | "sync" 26 | 27 | "github.com/emicklei/go-restful" 28 | "github.com/spf13/cobra" 29 | "github.com/spf13/pflag" 30 | ) 31 | 32 | var waitHandlerGroup sync.WaitGroup 33 | 34 | func main() { 35 | 36 | cmd := NewServerCommand() 37 | 38 | if err := cmd.Execute(); err != nil { 39 | log.Fatalln(err) 40 | } 41 | } 42 | 43 | func NewServerCommand() *cobra.Command { 44 | cmd := &cobra.Command{ 45 | Use: "notification-adapter", 46 | Long: `The webhook to receive alert from notification manager, and send to socket`, 47 | RunE: func(cmd *cobra.Command, args []string) error { 48 | return Run() 49 | }, 50 | } 51 | cmd.Flags().AddGoFlagSet(flag.CommandLine) 52 | 53 | return cmd 54 | } 55 | 56 | func Run() error { 57 | 58 | pflag.VisitAll(func(flag *pflag.Flag) { 59 | glog.Errorf("FLAG: --%s=%q", flag.Name, flag.Value) 60 | }) 61 | 62 | return httpserver() 63 | } 64 | 65 | func httpserver() error { 66 | container := restful.NewContainer() 67 | ws := new(restful.WebService) 68 | ws.Path("/api/v2"). 69 | Consumes(restful.MIME_JSON). 
70 | Produces(restful.MIME_JSON) 71 | ws.Route(ws.GET("/tenant").To(handler)) 72 | ws.Route(ws.GET("/readiness").To(readiness)) 73 | ws.Route(ws.GET("/liveness").To(readiness)) 74 | ws.Route(ws.GET("/preStop").To(preStop)) 75 | 76 | container.Add(ws) 77 | 78 | server := &http.Server{ 79 | Addr: ":19094", 80 | Handler: container, 81 | } 82 | 83 | if err := server.ListenAndServe(); err != nil { 84 | glog.Fatal(err) 85 | } 86 | 87 | return nil 88 | } 89 | 90 | func handler(req *restful.Request, resp *restful.Response) { 91 | 92 | waitHandlerGroup.Add(1) 93 | defer waitHandlerGroup.Done() 94 | 95 | ns := req.QueryParameter("namespace") 96 | if len(ns) == 0 { 97 | responseWithHeaderAndEntity(resp, http.StatusBadRequest, "namespace must not be nil") 98 | return 99 | } 100 | 101 | fmt.Printf("get tenants with namespace `%s`", ns) 102 | 103 | tenants := []string{ns} 104 | responseWithJson(resp, tenants) 105 | } 106 | 107 | // readiness 108 | func readiness(_ *restful.Request, resp *restful.Response) { 109 | 110 | responseWithHeaderAndEntity(resp, http.StatusOK, "") 111 | } 112 | 113 | // preStop 114 | func preStop(_ *restful.Request, resp *restful.Response) { 115 | 116 | glog.Errorf("waitting for message handler close") 117 | waitHandlerGroup.Wait() 118 | glog.Errorf("message handler closed") 119 | responseWithHeaderAndEntity(resp, http.StatusOK, "") 120 | glog.Flush() 121 | } 122 | 123 | func responseWithJson(resp *restful.Response, value interface{}) { 124 | e := resp.WriteAsJson(value) 125 | if e != nil { 126 | glog.Errorf("response error %s", e) 127 | } 128 | } 129 | 130 | func responseWithHeaderAndEntity(resp *restful.Response, status int, value interface{}) { 131 | e := resp.WriteHeaderAndEntity(status, value) 132 | if e != nil { 133 | glog.Errorf("response error %s", e) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /sidecar/kubernetes/test/get-tenants.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | curl -XGET http://127.0.0.1:19094/api/v2/tenant?namespace=test 3 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.1.0/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 6 | # Use of this source code is governed by a Apache license 7 | # that can be found in the LICENSE file. 8 | 9 | FROM golang:1.13 as tenant-sidecar 10 | 11 | COPY / / 12 | COPY pkg/ pkg/ 13 | WORKDIR / 14 | ENV GOPROXY=https://goproxy.io 15 | RUN CGO_ENABLED=0 GO111MODULE=on go build -a -i -ldflags '-w -s' -o tenant-sidecar cmd/main.go 16 | 17 | FROM kubesphere/distroless-static:nonroot 18 | WORKDIR / 19 | COPY --from=tenant-sidecar /tenant-sidecar . 20 | USER nonroot:nonroot 21 | 22 | ENTRYPOINT ["/tenant-sidecar"] 23 | 24 | 25 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.1.0/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | IMG ?= kubesphere/notification-tenant-sidecar:v3.1.0 6 | AMD64 ?= -amd64 7 | 8 | all: docker-build 9 | 10 | # Build tenant sidecar binary 11 | tenant-sidecar: 12 | go build -o tenant-sidecar cmd/main.go 13 | 14 | # Build the docker image 15 | docker-build: 16 | docker buildx build --platform linux/amd64,linux/arm64 --push -f Dockerfile -t ${IMG} . 
17 | 18 | # Build the docker image for arm64 19 | docker-build-amd64: 20 | docker build -f Dockerfile -t ${IMG}${AMD64} . 21 | 22 | # Push the docker image 23 | push-amd64: 24 | docker push ${IMG}${AMD64} 25 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.1.0/README.md: -------------------------------------------------------------------------------- 1 | # Notification tenant sidecar 2 | 3 | It is a tenant sidecar for kubesphere v3.1.0. -------------------------------------------------------------------------------- /sidecar/kubesphere/3.1.0/go.mod: -------------------------------------------------------------------------------- 1 | module kubesphere 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/emicklei/go-restful v2.16.0+incompatible 7 | github.com/spf13/cobra v1.1.1 8 | github.com/spf13/pflag v1.0.5 9 | k8s.io/api v0.19.3 10 | k8s.io/apimachinery v0.19.3 11 | k8s.io/apiserver v0.19.3 12 | k8s.io/client-go v12.0.0+incompatible 13 | k8s.io/klog v1.0.0 14 | kubesphere.io/kubesphere v0.0.0-20210430091105-1f57ec2e38f2 15 | sigs.k8s.io/controller-runtime v0.6.4 16 | ) 17 | 18 | replace ( 19 | github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.4.0 20 | k8s.io/api => k8s.io/api v0.18.6 21 | k8s.io/apiserver => k8s.io/apiserver v0.18.6 22 | k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.6 23 | k8s.io/client-go => k8s.io/client-go v0.18.6 24 | k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 25 | k8s.io/kubectl => k8s.io/kubectl v0.18.6 26 | kubesphere.io/client-go v0.0.0 => kubesphere.io/client-go v0.3.1 27 | ) 28 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.1.0/pkg/tenant/tenant.go: -------------------------------------------------------------------------------- 1 | package tenant 2 | 3 | import ( 4 | "kubesphere/pkg/ks" 5 | 6 | "k8s.io/klog" 7 | ) 8 | 9 | var ( 10 | tenants map[string]map[string]string 11 | ) 
12 | 13 | func FromNamespace(ns string) []string { 14 | 15 | m, ok := tenants[ns] 16 | if !ok { 17 | return nil 18 | } 19 | 20 | array := make([]string, 0) 21 | for k := range m { 22 | array = append(array, k) 23 | } 24 | return array 25 | } 26 | 27 | func Reload(r *ks.Runtime) error { 28 | 29 | m := make(map[string]map[string]string) 30 | 31 | users, err := r.ListUser() 32 | if err != nil { 33 | klog.Errorf("list users error, %s", err.Error()) 34 | return err 35 | } 36 | 37 | for _, u := range users { 38 | 39 | workspaces, err := r.ListWorkspaces(u) 40 | if err != nil { 41 | klog.Errorf("list workspaces error, %s", err.Error()) 42 | return err 43 | } 44 | workspaces = append(workspaces, "") 45 | for _, workspace := range workspaces { 46 | namespaces, err := r.ListNamespaces(u, workspace) 47 | if err != nil { 48 | klog.Errorf("list namespaces error, %s", err.Error()) 49 | return err 50 | } 51 | 52 | for _, namespace := range namespaces { 53 | array, ok := m[namespace] 54 | if !ok { 55 | array = make(map[string]string) 56 | } 57 | 58 | array[u.Name] = "" 59 | m[namespace] = array 60 | } 61 | } 62 | } 63 | 64 | tenants = m 65 | 66 | klog.Info("reload tenant") 67 | return nil 68 | } 69 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.1.0/test/get-tenants.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | curl -XGET http://127.0.0.1:19094/api/v2/tenant?namespace=kubesphere-monitoring-system 3 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.2.0/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use of this source code is governed by a Apache license 2 | # that can be found in the LICENSE file. 
3 | 4 | FROM golang:1.16 as tenant-sidecar 5 | 6 | COPY / / 7 | COPY pkg/ pkg/ 8 | WORKDIR / 9 | ENV GOPROXY=https://goproxy.cn 10 | RUN CGO_ENABLED=0 GO111MODULE=on go build -a -i -ldflags '-w -s' -o tenant-sidecar cmd/main.go 11 | 12 | FROM kubesphere/distroless-static:nonroot 13 | WORKDIR / 14 | COPY --from=tenant-sidecar /tenant-sidecar . 15 | USER nonroot:nonroot 16 | 17 | ENTRYPOINT ["/tenant-sidecar"] 18 | 19 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.2.0/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | IMG ?= kubesphere/notification-tenant-sidecar:v3.2.0 6 | AMD64 ?= -amd64 7 | 8 | all: docker-build 9 | 10 | # Build tenant sidecar binary 11 | tenant-sidecar: 12 | go build -o tenant-sidecar cmd/main.go 13 | 14 | # Build the docker image 15 | docker-build: 16 | docker buildx build --platform linux/amd64,linux/arm64 --push -f Dockerfile -t ${IMG} . 17 | 18 | # Build the docker image for arm64 19 | docker-build-amd64: 20 | docker build -f Dockerfile -t ${IMG}${AMD64} . 21 | 22 | # Push the docker image 23 | push-amd64: 24 | docker push ${IMG}${AMD64} 25 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.2.0/README.md: -------------------------------------------------------------------------------- 1 | # Notification tenant sidecar 2 | 3 | It is a tenant sidecar for kubesphere v3.2.0. 
-------------------------------------------------------------------------------- /sidecar/kubesphere/3.2.0/pkg/tenant/tenant.go: -------------------------------------------------------------------------------- 1 | package tenant 2 | 3 | import ( 4 | "kubesphere/pkg/ks" 5 | 6 | "k8s.io/klog" 7 | ) 8 | 9 | var ( 10 | tenants map[string]map[string]string 11 | ) 12 | 13 | func FromNamespace(ns string) []string { 14 | 15 | m, ok := tenants[ns] 16 | if !ok { 17 | return nil 18 | } 19 | 20 | array := make([]string, 0) 21 | for k := range m { 22 | array = append(array, k) 23 | } 24 | return array 25 | } 26 | 27 | func Reload(r *ks.Runtime) error { 28 | 29 | m := make(map[string]map[string]string) 30 | 31 | users, err := r.ListUser() 32 | if err != nil { 33 | klog.Errorf("list users error, %s", err.Error()) 34 | return err 35 | } 36 | 37 | for _, u := range users { 38 | 39 | workspaces, err := r.ListWorkspaces(u) 40 | if err != nil { 41 | klog.Errorf("list workspaces error, %s", err.Error()) 42 | return err 43 | } 44 | workspaces = append(workspaces, "") 45 | for _, workspace := range workspaces { 46 | namespaces, err := r.ListNamespaces(u, workspace) 47 | if err != nil { 48 | klog.Errorf("list namespaces error, %s", err.Error()) 49 | return err 50 | } 51 | 52 | for _, namespace := range namespaces { 53 | array, ok := m[namespace] 54 | if !ok { 55 | array = make(map[string]string) 56 | } 57 | 58 | array[u.Name] = "" 59 | m[namespace] = array 60 | } 61 | } 62 | } 63 | 64 | tenants = m 65 | 66 | klog.Info("reload tenant") 67 | return nil 68 | } 69 | -------------------------------------------------------------------------------- /sidecar/kubesphere/3.2.0/test/get-tenants.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | curl -XGET http://127.0.0.1:19094/api/v2/tenant?namespace=kubesphere-monitoring-system 3 | -------------------------------------------------------------------------------- 
/sidecar/kubesphere/4.0.0/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use of this source code is governed by a Apache license 2 | # that can be found in the LICENSE file. 3 | 4 | FROM golang:1.20 as tenant-sidecar 5 | 6 | COPY / / 7 | WORKDIR / 8 | ENV GOPROXY=https://goproxy.io 9 | RUN CGO_ENABLED=0 GO111MODULE=on go build -a -o tenant-sidecar main.go backend.go 10 | 11 | FROM kubesphere/distroless-static:nonroot 12 | WORKDIR / 13 | COPY --from=tenant-sidecar /tenant-sidecar . 14 | USER nonroot:nonroot 15 | 16 | ENTRYPOINT ["/tenant-sidecar"] 17 | 18 | -------------------------------------------------------------------------------- /sidecar/kubesphere/4.0.0/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2018 The KubeSphere Authors. All rights reserved. 2 | # Use of this source code is governed by a Apache license 3 | # that can be found in the LICENSE file. 4 | 5 | IMG ?= kubesphere/notification-tenant-sidecar:v4.0.2 6 | AMD64 ?= -amd64 7 | 8 | all: docker-build 9 | 10 | # Build tenant sidecar binary 11 | tenant-sidecar: 12 | go build -o tenant-sidecar main.go backend.go 13 | 14 | # Build the docker image 15 | docker-build: 16 | docker buildx build --platform linux/amd64,linux/arm64 --push -f Dockerfile -t ${IMG} . 17 | 18 | # Build the docker image for arm64 19 | docker-build-amd64: 20 | docker build -f Dockerfile -t ${IMG}${AMD64} . 21 | 22 | # Push the docker image 23 | push-amd64: 24 | docker push ${IMG}${AMD64} 25 | -------------------------------------------------------------------------------- /sidecar/kubesphere/4.0.0/README.md: -------------------------------------------------------------------------------- 1 | # Notification tenant sidecar 2 | 3 | It is a tenant sidecar for kubesphere v4.0.0. 
-------------------------------------------------------------------------------- /sidecar/kubesphere/4.0.0/go.mod: -------------------------------------------------------------------------------- 1 | // This is a generated file. Do not edit directly. 2 | // Run hack/pin-dependency.sh to change pinned dependency versions. 3 | // Run hack/update-vendor.sh to update go.mod files and the vendor directory. 4 | 5 | module sidecar 6 | 7 | go 1.20 8 | 9 | require ( 10 | github.com/emicklei/go-restful/v3 v3.11.0 11 | github.com/spf13/cobra v1.7.0 12 | github.com/spf13/pflag v1.0.5 13 | k8s.io/api v0.28.2 14 | k8s.io/klog v1.0.0 15 | kubesphere.io/api v0.0.0 16 | kubesphere.io/client-go v0.3.2-0.20231222062608-a76cf3626c7e 17 | ) 18 | 19 | require ( 20 | github.com/go-logr/logr v1.2.4 // indirect 21 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 22 | github.com/go-openapi/jsonreference v0.20.2 // indirect 23 | github.com/go-openapi/swag v0.22.3 // indirect 24 | github.com/gogo/protobuf v1.3.2 // indirect 25 | github.com/golang/protobuf v1.5.3 // indirect 26 | github.com/google/gnostic-models v0.6.8 // indirect 27 | github.com/google/go-cmp v0.5.9 // indirect 28 | github.com/google/gofuzz v1.2.0 // indirect 29 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 30 | github.com/josharian/intern v1.0.0 // indirect 31 | github.com/json-iterator/go v1.1.12 // indirect 32 | github.com/mailru/easyjson v0.7.7 // indirect 33 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 34 | github.com/modern-go/reflect2 v1.0.2 // indirect 35 | golang.org/x/net v0.33.0 // indirect 36 | golang.org/x/oauth2 v0.12.0 // indirect 37 | golang.org/x/text v0.21.0 // indirect 38 | golang.org/x/time v0.3.0 // indirect 39 | google.golang.org/appengine v1.6.7 // indirect 40 | google.golang.org/protobuf v1.33.0 // indirect 41 | gopkg.in/inf.v0 v0.9.1 // indirect 42 | gopkg.in/yaml.v2 v2.4.0 // indirect 43 | gopkg.in/yaml.v3 v3.0.1 // indirect 44 | 
k8s.io/apimachinery v0.28.2 // indirect 45 | k8s.io/client-go v0.28.2 // indirect 46 | k8s.io/klog/v2 v2.100.1 // indirect 47 | k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect 48 | k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect 49 | sigs.k8s.io/controller-runtime v0.16.0 // indirect 50 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 51 | sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect 52 | sigs.k8s.io/yaml v1.3.0 // indirect 53 | ) 54 | 55 | replace ( 56 | github.com/emicklei/go-restful v2.9.6+incompatible => github.com/emicklei/go-restful/v3 v3.8.0 57 | kubesphere.io/api => kubesphere.io/api v0.0.0-20240307025220-d785e90d0c05 58 | ) 59 | -------------------------------------------------------------------------------- /sidecar/kubesphere/4.0.0/test/get-tenants.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | curl -XGET http://127.0.0.1:19094/api/v2/tenant?cluster=host&namespace=kubesphere-monitoring-system 3 | -------------------------------------------------------------------------------- /test/send_alerts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | curl -XPOST -d @testdata/alert.json http://127.0.0.1:19093/api/v2/alerts 3 | # curl -XPOST -d @./alert.json http://notification-manager-svc.kubesphere-monitoring-system.svc:19093/api/v2/alerts 4 | -------------------------------------------------------------------------------- /test/testdata/alert1.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Critical", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubePodCrashLooping", 9 | "container": "busybox-3jb7u6", 10 | "instance": "10.233.71.230:8080", 11 | "job": "kube-state-metrics", 12 | "namespace": "pp1", 13 | "pod": "dd1-0", 14 | "prometheus": "kubesphere-monitoring-system/k8s", 
15 | "severity": "critical" 16 | }, 17 | "annotations": { 18 | "message": "Pod pp1/dd1-0 (busybox-3jb7u6) is restarting 1.07 times / 5 minutes.", 19 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodcrashlooping" 20 | }, 21 | "startsAt": "2020-02-26T07:05:04.989876849Z", 22 | "endsAt": "0001-01-01T00:00:00Z", 23 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=rate%28kube_pod_container_status_restarts_total%7Bjob%3D%22kube-state-metrics%22%7D%5B15m%5D%29+%2A+60+%2A+5+%3E+0\u0026g0.tab=1", 24 | "fingerprint": "a4c6c4f7a49ca0ae" 25 | } 26 | ], 27 | "groupLabels": { 28 | "alertname": "KubePodCrashLooping", 29 | "namespace": "pp1" 30 | }, 31 | "commonLabels": { 32 | "alertname": "KubePodCrashLooping", 33 | "container": "busybox-3jb7u6", 34 | "instance": "10.233.71.230:8080", 35 | "job": "kube-state-metrics", 36 | "namespace": "pp1", 37 | "pod": "dd1-0", 38 | "prometheus": "kubesphere-monitoring-system/k8s", 39 | "severity": "critical" 40 | }, 41 | "commonAnnotations": { 42 | "message": "Pod pp1/dd1-0 (busybox-3jb7u6) is restarting 1.07 times / 5 minutes.", 43 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodcrashlooping" 44 | }, 45 | "externalURL": "http://alertmanager-main-2:9093" 46 | } -------------------------------------------------------------------------------- /test/testdata/alert10.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Critical", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubePodNotReady", 9 | "namespace": "t1", 10 | "pod": "s2i-1-viafnz-5474c8b5b7-px7kz", 11 | "prometheus": "kubesphere-monitoring-system/k8s", 12 | "severity": "critical" 13 | }, 14 | "annotations": { 15 | "message": "Pod t1/s2i-1-viafnz-5474c8b5b7-px7kz has been in a non-ready state for longer than 15 minutes.", 16 | 
"runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodnotready" 17 | }, 18 | "startsAt": "2020-02-26T07:04:04.989876849Z", 19 | "endsAt": "0001-01-01T00:00:00Z", 20 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=sum+by%28namespace%2C+pod%29+%28max+by%28namespace%2C+pod%29+%28kube_pod_status_phase%7Bjob%3D%22kube-state-metrics%22%2Cphase%3D~%22Pending%7CUnknown%22%7D%29+%2A+on%28namespace%2C+pod%29+group_left%28owner_kind%29+max+by%28namespace%2C+pod%2C+owner_kind%29+%28kube_pod_owner%7Bowner_kind%21%3D%22Job%22%7D%29%29+%3E+0\u0026g0.tab=1", 21 | "fingerprint": "6362d3bf47e6c5a5" 22 | }, 23 | { 24 | "status": "firing", 25 | "labels": { 26 | "alertname": "KubePodNotReady", 27 | "namespace": "t1", 28 | "pod": "s2i-1-viafnz-89f49c678-2mzsm", 29 | "prometheus": "kubesphere-monitoring-system/k8s", 30 | "severity": "critical" 31 | }, 32 | "annotations": { 33 | "message": "Pod t1/s2i-1-viafnz-89f49c678-2mzsm has been in a non-ready state for longer than 15 minutes.", 34 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodnotready" 35 | }, 36 | "startsAt": "2020-02-26T07:04:04.989876849Z", 37 | "endsAt": "0001-01-01T00:00:00Z", 38 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=sum+by%28namespace%2C+pod%29+%28max+by%28namespace%2C+pod%29+%28kube_pod_status_phase%7Bjob%3D%22kube-state-metrics%22%2Cphase%3D~%22Pending%7CUnknown%22%7D%29+%2A+on%28namespace%2C+pod%29+group_left%28owner_kind%29+max+by%28namespace%2C+pod%2C+owner_kind%29+%28kube_pod_owner%7Bowner_kind%21%3D%22Job%22%7D%29%29+%3E+0\u0026g0.tab=1", 39 | "fingerprint": "3e0404fc5e56b99d" 40 | } 41 | ], 42 | "groupLabels": { 43 | "alertname": "KubePodNotReady", 44 | "namespace": "t1" 45 | }, 46 | "commonLabels": { 47 | "alertname": "KubePodNotReady", 48 | "namespace": "t1", 49 | "prometheus": "kubesphere-monitoring-system/k8s", 50 | "severity": "critical" 51 | }, 
52 | "commonAnnotations": { 53 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodnotready" 54 | }, 55 | "externalURL": "http://alertmanager-main-2:9093" 56 | } -------------------------------------------------------------------------------- /test/testdata/alert11.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Critical", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeDeploymentReplicasMismatch", 9 | "deployment": "nginx", 10 | "instance": "10.233.71.230:8080", 11 | "job": "kube-state-metrics", 12 | "namespace": "default", 13 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 14 | "prometheus": "kubesphere-monitoring-system/k8s", 15 | "severity": "critical" 16 | }, 17 | "annotations": { 18 | "message": "Deployment default/nginx has not matched the expected number of replicas for longer than 15 minutes.", 19 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentreplicasmismatch" 20 | }, 21 | "startsAt": "2020-02-26T07:04:04.989876849Z", 22 | "endsAt": "0001-01-01T00:00:00Z", 23 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=kube_deployment_spec_replicas%7Bjob%3D%22kube-state-metrics%22%7D+%21%3D+kube_deployment_status_replicas_available%7Bjob%3D%22kube-state-metrics%22%7D\u0026g0.tab=1", 24 | "fingerprint": "62b01482fb2e79e3" 25 | } 26 | ], 27 | "groupLabels": { 28 | "alertname": "KubeDeploymentReplicasMismatch", 29 | "namespace": "default" 30 | }, 31 | "commonLabels": { 32 | "alertname": "KubeDeploymentReplicasMismatch", 33 | "deployment": "nginx", 34 | "instance": "10.233.71.230:8080", 35 | "job": "kube-state-metrics", 36 | "namespace": "default", 37 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 38 | "prometheus": "kubesphere-monitoring-system/k8s", 39 | "severity": "critical" 40 | }, 41 | 
"commonAnnotations": { 42 | "message": "Deployment default/nginx has not matched the expected number of replicas for longer than 15 minutes.", 43 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentreplicasmismatch" 44 | }, 45 | "externalURL": "http://alertmanager-main-2:9093" 46 | } -------------------------------------------------------------------------------- /test/testdata/alert12.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeJobFailed", 9 | "condition": "true", 10 | "instance": "10.233.71.230:8080", 11 | "job": "kube-state-metrics", 12 | "job_name": "dddd", 13 | "namespace": "demo-project", 14 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 15 | "prometheus": "kubesphere-monitoring-system/k8s", 16 | "severity": "warning" 17 | }, 18 | "annotations": { 19 | "message": "Job demo-project/dddd failed to complete.", 20 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobfailed" 21 | }, 22 | "startsAt": "2020-02-26T07:04:04.989876849Z", 23 | "endsAt": "0001-01-01T00:00:00Z", 24 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=kube_job_failed%7Bjob%3D%22kube-state-metrics%22%7D+%3E+0\u0026g0.tab=1", 25 | "fingerprint": "3c8dec50ef8cf6fd" 26 | } 27 | ], 28 | "groupLabels": { 29 | "alertname": "KubeJobFailed", 30 | "namespace": "demo-project" 31 | }, 32 | "commonLabels": { 33 | "alertname": "KubeJobFailed", 34 | "condition": "true", 35 | "instance": "10.233.71.230:8080", 36 | "job": "kube-state-metrics", 37 | "job_name": "dddd", 38 | "namespace": "demo-project", 39 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 40 | "prometheus": "kubesphere-monitoring-system/k8s", 41 | "severity": "warning" 42 | }, 43 | "commonAnnotations": { 44 | "message": 
"Job demo-project/dddd failed to complete.", 45 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobfailed" 46 | }, 47 | "externalURL": "http://alertmanager-main-2:9093" 48 | } -------------------------------------------------------------------------------- /test/testdata/alert13.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeContainerWaiting", 9 | "container": "container-ztj830", 10 | "namespace": "t1", 11 | "pod": "s2i-1-viafnz-89f49c678-2mzsm", 12 | "prometheus": "kubesphere-monitoring-system/k8s", 13 | "severity": "warning" 14 | }, 15 | "annotations": { 16 | "message": "Pod t1/s2i-1-viafnz-89f49c678-2mzsm container container-ztj830 has been in waiting state for longer than 1 hour.", 17 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting" 18 | }, 19 | "startsAt": "2020-02-26T07:49:04.989876849Z", 20 | "endsAt": "0001-01-01T00:00:00Z", 21 | "generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28namespace%2C+pod%2C+container%29+%28kube_pod_container_status_waiting_reason%7Bjob%3D%22kube-state-metrics%22%7D%29+%3E+0\u0026g0.tab=1", 22 | "fingerprint": "838279128e4c0505" 23 | } 24 | ], 25 | "groupLabels": { 26 | "alertname": "KubeContainerWaiting", 27 | "namespace": "t1" 28 | }, 29 | "commonLabels": { 30 | "alertname": "KubeContainerWaiting", 31 | "container": "container-ztj830", 32 | "namespace": "t1", 33 | "pod": "s2i-1-viafnz-89f49c678-2mzsm", 34 | "prometheus": "kubesphere-monitoring-system/k8s", 35 | "severity": "warning" 36 | }, 37 | "commonAnnotations": { 38 | "message": "Pod t1/s2i-1-viafnz-89f49c678-2mzsm container container-ztj830 has been in waiting state for longer than 1 hour.", 39 | "runbook_url": 
"https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting" 40 | }, 41 | "externalURL": "http://alertmanager-main-1:9093" 42 | } -------------------------------------------------------------------------------- /test/testdata/alert14.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "AlertmanagerFailedReload", 9 | "endpoint": "web", 10 | "instance": "10.233.71.231:9093", 11 | "job": "alertmanager-main", 12 | "namespace": "kubesphere-monitoring-system", 13 | "pod": "alertmanager-main-0", 14 | "prometheus": "kubesphere-monitoring-system/k8s", 15 | "service": "alertmanager-main", 16 | "severity": "warning" 17 | }, 18 | "annotations": { 19 | "message": "Reloading Alertmanager's configuration has failed for kubesphere-monitoring-system/alertmanager-main-0." 20 | }, 21 | "startsAt": "2020-04-17T01:16:28.952838294Z", 22 | "endsAt": "0001-01-01T00:00:00Z", 23 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=alertmanager_config_last_reload_successful%7Bjob%3D%22alertmanager-main%22%2Cnamespace%3D%22kubesphere-monitoring-system%22%7D+%3D%3D+0\u0026g0.tab=1", 24 | "fingerprint": "5cb96f8b51c2b647" 25 | } 26 | ], 27 | "groupLabels": { 28 | "alertname": "AlertmanagerFailedReload", 29 | "namespace": "kubesphere-monitoring-system" 30 | }, 31 | "commonLabels": { 32 | "alertname": "AlertmanagerFailedReload", 33 | "endpoint": "web", 34 | "instance": "10.233.71.231:9093", 35 | "job": "alertmanager-main", 36 | "namespace": "kubesphere-monitoring-system", 37 | "pod": "alertmanager-main-0", 38 | "prometheus": "kubesphere-monitoring-system/k8s", 39 | "service": "alertmanager-main", 40 | "severity": "warning" 41 | }, 42 | "commonAnnotations": { 43 | "message": "Reloading Alertmanager's configuration has failed for 
kubesphere-monitoring-system/alertmanager-main-0." 44 | }, 45 | "externalURL": "http://alertmanager-main-1:9093" 46 | } -------------------------------------------------------------------------------- /test/testdata/alert15.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeContainerWaiting", 9 | "container": "container-ztj830", 10 | "namespace": "t1", 11 | "pod": "s2i-1-viafnz-5474c8b5b7-px7kz", 12 | "prometheus": "kubesphere-monitoring-system/k8s", 13 | "severity": "warning" 14 | }, 15 | "annotations": { 16 | "message": "Pod t1/s2i-1-viafnz-5474c8b5b7-px7kz container container-ztj830 has been in waiting state for longer than 1 hour.", 17 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting" 18 | }, 19 | "startsAt": "2020-02-26T07:49:04.989876849Z", 20 | "endsAt": "0001-01-01T00:00:00Z", 21 | "generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28namespace%2C+pod%2C+container%29+%28kube_pod_container_status_waiting_reason%7Bjob%3D%22kube-state-metrics%22%7D%29+%3E+0\u0026g0.tab=1", 22 | "fingerprint": "eb4aaaee1917a07b" 23 | }, 24 | { 25 | "status": "firing", 26 | "labels": { 27 | "alertname": "KubeContainerWaiting", 28 | "container": "container-ztj830", 29 | "namespace": "t1", 30 | "pod": "s2i-1-viafnz-89f49c678-2mzsm", 31 | "prometheus": "kubesphere-monitoring-system/k8s", 32 | "severity": "warning" 33 | }, 34 | "annotations": { 35 | "message": "Pod t1/s2i-1-viafnz-89f49c678-2mzsm container container-ztj830 has been in waiting state for longer than 1 hour.", 36 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting" 37 | }, 38 | "startsAt": "2020-02-26T07:49:04.989876849Z", 39 | "endsAt": "0001-01-01T00:00:00Z", 40 | 
"generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28namespace%2C+pod%2C+container%29+%28kube_pod_container_status_waiting_reason%7Bjob%3D%22kube-state-metrics%22%7D%29+%3E+0\u0026g0.tab=1", 41 | "fingerprint": "838279128e4c0505" 42 | } 43 | ], 44 | "groupLabels": { 45 | "alertname": "KubeContainerWaiting", 46 | "namespace": "t1" 47 | }, 48 | "commonLabels": { 49 | "alertname": "KubeContainerWaiting", 50 | "container": "container-ztj830", 51 | "namespace": "t1", 52 | "prometheus": "kubesphere-monitoring-system/k8s", 53 | "severity": "warning" 54 | }, 55 | "commonAnnotations": { 56 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting" 57 | }, 58 | "externalURL": "http://alertmanager-main-0:9093" 59 | } -------------------------------------------------------------------------------- /test/testdata/alert2.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "CPUThrottlingHigh", 9 | "container": "default-http-backend", 10 | "namespace": "kubesphere-controls-system", 11 | "pod": "default-http-backend-6555ff6898-5w7gk", 12 | "prometheus": "kubesphere-monitoring-system/k8s", 13 | "severity": "warning" 14 | }, 15 | "annotations": { 16 | "message": "30.61% throttling of CPU in namespace kubesphere-controls-system for container default-http-backend in pod default-http-backend-6555ff6898-5w7gk.", 17 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" 18 | }, 19 | "startsAt": "2020-04-17T01:02:44.782098546Z", 20 | "endsAt": "0001-01-01T00:00:00Z", 21 | "generatorURL": 
"http://prometheus-k8s-1:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", 22 | "fingerprint": "7ae66fe9d5b9cab7" 23 | } 24 | ], 25 | "groupLabels": { 26 | "alertname": "CPUThrottlingHigh", 27 | "namespace": "kubesphere-controls-system" 28 | }, 29 | "commonLabels": { 30 | "alertname": "CPUThrottlingHigh", 31 | "container": "default-http-backend", 32 | "namespace": "kubesphere-controls-system", 33 | "pod": "default-http-backend-6555ff6898-5w7gk", 34 | "prometheus": "kubesphere-monitoring-system/k8s", 35 | "severity": "warning" 36 | }, 37 | "commonAnnotations": { 38 | "message": "30.61% throttling of CPU in namespace kubesphere-controls-system for container default-http-backend in pod default-http-backend-6555ff6898-5w7gk.", 39 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" 40 | }, 41 | "externalURL": "http://alertmanager-main-2:9093" 42 | } -------------------------------------------------------------------------------- /test/testdata/alert4.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "CPUThrottlingHigh", 9 | "container": "grafana", 10 | "namespace": "kubesphere-monitoring-system", 11 | "pod": "grafana-6f476b6dd8-2dpzg", 12 | "prometheus": "kubesphere-monitoring-system/k8s", 13 | "severity": "warning" 14 | }, 15 | "annotations": { 16 | "message": "32.26% throttling of CPU in namespace kubesphere-monitoring-system for container grafana in pod grafana-6f476b6dd8-2dpzg.", 17 | "runbook_url": 
"https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" 18 | }, 19 | "startsAt": "2020-04-16T09:07:14.782098546Z", 20 | "endsAt": "0001-01-01T00:00:00Z", 21 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_throttled_periods_total%7Bcontainer%21%3D%22%22%7D%5B5m%5D%29%29+%2F+sum+by%28container%2C+pod%2C+namespace%29+%28increase%28container_cpu_cfs_periods_total%5B5m%5D%29%29+%3E+%2825+%2F+100%29\u0026g0.tab=1", 22 | "fingerprint": "b1f23c7a83eb6df0" 23 | } 24 | ], 25 | "groupLabels": { 26 | "alertname": "CPUThrottlingHigh", 27 | "namespace": "kubesphere-monitoring-system" 28 | }, 29 | "commonLabels": { 30 | "alertname": "CPUThrottlingHigh", 31 | "container": "grafana", 32 | "namespace": "kubesphere-monitoring-system", 33 | "pod": "grafana-6f476b6dd8-2dpzg", 34 | "prometheus": "kubesphere-monitoring-system/k8s", 35 | "severity": "warning" 36 | }, 37 | "commonAnnotations": { 38 | "message": "32.26% throttling of CPU in namespace kubesphere-monitoring-system for container grafana in pod grafana-6f476b6dd8-2dpzg.", 39 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh" 40 | }, 41 | "externalURL": "http://alertmanager-main-2:9093" 42 | } -------------------------------------------------------------------------------- /test/testdata/alert5.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeContainerWaiting", 9 | "container": "container-ztj830", 10 | "namespace": "t1", 11 | "pod": "s2i-1-viafnz-5474c8b5b7-px7kz", 12 | "prometheus": "kubesphere-monitoring-system/k8s", 13 | "severity": "warning" 14 | }, 15 | "annotations": { 16 | "message": "Pod t1/s2i-1-viafnz-5474c8b5b7-px7kz 
container container-ztj830 has been in waiting state for longer than 1 hour.", 17 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting" 18 | }, 19 | "startsAt": "2020-02-26T07:49:04.989876849Z", 20 | "endsAt": "0001-01-01T00:00:00Z", 21 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=sum+by%28namespace%2C+pod%2C+container%29+%28kube_pod_container_status_waiting_reason%7Bjob%3D%22kube-state-metrics%22%7D%29+%3E+0\u0026g0.tab=1", 22 | "fingerprint": "eb4aaaee1917a07b" 23 | } 24 | ], 25 | "groupLabels": { 26 | "alertname": "KubeContainerWaiting", 27 | "namespace": "t1" 28 | }, 29 | "commonLabels": { 30 | "alertname": "KubeContainerWaiting", 31 | "container": "container-ztj830", 32 | "namespace": "t1", 33 | "pod": "s2i-1-viafnz-5474c8b5b7-px7kz", 34 | "prometheus": "kubesphere-monitoring-system/k8s", 35 | "severity": "warning" 36 | }, 37 | "commonAnnotations": { 38 | "message": "Pod t1/s2i-1-viafnz-5474c8b5b7-px7kz container container-ztj830 has been in waiting state for longer than 1 hour.", 39 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting" 40 | }, 41 | "externalURL": "http://alertmanager-main-2:9093" 42 | } -------------------------------------------------------------------------------- /test/testdata/alert6.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "AlertmanagerFailedReload", 9 | "endpoint": "web", 10 | "instance": "10.233.106.136:9093", 11 | "job": "alertmanager-main", 12 | "namespace": "kubesphere-monitoring-system", 13 | "pod": "alertmanager-main-2", 14 | "prometheus": "kubesphere-monitoring-system/k8s", 15 | "service": "alertmanager-main", 16 | "severity": "warning" 17 | }, 18 | "annotations": { 19 | "message": 
"Reloading Alertmanager's configuration has failed for kubesphere-monitoring-system/alertmanager-main-2." 20 | }, 21 | "startsAt": "2020-04-17T01:16:58.952838294Z", 22 | "endsAt": "0001-01-01T00:00:00Z", 23 | "generatorURL": "http://prometheus-k8s-1:9090/graph?g0.expr=alertmanager_config_last_reload_successful%7Bjob%3D%22alertmanager-main%22%2Cnamespace%3D%22kubesphere-monitoring-system%22%7D+%3D%3D+0\u0026g0.tab=1", 24 | "fingerprint": "710e8c79ac0e6052" 25 | }, 26 | { 27 | "status": "firing", 28 | "labels": { 29 | "alertname": "AlertmanagerFailedReload", 30 | "endpoint": "web", 31 | "instance": "10.233.106.137:9093", 32 | "job": "alertmanager-main", 33 | "namespace": "kubesphere-monitoring-system", 34 | "pod": "alertmanager-main-1", 35 | "prometheus": "kubesphere-monitoring-system/k8s", 36 | "service": "alertmanager-main", 37 | "severity": "warning" 38 | }, 39 | "annotations": { 40 | "message": "Reloading Alertmanager's configuration has failed for kubesphere-monitoring-system/alertmanager-main-1." 
41 | }, 42 | "startsAt": "2020-04-17T01:16:28.952838294Z", 43 | "endsAt": "0001-01-01T00:00:00Z", 44 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=alertmanager_config_last_reload_successful%7Bjob%3D%22alertmanager-main%22%2Cnamespace%3D%22kubesphere-monitoring-system%22%7D+%3D%3D+0\u0026g0.tab=1", 45 | "fingerprint": "5e385eac3aacc5fc" 46 | } 47 | ], 48 | "groupLabels": { 49 | "alertname": "AlertmanagerFailedReload", 50 | "namespace": "kubesphere-monitoring-system" 51 | }, 52 | "commonLabels": { 53 | "alertname": "AlertmanagerFailedReload", 54 | "endpoint": "web", 55 | "job": "alertmanager-main", 56 | "namespace": "kubesphere-monitoring-system", 57 | "prometheus": "kubesphere-monitoring-system/k8s", 58 | "service": "alertmanager-main", 59 | "severity": "warning" 60 | }, 61 | "commonAnnotations": {}, 62 | "externalURL": "http://alertmanager-main-2:9093" 63 | } -------------------------------------------------------------------------------- /test/testdata/alert7.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Critical", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeDeploymentReplicasMismatch", 9 | "deployment": "s2i-1-viafnz", 10 | "instance": "10.233.71.230:8080", 11 | "job": "kube-state-metrics", 12 | "namespace": "t1", 13 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 14 | "prometheus": "kubesphere-monitoring-system/k8s", 15 | "severity": "critical" 16 | }, 17 | "annotations": { 18 | "message": "Deployment t1/s2i-1-viafnz has not matched the expected number of replicas for longer than 15 minutes.", 19 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentreplicasmismatch" 20 | }, 21 | "startsAt": "2020-02-26T07:04:04.989876849Z", 22 | "endsAt": "0001-01-01T00:00:00Z", 23 | "generatorURL": 
"http://prometheus-k8s-0:9090/graph?g0.expr=kube_deployment_spec_replicas%7Bjob%3D%22kube-state-metrics%22%7D+%21%3D+kube_deployment_status_replicas_available%7Bjob%3D%22kube-state-metrics%22%7D\u0026g0.tab=1", 24 | "fingerprint": "eb2c25952544e774" 25 | } 26 | ], 27 | "groupLabels": { 28 | "alertname": "KubeDeploymentReplicasMismatch", 29 | "namespace": "t1" 30 | }, 31 | "commonLabels": { 32 | "alertname": "KubeDeploymentReplicasMismatch", 33 | "deployment": "s2i-1-viafnz", 34 | "instance": "10.233.71.230:8080", 35 | "job": "kube-state-metrics", 36 | "namespace": "t1", 37 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 38 | "prometheus": "kubesphere-monitoring-system/k8s", 39 | "severity": "critical" 40 | }, 41 | "commonAnnotations": { 42 | "message": "Deployment t1/s2i-1-viafnz has not matched the expected number of replicas for longer than 15 minutes.", 43 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentreplicasmismatch" 44 | }, 45 | "externalURL": "http://alertmanager-main-2:9093" 46 | } -------------------------------------------------------------------------------- /test/testdata/alert8.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Critical", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeStatefulSetReplicasMismatch", 9 | "instance": "10.233.71.230:8080", 10 | "job": "kube-state-metrics", 11 | "namespace": "pp1", 12 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 13 | "prometheus": "kubesphere-monitoring-system/k8s", 14 | "severity": "critical", 15 | "statefulset": "dd1" 16 | }, 17 | "annotations": { 18 | "message": "StatefulSet pp1/dd1 has not matched the expected number of replicas for longer than 15 minutes.", 19 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetreplicasmismatch" 20 | 
}, 21 | "startsAt": "2020-02-26T07:04:04.989876849Z", 22 | "endsAt": "0001-01-01T00:00:00Z", 23 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=kube_statefulset_status_replicas_ready%7Bjob%3D%22kube-state-metrics%22%7D+%21%3D+kube_statefulset_status_replicas%7Bjob%3D%22kube-state-metrics%22%7D\u0026g0.tab=1", 24 | "fingerprint": "90082e8cd5da0e16" 25 | } 26 | ], 27 | "groupLabels": { 28 | "alertname": "KubeStatefulSetReplicasMismatch", 29 | "namespace": "pp1" 30 | }, 31 | "commonLabels": { 32 | "alertname": "KubeStatefulSetReplicasMismatch", 33 | "instance": "10.233.71.230:8080", 34 | "job": "kube-state-metrics", 35 | "namespace": "pp1", 36 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 37 | "prometheus": "kubesphere-monitoring-system/k8s", 38 | "severity": "critical", 39 | "statefulset": "dd1" 40 | }, 41 | "commonAnnotations": { 42 | "message": "StatefulSet pp1/dd1 has not matched the expected number of replicas for longer than 15 minutes.", 43 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetreplicasmismatch" 44 | }, 45 | "externalURL": "http://alertmanager-main-2:9093" 46 | } -------------------------------------------------------------------------------- /test/testdata/alert9.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Default", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeJobCompletion", 9 | "instance": "10.233.71.230:8080", 10 | "job": "kube-state-metrics", 11 | "job_name": "dddd", 12 | "namespace": "demo-project", 13 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 14 | "prometheus": "kubesphere-monitoring-system/k8s", 15 | "severity": "warning" 16 | }, 17 | "annotations": { 18 | "message": "Job demo-project/dddd is taking more than one hour to complete.", 19 | "runbook_url": 
"https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion" 20 | }, 21 | "startsAt": "2020-02-26T07:49:04.989876849Z", 22 | "endsAt": "0001-01-01T00:00:00Z", 23 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=kube_job_spec_completions%7Bjob%3D%22kube-state-metrics%22%7D+-+kube_job_status_succeeded%7Bjob%3D%22kube-state-metrics%22%7D+%3E+0\u0026g0.tab=1", 24 | "fingerprint": "9dc21fa4c13a99e1" 25 | } 26 | ], 27 | "groupLabels": { 28 | "alertname": "KubeJobCompletion", 29 | "namespace": "demo-project" 30 | }, 31 | "commonLabels": { 32 | "alertname": "KubeJobCompletion", 33 | "instance": "10.233.71.230:8080", 34 | "job": "kube-state-metrics", 35 | "job_name": "dddd", 36 | "namespace": "demo-project", 37 | "pod": "kube-state-metrics-7c6f4866fc-rclv2", 38 | "prometheus": "kubesphere-monitoring-system/k8s", 39 | "severity": "warning" 40 | }, 41 | "commonAnnotations": { 42 | "message": "Job demo-project/dddd is taking more than one hour to complete.", 43 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion" 44 | }, 45 | "externalURL": "http://alertmanager-main-2:9093" 46 | } -------------------------------------------------------------------------------- /test/testdata/alerts-without-namespace.json: -------------------------------------------------------------------------------- 1 | { 2 | "receiver": "Critical", 3 | "status": "firing", 4 | "alerts": [ 5 | { 6 | "status": "firing", 7 | "labels": { 8 | "alertname": "KubeletDown", 9 | "prometheus": "kubesphere-monitoring-system/k8s", 10 | "severity": "critical" 11 | }, 12 | "annotations": { 13 | "message": "Kubelet has disappeared from Prometheus target discovery.", 14 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletdown" 15 | }, 16 | "startsAt": "2020-02-26T07:03:36.952819501Z", 17 | "endsAt": "0001-01-01T00:00:00Z", 
18 | "generatorURL": "http://prometheus-k8s-0:9090/graph?g0.expr=absent%28up%7Bjob%3D%22kubelet%22%2Cmetrics_path%3D%22%2Fmetrics%22%7D+%3D%3D+1%29\u0026g0.tab=1", 19 | "fingerprint": "784778fbea8cd7a6" 20 | } 21 | ], 22 | "groupLabels": { 23 | "alertname": "KubeletDown" 24 | }, 25 | "commonLabels": { 26 | "alertname": "KubeletDown", 27 | "prometheus": "kubesphere-monitoring-system/k8s", 28 | "severity": "critical" 29 | }, 30 | "commonAnnotations": { 31 | "message": "Kubelet has disappeared from Prometheus target discovery.", 32 | "runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletdown" 33 | }, 34 | "externalURL": "http://alertmanager-main-2:9093" 35 | } --------------------------------------------------------------------------------