├── .gitignore ├── .vscode └── settings.json ├── DEV_README.md ├── LICENSE ├── Makefile ├── README.md ├── docs ├── morpheus_demo1.gif ├── morpheus_demo2.gif ├── morpheus_demo3.gif ├── morpheus_demo4.gif ├── morpheus_demo5.gif ├── morpheus_demo6.gif ├── morpheus_demo8.gif ├── morpheus_logo.png └── temp.txt ├── imageConfigs ├── grafana │ ├── grafana.ini │ └── provisioning │ │ ├── dashboards │ │ ├── additional-system.json │ │ ├── cadvisor-dashboard.json │ │ ├── dashboard.json │ │ ├── dashboard.yml │ │ ├── docker_metrics.json │ │ ├── k8s-views-nodes.json │ │ ├── sample.json │ │ └── snapshot-dashboard.json │ │ └── datasources │ │ └── srcProm.yml ├── postgres │ └── init_prod.sql └── prometheus │ └── prometheus.yml ├── jest.config.js ├── jest.setup.js ├── k8sConfig ├── Makefile ├── kind.yaml └── manifests │ ├── alertmanager-alertmanager.yaml │ ├── alertmanager-networkPolicy.yaml │ ├── alertmanager-podDisruptionBudget.yaml │ ├── alertmanager-prometheusRule.yaml │ ├── alertmanager-secret.yaml │ ├── alertmanager-service.yaml │ ├── alertmanager-serviceAccount.yaml │ ├── alertmanager-serviceMonitor.yaml │ ├── blackboxExporter-clusterRole.yaml │ ├── blackboxExporter-clusterRoleBinding.yaml │ ├── blackboxExporter-configuration.yaml │ ├── blackboxExporter-deployment.yaml │ ├── blackboxExporter-networkPolicy.yaml │ ├── blackboxExporter-service.yaml │ ├── blackboxExporter-serviceAccount.yaml │ ├── blackboxExporter-serviceMonitor.yaml │ ├── grafana-config.yaml │ ├── grafana-dashboardDatasources.yaml │ ├── grafana-dashboardDefinitions.yaml │ ├── grafana-dashboardSources.yaml │ ├── grafana-deployment.yaml │ ├── grafana-networkPolicy.yaml │ ├── grafana-prometheusRule.yaml │ ├── grafana-service.yaml │ ├── grafana-serviceAccount.yaml │ ├── grafana-serviceMonitor.yaml │ ├── kubePrometheus-prometheusRule.yaml │ ├── kubeStateMetrics-clusterRole.yaml │ ├── kubeStateMetrics-clusterRoleBinding.yaml │ ├── kubeStateMetrics-deployment.yaml │ ├── kubeStateMetrics-networkPolicy.yaml │ ├── kubeStateMetrics-prometheusRule.yaml │ ├── kubeStateMetrics-service.yaml │ ├── kubeStateMetrics-serviceAccount.yaml │ ├── kubeStateMetrics-serviceMonitor.yaml │ ├── kubernetesControlPlane-prometheusRule.yaml │ ├── kubernetesControlPlane-serviceMonitorApiserver.yaml │ ├── kubernetesControlPlane-serviceMonitorCoreDNS.yaml │ ├── kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml │ ├── kubernetesControlPlane-serviceMonitorKubeScheduler.yaml │ ├── kubernetesControlPlane-serviceMonitorKubelet.yaml │ ├── nodeExporter-clusterRole.yaml │ ├── nodeExporter-clusterRoleBinding.yaml │ ├── nodeExporter-daemonset.yaml │ ├── nodeExporter-networkPolicy.yaml │ ├── nodeExporter-prometheusRule.yaml │ ├── nodeExporter-service.yaml │ ├── nodeExporter-serviceAccount.yaml │ ├── nodeExporter-serviceMonitor.yaml │ ├── prometheus-clusterRole.yaml │ ├── prometheus-clusterRoleBinding.yaml │ ├── prometheus-networkPolicy.yaml │ ├── prometheus-podDisruptionBudget.yaml │ ├── prometheus-prometheus.yaml │ ├── prometheus-prometheusRule.yaml │ ├── prometheus-roleBindingConfig.yaml │ ├── prometheus-roleBindingSpecificNamespaces.yaml │ ├── prometheus-roleConfig.yaml │ ├── prometheus-roleSpecificNamespaces.yaml │ ├── prometheus-service.yaml │ ├── prometheus-serviceAccount.yaml │ ├── prometheus-serviceMonitor.yaml │ ├── prometheusAdapter-apiService.yaml │ ├── prometheusAdapter-clusterRole.yaml │ ├── prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml │ ├── prometheusAdapter-clusterRoleBinding.yaml │ ├── prometheusAdapter-clusterRoleBindingDelegator.yaml │ ├── 
prometheusAdapter-clusterRoleServerResources.yaml │ ├── prometheusAdapter-configMap.yaml │ ├── prometheusAdapter-deployment.yaml │ ├── prometheusAdapter-networkPolicy.yaml │ ├── prometheusAdapter-podDisruptionBudget.yaml │ ├── prometheusAdapter-roleBindingAuthReader.yaml │ ├── prometheusAdapter-service.yaml │ ├── prometheusAdapter-serviceAccount.yaml │ ├── prometheusAdapter-serviceMonitor.yaml │ ├── prometheusOperator-clusterRole.yaml │ ├── prometheusOperator-clusterRoleBinding.yaml │ ├── prometheusOperator-deployment.yaml │ ├── prometheusOperator-networkPolicy.yaml │ ├── prometheusOperator-prometheusRule.yaml │ ├── prometheusOperator-service.yaml │ ├── prometheusOperator-serviceAccount.yaml │ ├── prometheusOperator-serviceMonitor.yaml │ └── setup │ ├── 0alertmanagerConfigCustomResourceDefinition.yaml │ ├── 0alertmanagerCustomResourceDefinition.yaml │ ├── 0podmonitorCustomResourceDefinition.yaml │ ├── 0probeCustomResourceDefinition.yaml │ ├── 0prometheusCustomResourceDefinition.yaml │ ├── 0prometheusagentCustomResourceDefinition.yaml │ ├── 0prometheusruleCustomResourceDefinition.yaml │ ├── 0scrapeconfigCustomResourceDefinition.yaml │ ├── 0servicemonitorCustomResourceDefinition.yaml │ ├── 0thanosrulerCustomResourceDefinition.yaml │ └── namespace.yaml ├── morpheus ├── docker-compose-morpheus.yaml ├── dockerfile.dev └── scripts │ ├── dockerfile-p2p.yaml │ ├── prometheus_to_postgres.py │ └── requirements.txt └── nextui ├── .eslintrc.json ├── .gitignore ├── README.md ├── __tests__ ├── Header.test.tsx ├── MetricsDisplay.test.tsx ├── Routing.test.tsx ├── SystemData.test.tsx ├── getMetrics.test.js └── route.test.js ├── app ├── api │ ├── AIChat │ │ └── route.ts │ ├── aws-bedrock │ │ ├── getMetrics.ts │ │ └── route.ts │ ├── db-metrics │ │ ├── route.ts │ │ └── run-prometheus-to-postgres.ts │ ├── openai-analyze-metrics │ │ └── route.ts │ ├── prometheus-query │ │ └── route.ts │ ├── settings │ │ └── route.ts │ └── v1 │ │ ├── clusterview │ │ └── route.ts │ │ ├── docker │ │ └── containers │ │ │ ├── [id] │ │ │ ├── start │ │ │ │ └── route.ts │ │ │ ├── stats │ │ │ │ └── route.ts │ │ │ └── stop │ │ │ │ └── route.ts │ │ │ └── route.ts │ │ └── usersettings │ │ └── route.ts ├── components │ ├── Header │ │ ├── Dashboard.tsx │ │ ├── Header.tsx │ │ └── page.tsx │ ├── MetricsDisplay.module.css │ ├── MetricsDisplay.tsx │ └── sideBar │ │ ├── page.tsx │ │ └── sideBar.tsx ├── dashboard │ ├── Dashboard.module.scss │ ├── data │ │ └── page.tsx │ ├── page.tsx │ └── settings │ │ ├── actions.ts │ │ └── page.tsx ├── docker │ ├── containers │ │ ├── ContainerActions.tsx │ │ ├── Dashboard.tsx │ │ ├── dashboard.css │ │ └── page.tsx │ ├── login.tsx │ └── page.tsx ├── globals.css ├── kubernetes │ ├── Dashboard.tsx │ ├── DashboardAichat.tsx │ ├── aichat-api.tsx │ ├── aichat-format.tsx │ ├── clusterView.css │ ├── kubernetes.module.scss │ ├── page.tsx │ └── react-graph-vis.d.ts ├── layout.tsx ├── metrics │ ├── Dashboard.tsx │ └── page.tsx ├── navbar.module.scss ├── openai-analyze │ └── page.tsx ├── page.tsx ├── systemData │ ├── Dashboard.tsx │ ├── page.tsx │ └── systemData.module.scss └── theme │ ├── darkTheme.ts │ └── lightTheme.ts ├── db └── pgModel.ts ├── jest.config.js ├── jest.setup.js ├── next.config.mjs ├── package-lock.json ├── package.json ├── postcss.config.mjs ├── public ├── aws-bedrock-logo.png ├── deploy-128.png ├── morpheus-logo.png ├── node-128.png ├── pod-128.png ├── sidebarIcon.png └── svc-128.png ├── tailwind.config.ts └── tsconfig.json /.gitignore: 
-------------------------------------------------------------------------------- 1 | .history 2 | .DS_Store 3 | .env.local 4 | 5 | # Logs 6 | logs 7 | *.log 8 | npm-debug.log* 9 | yarn-debug.log* 10 | yarn-error.log* 11 | lerna-debug.log* 12 | .pnpm-debug.log* 13 | 14 | # Diagnostic reports (https://nodejs.org/api/report.html) 15 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 16 | 17 | # Runtime data 18 | pids 19 | *.pid 20 | *.seed 21 | *.pid.lock 22 | 23 | # Directory for instrumented libs generated by jscoverage/JSCover 24 | lib-cov 25 | 26 | # Coverage directory used by tools like istanbul 27 | coverage 28 | *.lcov 29 | 30 | # nyc test coverage 31 | .nyc_output 32 | 33 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 34 | .grunt 35 | 36 | # Bower dependency directory (https://bower.io/) 37 | bower_components 38 | 39 | # node-waf configuration 40 | .lock-wscript 41 | 42 | # Compiled binary addons (https://nodejs.org/api/addons.html) 43 | build/Release 44 | 45 | # Dependency directories 46 | node_modules/ 47 | jspm_packages/ 48 | 49 | # Snowpack dependency directory (https://snowpack.dev/) 50 | web_modules/ 51 | 52 | # TypeScript cache 53 | *.tsbuildinfo 54 | 55 | # Optional npm cache directory 56 | .npm 57 | 58 | # Optional eslint cache 59 | .eslintcache 60 | 61 | # Optional stylelint cache 62 | .stylelintcache 63 | 64 | # Microbundle cache 65 | .rpt2_cache/ 66 | .rts2_cache_cjs/ 67 | .rts2_cache_es/ 68 | .rts2_cache_umd/ 69 | 70 | # Optional REPL history 71 | .node_repl_history 72 | 73 | # Output of 'npm pack' 74 | *.tgz 75 | 76 | # Yarn Integrity file 77 | .yarn-integrity 78 | 79 | # dotenv environment variable files 80 | .env 81 | .env.development.local 82 | .env.test.local 83 | .env.production.local 84 | .env.local 85 | 86 | # parcel-bundler cache (https://parceljs.org/) 87 | .cache 88 | .parcel-cache 89 | 90 | # Next.js build output 91 | .next 92 | out 93 | 94 | # Nuxt.js build / generate output 95 | .nuxt 96 | dist 97 | 98 | # Gatsby files 99 | .cache/ 100 | # Comment in the public line in if your project uses Gatsby and not Next.js 101 | # https://nextjs.org/blog/next-9-1#public-directory-support 102 | # public 103 | 104 | # vuepress build output 105 | .vuepress/dist 106 | 107 | # vuepress v2.x temp and cache directory 108 | .temp 109 | .cache 110 | 111 | # Docusaurus cache and generated files 112 | .docusaurus 113 | 114 | # Serverless directories 115 | .serverless/ 116 | 117 | # FuseBox cache 118 | .fusebox/ 119 | 120 | # DynamoDB Local files 121 | .dynamodb/ 122 | 123 | # TernJS port file 124 | .tern-port 125 | 126 | # Stores VSCode versions used for testing VSCode extensions 127 | .vscode-test 128 | 129 | # yarn v2 130 | .yarn/cache 131 | .yarn/unplugged 132 | .yarn/build-state.yml 133 | .yarn/install-state.gz 134 | .pnp.* 135 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "workbench.localHistory.enabled": true, 3 | "editor.tabSize": 2, 4 | "editor.formatOnSave": true, 5 | "editor.formatOnPaste": true, 6 | "editor.formatOnType": true, 7 | "editor.codeActionsOnSave": { 8 | "source.fixAll.eslint": "explicit" 9 | }, 10 | "editor.acceptSuggestionOnEnter": "off" 11 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Open Source Labs 
Beta 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Make sure to update versions to whatever the latest is 2 | 3 | IMAGE?=osp/morpheus 4 | 5 | # ONLY CHANGE THIS VERSION TO YOUR GROUP 6 | VERSION?=1.0.0 7 | 8 | DEV_NAME=morpheus-dev 9 | DOCKERFILEDIRECTORY=morpheus 10 | BUILDER=buildx-multi-arch 11 | VITE_DEV_PORT=4000 12 | 13 | INFO_COLOR = \033[0;36m 14 | NO_COLOR = \033[m 15 | 16 | morpheus-new: 17 | docker compose -f ${DOCKERFILEDIRECTORY}/docker-compose-morpheus.yaml up --build -d 18 | 19 | morpheus-dev: 20 | docker compose -f ${DOCKERFILEDIRECTORY}/docker-compose-morpheus.yaml up -d 21 | 22 | morpheus-down: 23 | docker compose -f ${DOCKERFILEDIRECTORY}/docker-compose-morpheus.yaml down -v 24 | 25 | morpheus-rm: 26 | docker image remove -f morpheus-morpheus 27 | docker image remove -f morpheus-prometheus-to-postgres 28 | 29 | build-dev: 30 | docker build -t ${DEV_NAME} -f ${DOCKERFILEDIRECTORY}/dockerfile.dev . 31 | 32 | # NOTE: This will delete EVERYTHING 33 | pruneAll: 34 | docker system prune --all --force --volumes 35 | 36 | image_prune: 37 | docker image prune -af 38 | 39 | volume_prune: 40 | docker volume prune -af 41 | 42 | build_prune: 43 | docker buildx prune -af 44 | 45 | build-prod: 46 | docker build --tag=$(IMAGE):$(VERSION) -f ${DOCKERFILEDIRECTORY}/dockerfile.prod . 47 | 48 | prepare-buildx: ## Create buildx builder for multi-arch build, if not exists 49 | docker buildx inspect $(BUILDER) || docker buildx create --name=$(BUILDER) --driver=docker-container --driver-opt=network=host 50 | 51 | help: 52 | @echo Please specify a build target. 
The choices are: 53 | @grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "$(INFO_COLOR)%-30s$(NO_COLOR) %s\n", $$1, $$2}' 54 | 55 | .PHONY: help -------------------------------------------------------------------------------- /docs/morpheus_demo1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/morpheus_demo1.gif -------------------------------------------------------------------------------- /docs/morpheus_demo2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/morpheus_demo2.gif -------------------------------------------------------------------------------- /docs/morpheus_demo3.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/morpheus_demo3.gif -------------------------------------------------------------------------------- /docs/morpheus_demo4.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/morpheus_demo4.gif -------------------------------------------------------------------------------- /docs/morpheus_demo5.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/morpheus_demo5.gif -------------------------------------------------------------------------------- /docs/morpheus_demo6.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/morpheus_demo6.gif -------------------------------------------------------------------------------- /docs/morpheus_demo8.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/morpheus_demo8.gif -------------------------------------------------------------------------------- /docs/morpheus_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/morpheus_logo.png -------------------------------------------------------------------------------- /docs/temp.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/docs/temp.txt -------------------------------------------------------------------------------- /imageConfigs/grafana/grafana.ini: -------------------------------------------------------------------------------- 1 | [server] 2 | allow_embedding = true 3 | enable_gzip = true 4 | [security] 5 | allow_embedding = true 6 | [plugins] 7 | plugins = boomtheme-panel 8 | enable_alpha = true 9 | # Lets people view the grafana dashboard without needing any authentication 10 | [auth.anonymous] 11 | enabled = true 12 | org_name = Main Org. 
13 | org_role = Viewer 14 | [log] 15 | level = error -------------------------------------------------------------------------------- /imageConfigs/grafana/provisioning/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'Prometheus' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: false 9 | editable: false 10 | allowUiUpdates: true 11 | options: 12 | path: /etc/grafana/provisioning/dashboards -------------------------------------------------------------------------------- /imageConfigs/grafana/provisioning/datasources/srcProm.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | # Tells grafana where to look and what for to get metrics 4 | datasources: 5 | - name: Prometheus 6 | type: prometheus 7 | access: proxy 8 | orgId: 1 9 | url: http://prometheus:9090 10 | basicAuth: false 11 | isDefault: false 12 | editable: true 13 | -------------------------------------------------------------------------------- /imageConfigs/postgres/init_prod.sql: -------------------------------------------------------------------------------- 1 | \connect morpheus 2 | 3 | CREATE TABLE dashboards( 4 | id serial PRIMARY KEY NOT NULL, 5 | name varchar NOT NULL, 6 | type_of integer NOT NULL, 7 | path varchar 8 | ); 9 | 10 | CREATE TABLE services( 11 | id serial PRIMARY KEY NOT NULL, 12 | docker_instance_name varchar NOT NULL, 13 | docker_id varchar NOT NULL 14 | ); 15 | 16 | CREATE TABLE snapshots( 17 | id SERIAL PRIMARY KEY, 18 | metric_date TIMESTAMP WITHOUT TIME ZONE NOT NULL, 19 | cpu_usage DOUBLE PRECISION, 20 | memory_usage DOUBLE PRECISION, 21 | available_memory DOUBLE PRECISION, 22 | network_receive_bytes DOUBLE PRECISION, 23 | network_transmit_bytes DOUBLE PRECISION, 24 | load_average DOUBLE PRECISION, 25 | total_memory DOUBLE PRECISION 26 | ); 27 | 28 | CREATE TABLE usersettings( 29 | firstname varchar NOT NULL, 30 | lastname varchar NOT NULL, 31 | email varchar PRIMARY KEY NOT NULL 32 | ); 33 | 34 | CREATE TABLE IF NOT EXISTS settings ( 35 | id SERIAL PRIMARY KEY, 36 | fetch_interval INTEGER DEFAULT 60, 37 | run_immediately BOOLEAN DEFAULT FALSE 38 | ); 39 | 40 | 41 | CREATE TABLE conversation_history ( 42 | id serial PRIMARY KEY NOT NULL, 43 | conversation JSONB NOT NULL, 44 | last_updated TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP 45 | ); 46 | 47 | ALTER TABLE dashboards OWNER TO admin; 48 | ALTER TABLE services OWNER TO admin; 49 | ALTER TABLE snapshots OWNER TO admin; 50 | ALTER TABLE usersettings OWNER to admin; 51 | ALTER TABLE conversation_history OWNER TO admin; 52 | 53 | INSERT INTO settings (id, fetch_interval, run_immediately) 54 | VALUES (1, 60, FALSE) 55 | ON CONFLICT (id) DO NOTHING; -------------------------------------------------------------------------------- /imageConfigs/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 5s 3 | evaluation_interval: 15s 4 | 5 | scrape_configs: 6 | # Exposes port for data to be queried by grafana, can be viewed at localhost:9090/metrics, or queried at localhost:9090/graph 7 | - job_name: localprometheus 8 | scrape_interval: 5s 9 | static_configs: 10 | - targets: ['localhost:9090', 'node-exporter:9100', 'cadvisor:8080'] 11 | 12 | # - job_name: 'federate' 13 | # metrics_path: '/federate' 14 | # scrape_interval: 15s 15 | # honor_labels: true 16 | # params: 17 | # 'match[]': 18 | # - 
'{job="kubernetes-apiservers"}' 19 | # - '{job="kubernetes-nodes"}' 20 | # - '{job="kubernetes-nodes-cadvisor"}' 21 | # - '{job="kubernetes-service-endpoints"}' 22 | # static_configs: 23 | # - targets: 24 | # - 'host.docker.internal:45555' 25 | # # - 'localhost:45555' 26 | # relabel_configs: 27 | # - target_label: 'federation' 28 | # replacement: 'true' 29 | # - target_label: 'scrapetype' 30 | # replacement: 'kubernetes' -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | const nextJest = require('next/jest') 2 | 3 | const createJestConfig = nextJest({ 4 | dir: './', 5 | }) 6 | 7 | const customJestConfig = { 8 | setupFilesAfterEnv: ['/jest.setup.js'], 9 | testEnvironment: 'jest-environment-jsdom', 10 | } 11 | 12 | module.exports = createJestConfig(customJestConfig) 13 | -------------------------------------------------------------------------------- /jest.setup.js: -------------------------------------------------------------------------------- 1 | import '@testing-library/jest-dom' -------------------------------------------------------------------------------- /k8sConfig/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile to setup kubernetes environment in Docker 2 | 3 | #Step 1 - Install kind for testing kubernetes 4 | install-kind: 5 | brew install kind 6 | 7 | #Step 2 - Install kubectl for interacting with kubernetes 8 | install-kubectl: 9 | brew install kubectl 10 | 11 | # Step 3 - Create cluster 12 | kind-create-cluster: 13 | kind create cluster --name k8s-morpheus --image kindest/node:v1.28.13 --config ./kind.yaml 14 | 15 | # Step 4 - change control context to k8s-morpheus 16 | change-cluster-context: 17 | kubectl cluster-info --context kind-k8s-morpheus 18 | 19 | # Step 5 - Configure monitoring in kind-k8s-morpheus cluster 20 | # Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources 21 | # Note that due to some CRD size we are using kubectl server-side apply feature which is generally available since kubernetes 1.22. 22 | # If you are using previous kubernetes versions this feature may not be available and you would need to use kubectl create instead. 
23 | config-k8s-monitoring: 24 | kubectl apply --server-side -f manifests/setup 25 | kubectl wait \ 26 | --for condition=Established \ 27 | --all CustomResourceDefinition \ 28 | --namespace=monitoring 29 | kubectl apply -f manifests/ 30 | 31 | # Step 6 - Use kubectl to wait until all pods are Running in monitoring namespace 32 | get-monitoring-pods: 33 | kubectl -n monitoring get pods 34 | 35 | port-forward-k8s-grafana: 36 | kubectl -n monitoring port-forward svc/grafana 45556:3000 37 | 38 | port-forward-k8s-prometheus: 39 | kubectl -n monitoring port-forward svc/prometheus-k8s 45558:9090 40 | 41 | kind-delete-cluster: 42 | kind delete cluster --name k8s-morpheus 43 | 44 | get-pods: 45 | kubectl -n monitoring get pods 46 | 47 | get-all: 48 | kubectl -n monitoring get all 49 | -------------------------------------------------------------------------------- /k8sConfig/kind.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | - role: worker 6 | - role: worker 7 | - role: worker 8 | -------------------------------------------------------------------------------- /k8sConfig/manifests/alertmanager-alertmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Alertmanager 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.26.0 10 | name: main 11 | namespace: monitoring 12 | spec: 13 | image: quay.io/prometheus/alertmanager:v0.26.0 14 | nodeSelector: 15 | kubernetes.io/os: linux 16 | podMetadata: 17 | labels: 18 | app.kubernetes.io/component: alert-router 19 | app.kubernetes.io/instance: main 20 | app.kubernetes.io/name: alertmanager 21 | app.kubernetes.io/part-of: kube-prometheus 22 | app.kubernetes.io/version: 0.26.0 23 | replicas: 3 24 | resources: 25 | limits: 26 | cpu: 100m 27 | memory: 100Mi 28 | requests: 29 | cpu: 4m 30 | memory: 100Mi 31 | securityContext: 32 | fsGroup: 2000 33 | runAsNonRoot: true 34 | runAsUser: 1000 35 | serviceAccountName: alertmanager-main 36 | version: 0.26.0 37 | -------------------------------------------------------------------------------- /k8sConfig/manifests/alertmanager-networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.26.0 10 | name: alertmanager-main 11 | namespace: monitoring 12 | spec: 13 | egress: 14 | - {} 15 | ingress: 16 | - from: 17 | - podSelector: 18 | matchLabels: 19 | app.kubernetes.io/name: prometheus 20 | ports: 21 | - port: 9093 22 | protocol: TCP 23 | - port: 8080 24 | protocol: TCP 25 | - from: 26 | - podSelector: 27 | matchLabels: 28 | app.kubernetes.io/name: alertmanager 29 | ports: 30 | - port: 9094 31 | protocol: TCP 32 | - port: 9094 33 | protocol: UDP 34 | podSelector: 35 | matchLabels: 36 | app.kubernetes.io/component: alert-router 37 | app.kubernetes.io/instance: main 38 | app.kubernetes.io/name: alertmanager 39 | app.kubernetes.io/part-of: kube-prometheus 40 | policyTypes: 41 | - Egress 42 | - Ingress 43 
| -------------------------------------------------------------------------------- /k8sConfig/manifests/alertmanager-podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.26.0 10 | name: alertmanager-main 11 | namespace: monitoring 12 | spec: 13 | maxUnavailable: 1 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/component: alert-router 17 | app.kubernetes.io/instance: main 18 | app.kubernetes.io/name: alertmanager 19 | app.kubernetes.io/part-of: kube-prometheus 20 | -------------------------------------------------------------------------------- /k8sConfig/manifests/alertmanager-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.26.0 10 | name: alertmanager-main 11 | namespace: monitoring 12 | stringData: 13 | alertmanager.yaml: |- 14 | "global": 15 | "resolve_timeout": "5m" 16 | "inhibit_rules": 17 | - "equal": 18 | - "namespace" 19 | - "alertname" 20 | "source_matchers": 21 | - "severity = critical" 22 | "target_matchers": 23 | - "severity =~ warning|info" 24 | - "equal": 25 | - "namespace" 26 | - "alertname" 27 | "source_matchers": 28 | - "severity = warning" 29 | "target_matchers": 30 | - "severity = info" 31 | - "equal": 32 | - "namespace" 33 | "source_matchers": 34 | - "alertname = InfoInhibitor" 35 | "target_matchers": 36 | - "severity = info" 37 | "receivers": 38 | - "name": "Default" 39 | - "name": "Watchdog" 40 | - "name": "Critical" 41 | - "name": "null" 42 | "route": 43 | "group_by": 44 | - "namespace" 45 | "group_interval": "5m" 46 | "group_wait": "30s" 47 | "receiver": "Default" 48 | "repeat_interval": "12h" 49 | "routes": 50 | - "matchers": 51 | - "alertname = Watchdog" 52 | "receiver": "Watchdog" 53 | - "matchers": 54 | - "alertname = InfoInhibitor" 55 | "receiver": "null" 56 | - "matchers": 57 | - "severity = critical" 58 | "receiver": "Critical" 59 | type: Opaque 60 | -------------------------------------------------------------------------------- /k8sConfig/manifests/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.26.0 10 | name: alertmanager-main 11 | namespace: monitoring 12 | spec: 13 | ports: 14 | - name: web 15 | port: 9093 16 | targetPort: web 17 | - name: reloader-web 18 | port: 8080 19 | targetPort: reloader-web 20 | selector: 21 | app.kubernetes.io/component: alert-router 22 | app.kubernetes.io/instance: main 23 | app.kubernetes.io/name: alertmanager 24 | app.kubernetes.io/part-of: kube-prometheus 25 | sessionAffinity: ClientIP 26 | -------------------------------------------------------------------------------- /k8sConfig/manifests/alertmanager-serviceAccount.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | automountServiceAccountToken: false 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: alert-router 7 | app.kubernetes.io/instance: main 8 | app.kubernetes.io/name: alertmanager 9 | app.kubernetes.io/part-of: kube-prometheus 10 | app.kubernetes.io/version: 0.26.0 11 | name: alertmanager-main 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /k8sConfig/manifests/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.26.0 10 | name: alertmanager-main 11 | namespace: monitoring 12 | spec: 13 | endpoints: 14 | - interval: 30s 15 | port: web 16 | - interval: 30s 17 | port: reloader-web 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/component: alert-router 21 | app.kubernetes.io/instance: main 22 | app.kubernetes.io/name: alertmanager 23 | app.kubernetes.io/part-of: kube-prometheus 24 | -------------------------------------------------------------------------------- /k8sConfig/manifests/blackboxExporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: blackbox-exporter 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/blackboxExporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: blackbox-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.24.0 9 | name: blackbox-exporter 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: blackbox-exporter 14 | subjects: 15 | - kind: ServiceAccount 16 | name: blackbox-exporter 17 | namespace: monitoring 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/blackboxExporter-configuration.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | config.yml: |- 4 | "modules": 5 | "http_2xx": 6 | "http": 7 | "preferred_ip_protocol": "ip4" 8 | "prober": "http" 9 | "http_post_2xx": 10 | "http": 11 | "method": "POST" 12 | "preferred_ip_protocol": "ip4" 13 | "prober": "http" 14 | "irc_banner": 15 | "prober": "tcp" 16 | "tcp": 17 | "preferred_ip_protocol": "ip4" 18 | "query_response": 19 | - "send": "NICK prober" 20 | - "send": "USER prober prober prober :prober" 21 | - "expect": "PING :([^ ]+)" 22 | "send": "PONG ${1}" 23 | - "expect": "^:[^ ]+ 001" 24 | "pop3s_banner": 25 | "prober": "tcp" 26 | "tcp": 27 | "preferred_ip_protocol": "ip4" 28 | "query_response": 29 | - "expect": 
"^+OK" 30 | "tls": true 31 | "tls_config": 32 | "insecure_skip_verify": false 33 | "ssh_banner": 34 | "prober": "tcp" 35 | "tcp": 36 | "preferred_ip_protocol": "ip4" 37 | "query_response": 38 | - "expect": "^SSH-2.0-" 39 | "tcp_connect": 40 | "prober": "tcp" 41 | "tcp": 42 | "preferred_ip_protocol": "ip4" 43 | kind: ConfigMap 44 | metadata: 45 | labels: 46 | app.kubernetes.io/component: exporter 47 | app.kubernetes.io/name: blackbox-exporter 48 | app.kubernetes.io/part-of: kube-prometheus 49 | app.kubernetes.io/version: 0.24.0 50 | name: blackbox-exporter-configuration 51 | namespace: monitoring 52 | -------------------------------------------------------------------------------- /k8sConfig/manifests/blackboxExporter-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: blackbox-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.24.0 9 | name: blackbox-exporter 10 | namespace: monitoring 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app.kubernetes.io/component: exporter 16 | app.kubernetes.io/name: blackbox-exporter 17 | app.kubernetes.io/part-of: kube-prometheus 18 | template: 19 | metadata: 20 | annotations: 21 | kubectl.kubernetes.io/default-container: blackbox-exporter 22 | labels: 23 | app.kubernetes.io/component: exporter 24 | app.kubernetes.io/name: blackbox-exporter 25 | app.kubernetes.io/part-of: kube-prometheus 26 | app.kubernetes.io/version: 0.24.0 27 | spec: 28 | automountServiceAccountToken: true 29 | containers: 30 | - args: 31 | - --config.file=/etc/blackbox_exporter/config.yml 32 | - --web.listen-address=:19115 33 | image: quay.io/prometheus/blackbox-exporter:v0.24.0 34 | name: blackbox-exporter 35 | ports: 36 | - containerPort: 19115 37 | name: http 38 | resources: 39 | limits: 40 | cpu: 20m 41 | memory: 40Mi 42 | requests: 43 | cpu: 10m 44 | memory: 20Mi 45 | securityContext: 46 | allowPrivilegeEscalation: false 47 | capabilities: 48 | drop: 49 | - ALL 50 | readOnlyRootFilesystem: true 51 | runAsNonRoot: true 52 | runAsUser: 65534 53 | volumeMounts: 54 | - mountPath: /etc/blackbox_exporter/ 55 | name: config 56 | readOnly: true 57 | - args: 58 | - --webhook-url=http://localhost:19115/-/reload 59 | - --volume-dir=/etc/blackbox_exporter/ 60 | image: jimmidyson/configmap-reload:v0.5.0 61 | name: module-configmap-reloader 62 | resources: 63 | limits: 64 | cpu: 20m 65 | memory: 40Mi 66 | requests: 67 | cpu: 10m 68 | memory: 20Mi 69 | securityContext: 70 | allowPrivilegeEscalation: false 71 | capabilities: 72 | drop: 73 | - ALL 74 | readOnlyRootFilesystem: true 75 | runAsNonRoot: true 76 | runAsUser: 65534 77 | terminationMessagePath: /dev/termination-log 78 | terminationMessagePolicy: FallbackToLogsOnError 79 | volumeMounts: 80 | - mountPath: /etc/blackbox_exporter/ 81 | name: config 82 | readOnly: true 83 | - args: 84 | - --secure-listen-address=:9115 85 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 86 | - --upstream=http://127.0.0.1:19115/ 87 | image: quay.io/brancz/kube-rbac-proxy:v0.14.2 88 | name: kube-rbac-proxy 89 | ports: 90 | - containerPort: 9115 91 | name: https 92 | resources: 93 | limits: 94 | cpu: 20m 95 | memory: 40Mi 
96 | requests: 97 | cpu: 10m 98 | memory: 20Mi 99 | securityContext: 100 | allowPrivilegeEscalation: false 101 | capabilities: 102 | drop: 103 | - ALL 104 | readOnlyRootFilesystem: true 105 | runAsGroup: 65532 106 | runAsNonRoot: true 107 | runAsUser: 65532 108 | nodeSelector: 109 | kubernetes.io/os: linux 110 | serviceAccountName: blackbox-exporter 111 | volumes: 112 | - configMap: 113 | name: blackbox-exporter-configuration 114 | name: config 115 | -------------------------------------------------------------------------------- /k8sConfig/manifests/blackboxExporter-networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: blackbox-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.24.0 9 | name: blackbox-exporter 10 | namespace: monitoring 11 | spec: 12 | egress: 13 | - {} 14 | ingress: 15 | - from: 16 | - podSelector: 17 | matchLabels: 18 | app.kubernetes.io/name: prometheus 19 | ports: 20 | - port: 9115 21 | protocol: TCP 22 | - port: 19115 23 | protocol: TCP 24 | podSelector: 25 | matchLabels: 26 | app.kubernetes.io/component: exporter 27 | app.kubernetes.io/name: blackbox-exporter 28 | app.kubernetes.io/part-of: kube-prometheus 29 | policyTypes: 30 | - Egress 31 | - Ingress 32 | -------------------------------------------------------------------------------- /k8sConfig/manifests/blackboxExporter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: blackbox-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.24.0 9 | name: blackbox-exporter 10 | namespace: monitoring 11 | spec: 12 | ports: 13 | - name: https 14 | port: 9115 15 | targetPort: https 16 | - name: probe 17 | port: 19115 18 | targetPort: http 19 | selector: 20 | app.kubernetes.io/component: exporter 21 | app.kubernetes.io/name: blackbox-exporter 22 | app.kubernetes.io/part-of: kube-prometheus 23 | -------------------------------------------------------------------------------- /k8sConfig/manifests/blackboxExporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | automountServiceAccountToken: false 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: exporter 7 | app.kubernetes.io/name: blackbox-exporter 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.24.0 10 | name: blackbox-exporter 11 | namespace: monitoring 12 | -------------------------------------------------------------------------------- /k8sConfig/manifests/blackboxExporter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: blackbox-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.24.0 9 | name: blackbox-exporter 10 | namespace: monitoring 11 | spec: 12 | endpoints: 13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 14 | interval: 30s 15 | path: /metrics 16 | port: https 17 | scheme: https 18 | tlsConfig: 19 | 
insecureSkipVerify: true 20 | selector: 21 | matchLabels: 22 | app.kubernetes.io/component: exporter 23 | app.kubernetes.io/name: blackbox-exporter 24 | app.kubernetes.io/part-of: kube-prometheus 25 | -------------------------------------------------------------------------------- /k8sConfig/manifests/grafana-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 9.5.3 9 | name: grafana-config 10 | namespace: monitoring 11 | stringData: 12 | grafana.ini: | 13 | [date_formats] 14 | default_timezone = UTC 15 | [server] 16 | allow_embedding = true 17 | enable_gzip = true 18 | [security] 19 | allow_embedding = true 20 | [plugins] 21 | plugins = boomtheme-panel 22 | enable_alpha = true 23 | [auth.anonymous] 24 | enabled = true 25 | org_name = Main Org. 26 | org_role = Viewer 27 | [log] 28 | level = error 29 | type: Opaque 30 | -------------------------------------------------------------------------------- /k8sConfig/manifests/grafana-dashboardDatasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 9.5.3 9 | name: grafana-datasources 10 | namespace: monitoring 11 | stringData: 12 | datasources.yaml: |- 13 | { 14 | "apiVersion": 1, 15 | "datasources": [ 16 | { 17 | "access": "proxy", 18 | "editable": false, 19 | "name": "prometheus", 20 | "orgId": 1, 21 | "type": "prometheus", 22 | "url": "http://prometheus-k8s.monitoring.svc:9090", 23 | "version": 1 24 | } 25 | ] 26 | } 27 | type: Opaque 28 | -------------------------------------------------------------------------------- /k8sConfig/manifests/grafana-dashboardSources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | { 5 | "apiVersion": 1, 6 | "providers": [ 7 | { 8 | "folder": "Default", 9 | "folderUid": "", 10 | "name": "0", 11 | "options": { 12 | "path": "/grafana-dashboard-definitions/0" 13 | }, 14 | "orgId": 1, 15 | "type": "file" 16 | } 17 | ] 18 | } 19 | kind: ConfigMap 20 | metadata: 21 | labels: 22 | app.kubernetes.io/component: grafana 23 | app.kubernetes.io/name: grafana 24 | app.kubernetes.io/part-of: kube-prometheus 25 | app.kubernetes.io/version: 9.5.3 26 | name: grafana-dashboards 27 | namespace: monitoring 28 | -------------------------------------------------------------------------------- /k8sConfig/manifests/grafana-networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 9.5.3 9 | name: grafana 10 | namespace: monitoring 11 | spec: 12 | egress: 13 | - {} 14 | ingress: 15 | - from: 16 | - podSelector: 17 | matchLabels: 18 | app.kubernetes.io/name: prometheus 19 | ports: 20 | - port: 3000 21 | protocol: TCP 22 | podSelector: 23 | matchLabels: 24 | app.kubernetes.io/component: grafana 25 | app.kubernetes.io/name: grafana 26 | 
app.kubernetes.io/part-of: kube-prometheus 27 | policyTypes: 28 | - Egress 29 | - Ingress 30 | -------------------------------------------------------------------------------- /k8sConfig/manifests/grafana-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 9.5.3 9 | prometheus: k8s 10 | role: alert-rules 11 | name: grafana-rules 12 | namespace: monitoring 13 | spec: 14 | groups: 15 | - name: GrafanaAlerts 16 | rules: 17 | - alert: GrafanaRequestsFailing 18 | annotations: 19 | message: '{{ $labels.namespace }}/{{ $labels.job }}/{{ $labels.handler }} is experiencing {{ $value | humanize }}% errors' 20 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/grafana/grafanarequestsfailing 21 | expr: | 22 | 100 * sum without (status_code) (namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query", status_code=~"5.."}) 23 | / 24 | sum without (status_code) (namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query"}) 25 | > 50 26 | for: 5m 27 | labels: 28 | severity: warning 29 | - name: grafana_rules 30 | rules: 31 | - expr: | 32 | sum by (namespace, job, handler, status_code) (rate(grafana_http_request_duration_seconds_count[5m])) 33 | record: namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m 34 | -------------------------------------------------------------------------------- /k8sConfig/manifests/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 9.5.3 9 | name: grafana 10 | namespace: monitoring 11 | spec: 12 | ports: 13 | - name: http 14 | port: 3000 15 | targetPort: http 16 | selector: 17 | app.kubernetes.io/component: grafana 18 | app.kubernetes.io/name: grafana 19 | app.kubernetes.io/part-of: kube-prometheus 20 | -------------------------------------------------------------------------------- /k8sConfig/manifests/grafana-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | automountServiceAccountToken: false 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: grafana 7 | app.kubernetes.io/name: grafana 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 9.5.3 10 | name: grafana 11 | namespace: monitoring 12 | -------------------------------------------------------------------------------- /k8sConfig/manifests/grafana-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 9.5.3 9 | name: grafana 10 | namespace: monitoring 11 | spec: 12 | endpoints: 13 | - interval: 15s 14 | port: http 15 
| selector: 16 | matchLabels: 17 | app.kubernetes.io/name: grafana 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubePrometheus-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-prometheus 7 | app.kubernetes.io/part-of: kube-prometheus 8 | prometheus: k8s 9 | role: alert-rules 10 | name: kube-prometheus-rules 11 | namespace: monitoring 12 | spec: 13 | groups: 14 | - name: general.rules 15 | rules: 16 | - alert: TargetDown 17 | annotations: 18 | description: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.' 19 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/targetdown 20 | summary: One or more targets are unreachable. 21 | expr: 100 * (count(up == 0) BY (cluster, job, namespace, service) / count(up) BY (cluster, job, namespace, service)) > 10 22 | for: 10m 23 | labels: 24 | severity: warning 25 | - alert: Watchdog 26 | annotations: 27 | description: | 28 | This is an alert meant to ensure that the entire alerting pipeline is functional. 29 | This alert is always firing, therefore it should always be firing in Alertmanager 30 | and always fire against a receiver. There are integrations with various notification 31 | mechanisms that send a notification when this alert is not firing. For example the 32 | "DeadMansSnitch" integration in PagerDuty. 33 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/watchdog 34 | summary: An alert that should always be firing to certify that Alertmanager is working properly. 35 | expr: vector(1) 36 | labels: 37 | severity: none 38 | - alert: InfoInhibitor 39 | annotations: 40 | description: | 41 | This is an alert that is used to inhibit info alerts. 42 | By themselves, the info-level alerts are sometimes very noisy, but they are relevant when combined with 43 | other alerts. 44 | This alert fires whenever there's a severity="info" alert, and stops firing when another alert with a 45 | severity of 'warning' or 'critical' starts firing on the same namespace. 46 | This alert should be routed to a null receiver and configured to inhibit alerts with severity="info". 47 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/infoinhibitor 48 | summary: Info-level alert inhibition. 
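# The expression below fires for a namespace only while a severity="info" alert is
# active there and no warning or critical alert is firing in the same namespace;
# Alertmanager's inhibit_rules (see alertmanager-secret.yaml) then use this alert to
# silence the info-level alerts themselves.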
49 | expr: ALERTS{severity = "info"} == 1 unless on(namespace) ALERTS{alertname != "InfoInhibitor", severity =~ "warning|critical", alertstate="firing"} == 1 50 | labels: 51 | severity: none 52 | - name: node-network 53 | rules: 54 | - alert: NodeNetworkInterfaceFlapping 55 | annotations: 56 | description: Network interface "{{ $labels.device }}" changing its up status often on node-exporter {{ $labels.namespace }}/{{ $labels.pod }} 57 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/nodenetworkinterfaceflapping 58 | summary: Network interface is often changing its status 59 | expr: | 60 | changes(node_network_up{job="node-exporter",device!~"veth.+"}[2m]) > 2 61 | for: 2m 62 | labels: 63 | severity: warning 64 | - name: kube-prometheus-node-recording.rules 65 | rules: 66 | - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[3m])) BY (instance) 67 | record: instance:node_cpu:rate:sum 68 | - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance) 69 | record: instance:node_network_receive_bytes:rate:sum 70 | - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance) 71 | record: instance:node_network_transmit_bytes:rate:sum 72 | - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance) 73 | record: instance:node_cpu:ratio 74 | - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) 75 | record: cluster:node_cpu:sum_rate5m 76 | - expr: cluster:node_cpu:sum_rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu)) 77 | record: cluster:node_cpu:ratio 78 | - name: kube-prometheus-general.rules 79 | rules: 80 | - expr: count without(instance, pod, node) (up == 1) 81 | record: count:up1 82 | - expr: count without(instance, pod, node) (up == 0) 83 | record: count:up0 84 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubeStateMetrics-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.9.2 9 | name: kube-state-metrics 10 | rules: 11 | - apiGroups: 12 | - "" 13 | resources: 14 | - configmaps 15 | - secrets 16 | - nodes 17 | - pods 18 | - services 19 | - serviceaccounts 20 | - resourcequotas 21 | - replicationcontrollers 22 | - limitranges 23 | - persistentvolumeclaims 24 | - persistentvolumes 25 | - namespaces 26 | - endpoints 27 | verbs: 28 | - list 29 | - watch 30 | - apiGroups: 31 | - apps 32 | resources: 33 | - statefulsets 34 | - daemonsets 35 | - deployments 36 | - replicasets 37 | verbs: 38 | - list 39 | - watch 40 | - apiGroups: 41 | - batch 42 | resources: 43 | - cronjobs 44 | - jobs 45 | verbs: 46 | - list 47 | - watch 48 | - apiGroups: 49 | - autoscaling 50 | resources: 51 | - horizontalpodautoscalers 52 | verbs: 53 | - list 54 | - watch 55 | - apiGroups: 56 | - authentication.k8s.io 57 | resources: 58 | - tokenreviews 59 | verbs: 60 | - create 61 | - apiGroups: 62 | - authorization.k8s.io 63 | resources: 64 | - subjectaccessreviews 65 | verbs: 66 | - create 67 | - apiGroups: 68 | - policy 69 | resources: 70 | - poddisruptionbudgets 71 | verbs: 72 | - list 73 | - watch 74 
| - apiGroups: 75 | - certificates.k8s.io 76 | resources: 77 | - certificatesigningrequests 78 | verbs: 79 | - list 80 | - watch 81 | - apiGroups: 82 | - discovery.k8s.io 83 | resources: 84 | - endpointslices 85 | verbs: 86 | - list 87 | - watch 88 | - apiGroups: 89 | - storage.k8s.io 90 | resources: 91 | - storageclasses 92 | - volumeattachments 93 | verbs: 94 | - list 95 | - watch 96 | - apiGroups: 97 | - admissionregistration.k8s.io 98 | resources: 99 | - mutatingwebhookconfigurations 100 | - validatingwebhookconfigurations 101 | verbs: 102 | - list 103 | - watch 104 | - apiGroups: 105 | - networking.k8s.io 106 | resources: 107 | - networkpolicies 108 | - ingressclasses 109 | - ingresses 110 | verbs: 111 | - list 112 | - watch 113 | - apiGroups: 114 | - coordination.k8s.io 115 | resources: 116 | - leases 117 | verbs: 118 | - list 119 | - watch 120 | - apiGroups: 121 | - rbac.authorization.k8s.io 122 | resources: 123 | - clusterrolebindings 124 | - clusterroles 125 | - rolebindings 126 | - roles 127 | verbs: 128 | - list 129 | - watch 130 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubeStateMetrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.9.2 9 | name: kube-state-metrics 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: kube-state-metrics 14 | subjects: 15 | - kind: ServiceAccount 16 | name: kube-state-metrics 17 | namespace: monitoring 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubeStateMetrics-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.9.2 9 | name: kube-state-metrics 10 | namespace: monitoring 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app.kubernetes.io/component: exporter 16 | app.kubernetes.io/name: kube-state-metrics 17 | app.kubernetes.io/part-of: kube-prometheus 18 | template: 19 | metadata: 20 | annotations: 21 | kubectl.kubernetes.io/default-container: kube-state-metrics 22 | labels: 23 | app.kubernetes.io/component: exporter 24 | app.kubernetes.io/name: kube-state-metrics 25 | app.kubernetes.io/part-of: kube-prometheus 26 | app.kubernetes.io/version: 2.9.2 27 | spec: 28 | automountServiceAccountToken: true 29 | containers: 30 | - args: 31 | - --host=127.0.0.1 32 | - --port=8081 33 | - --telemetry-host=127.0.0.1 34 | - --telemetry-port=8082 35 | image: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.9.2 36 | name: kube-state-metrics 37 | resources: 38 | limits: 39 | cpu: 100m 40 | memory: 250Mi 41 | requests: 42 | cpu: 10m 43 | memory: 190Mi 44 | securityContext: 45 | allowPrivilegeEscalation: false 46 | capabilities: 47 | drop: 48 | - ALL 49 | readOnlyRootFilesystem: true 50 | runAsNonRoot: true 51 | runAsUser: 65534 52 | seccompProfile: 53 | type: RuntimeDefault 54 | - args: 55 | - --secure-listen-address=:8443 56 | - 
--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 57 | - --upstream=http://127.0.0.1:8081/ 58 | image: quay.io/brancz/kube-rbac-proxy:v0.14.2 59 | name: kube-rbac-proxy-main 60 | ports: 61 | - containerPort: 8443 62 | name: https-main 63 | resources: 64 | limits: 65 | cpu: 40m 66 | memory: 40Mi 67 | requests: 68 | cpu: 20m 69 | memory: 20Mi 70 | securityContext: 71 | allowPrivilegeEscalation: false 72 | capabilities: 73 | drop: 74 | - ALL 75 | readOnlyRootFilesystem: true 76 | runAsGroup: 65532 77 | runAsNonRoot: true 78 | runAsUser: 65532 79 | - args: 80 | - --secure-listen-address=:9443 81 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 82 | - --upstream=http://127.0.0.1:8082/ 83 | image: quay.io/brancz/kube-rbac-proxy:v0.14.2 84 | name: kube-rbac-proxy-self 85 | ports: 86 | - containerPort: 9443 87 | name: https-self 88 | resources: 89 | limits: 90 | cpu: 20m 91 | memory: 40Mi 92 | requests: 93 | cpu: 10m 94 | memory: 20Mi 95 | securityContext: 96 | allowPrivilegeEscalation: false 97 | capabilities: 98 | drop: 99 | - ALL 100 | readOnlyRootFilesystem: true 101 | runAsGroup: 65532 102 | runAsNonRoot: true 103 | runAsUser: 65532 104 | nodeSelector: 105 | kubernetes.io/os: linux 106 | serviceAccountName: kube-state-metrics 107 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubeStateMetrics-networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.9.2 9 | name: kube-state-metrics 10 | namespace: monitoring 11 | spec: 12 | egress: 13 | - {} 14 | ingress: 15 | - from: 16 | - podSelector: 17 | matchLabels: 18 | app.kubernetes.io/name: prometheus 19 | ports: 20 | - port: 8443 21 | protocol: TCP 22 | - port: 9443 23 | protocol: TCP 24 | podSelector: 25 | matchLabels: 26 | app.kubernetes.io/component: exporter 27 | app.kubernetes.io/name: kube-state-metrics 28 | app.kubernetes.io/part-of: kube-prometheus 29 | policyTypes: 30 | - Egress 31 | - Ingress 32 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubeStateMetrics-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.9.2 9 | prometheus: k8s 10 | role: alert-rules 11 | name: kube-state-metrics-rules 12 | namespace: monitoring 13 | spec: 14 | groups: 15 | - name: kube-state-metrics 16 | rules: 17 | - alert: KubeStateMetricsListErrors 18 | annotations: 19 | description: kube-state-metrics is experiencing errors at an elevated rate in list operations. 
This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. 20 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricslisterrors 21 | summary: kube-state-metrics is experiencing errors in list operations. 22 | expr: | 23 | (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m])) by (cluster) 24 | / 25 | sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m])) by (cluster)) 26 | > 0.01 27 | for: 15m 28 | labels: 29 | severity: critical 30 | - alert: KubeStateMetricsWatchErrors 31 | annotations: 32 | description: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. 33 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricswatcherrors 34 | summary: kube-state-metrics is experiencing errors in watch operations. 35 | expr: | 36 | (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m])) by (cluster) 37 | / 38 | sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m])) by (cluster)) 39 | > 0.01 40 | for: 15m 41 | labels: 42 | severity: critical 43 | - alert: KubeStateMetricsShardingMismatch 44 | annotations: 45 | description: kube-state-metrics pods are running with different --total-shards configuration, some Kubernetes objects may be exposed multiple times or not exposed at all. 46 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardingmismatch 47 | summary: kube-state-metrics sharding is misconfigured. 48 | expr: | 49 | stdvar (kube_state_metrics_total_shards{job="kube-state-metrics"}) by (cluster) != 0 50 | for: 15m 51 | labels: 52 | severity: critical 53 | - alert: KubeStateMetricsShardsMissing 54 | annotations: 55 | description: kube-state-metrics shards are missing, some Kubernetes objects are not being exposed. 56 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/kube-state-metrics/kubestatemetricsshardsmissing 57 | summary: kube-state-metrics shards are missing. 
58 | expr: | 59 | 2^max(kube_state_metrics_total_shards{job="kube-state-metrics"}) by (cluster) - 1 60 | - 61 | sum( 2 ^ max by (cluster, shard_ordinal) (kube_state_metrics_shard_ordinal{job="kube-state-metrics"}) ) by (cluster) 62 | != 0 63 | for: 15m 64 | labels: 65 | severity: critical 66 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubeStateMetrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.9.2 9 | name: kube-state-metrics 10 | namespace: monitoring 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: https-main 15 | port: 8443 16 | targetPort: https-main 17 | - name: https-self 18 | port: 9443 19 | targetPort: https-self 20 | selector: 21 | app.kubernetes.io/component: exporter 22 | app.kubernetes.io/name: kube-state-metrics 23 | app.kubernetes.io/part-of: kube-prometheus 24 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubeStateMetrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | automountServiceAccountToken: false 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: exporter 7 | app.kubernetes.io/name: kube-state-metrics 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.9.2 10 | name: kube-state-metrics 11 | namespace: monitoring 12 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubeStateMetrics-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.9.2 9 | name: kube-state-metrics 10 | namespace: monitoring 11 | spec: 12 | endpoints: 13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 14 | honorLabels: true 15 | interval: 30s 16 | metricRelabelings: 17 | - action: drop 18 | regex: kube_endpoint_address_not_ready|kube_endpoint_address_available 19 | sourceLabels: 20 | - __name__ 21 | port: https-main 22 | relabelings: 23 | - action: labeldrop 24 | regex: (pod|service|endpoint|namespace) 25 | scheme: https 26 | scrapeTimeout: 30s 27 | tlsConfig: 28 | insecureSkipVerify: true 29 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 30 | interval: 30s 31 | port: https-self 32 | scheme: https 33 | tlsConfig: 34 | insecureSkipVerify: true 35 | jobLabel: app.kubernetes.io/name 36 | selector: 37 | matchLabels: 38 | app.kubernetes.io/component: exporter 39 | app.kubernetes.io/name: kube-state-metrics 40 | app.kubernetes.io/part-of: kube-prometheus 41 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubernetesControlPlane-serviceMonitorCoreDNS.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: coredns 6 | app.kubernetes.io/part-of: kube-prometheus 7 
| name: coredns 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | interval: 15s 13 | metricRelabelings: 14 | - action: drop 15 | regex: coredns_cache_misses_total 16 | sourceLabels: 17 | - __name__ 18 | port: metrics 19 | jobLabel: app.kubernetes.io/name 20 | namespaceSelector: 21 | matchNames: 22 | - kube-system 23 | selector: 24 | matchLabels: 25 | k8s-app: kube-dns 26 | -------------------------------------------------------------------------------- /k8sConfig/manifests/kubernetesControlPlane-serviceMonitorKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-scheduler 6 | app.kubernetes.io/part-of: kube-prometheus 7 | name: kube-scheduler 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | interval: 30s 13 | port: https-metrics 14 | scheme: https 15 | tlsConfig: 16 | insecureSkipVerify: true 17 | jobLabel: app.kubernetes.io/name 18 | namespaceSelector: 19 | matchNames: 20 | - kube-system 21 | selector: 22 | matchLabels: 23 | app.kubernetes.io/name: kube-scheduler 24 | -------------------------------------------------------------------------------- /k8sConfig/manifests/nodeExporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.6.1 9 | name: node-exporter 10 | rules: 11 | - apiGroups: 12 | - authentication.k8s.io 13 | resources: 14 | - tokenreviews 15 | verbs: 16 | - create 17 | - apiGroups: 18 | - authorization.k8s.io 19 | resources: 20 | - subjectaccessreviews 21 | verbs: 22 | - create 23 | -------------------------------------------------------------------------------- /k8sConfig/manifests/nodeExporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.6.1 9 | name: node-exporter 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: node-exporter 14 | subjects: 15 | - kind: ServiceAccount 16 | name: node-exporter 17 | namespace: monitoring 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/nodeExporter-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.6.1 9 | name: node-exporter 10 | namespace: monitoring 11 | spec: 12 | selector: 13 | matchLabels: 14 | app.kubernetes.io/component: exporter 15 | app.kubernetes.io/name: node-exporter 16 | app.kubernetes.io/part-of: kube-prometheus 17 | template: 18 | metadata: 19 | annotations: 20 | kubectl.kubernetes.io/default-container: 
node-exporter 21 | labels: 22 | app.kubernetes.io/component: exporter 23 | app.kubernetes.io/name: node-exporter 24 | app.kubernetes.io/part-of: kube-prometheus 25 | app.kubernetes.io/version: 1.6.1 26 | spec: 27 | automountServiceAccountToken: true 28 | containers: 29 | - args: 30 | - --web.listen-address=127.0.0.1:9100 31 | - --path.sysfs=/host/sys 32 | - --path.rootfs=/host/root 33 | - --path.udev.data=/host/root/run/udev/data 34 | - --no-collector.wifi 35 | - --no-collector.hwmon 36 | - --no-collector.btrfs 37 | - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/k3s/containerd/.+|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/) 38 | - --collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15})$ 39 | - --collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15})$ 40 | image: quay.io/prometheus/node-exporter:v1.6.1 41 | name: node-exporter 42 | resources: 43 | limits: 44 | cpu: 250m 45 | memory: 180Mi 46 | requests: 47 | cpu: 102m 48 | memory: 180Mi 49 | securityContext: 50 | allowPrivilegeEscalation: false 51 | capabilities: 52 | add: 53 | - SYS_TIME 54 | drop: 55 | - ALL 56 | readOnlyRootFilesystem: true 57 | volumeMounts: 58 | - mountPath: /host/sys 59 | mountPropagation: HostToContainer 60 | name: sys 61 | readOnly: true 62 | - mountPath: /host/root 63 | mountPropagation: HostToContainer 64 | name: root 65 | readOnly: true 66 | - args: 67 | - --secure-listen-address=[$(IP)]:9100 68 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 69 | - --upstream=http://127.0.0.1:9100/ 70 | env: 71 | - name: IP 72 | valueFrom: 73 | fieldRef: 74 | fieldPath: status.podIP 75 | image: quay.io/brancz/kube-rbac-proxy:v0.14.2 76 | name: kube-rbac-proxy 77 | ports: 78 | - containerPort: 9100 79 | hostPort: 9100 80 | name: https 81 | resources: 82 | limits: 83 | cpu: 20m 84 | memory: 40Mi 85 | requests: 86 | cpu: 10m 87 | memory: 20Mi 88 | securityContext: 89 | allowPrivilegeEscalation: false 90 | capabilities: 91 | drop: 92 | - ALL 93 | readOnlyRootFilesystem: true 94 | runAsGroup: 65532 95 | runAsNonRoot: true 96 | runAsUser: 65532 97 | hostNetwork: true 98 | hostPID: true 99 | nodeSelector: 100 | kubernetes.io/os: linux 101 | priorityClassName: system-cluster-critical 102 | securityContext: 103 | runAsNonRoot: true 104 | runAsUser: 65534 105 | serviceAccountName: node-exporter 106 | tolerations: 107 | - operator: Exists 108 | volumes: 109 | - hostPath: 110 | path: /sys 111 | name: sys 112 | - hostPath: 113 | path: / 114 | name: root 115 | updateStrategy: 116 | rollingUpdate: 117 | maxUnavailable: 10% 118 | type: RollingUpdate 119 | -------------------------------------------------------------------------------- /k8sConfig/manifests/nodeExporter-networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.6.1 9 | name: node-exporter 10 | namespace: monitoring 11 | spec: 12 | egress: 13 | - {} 14 | ingress: 15 | - from: 16 | - podSelector: 17 | matchLabels: 18 | app.kubernetes.io/name: prometheus 19 | ports: 20 | - port: 9100 21 | protocol: TCP 22 | podSelector: 23 | matchLabels: 24 | 
app.kubernetes.io/component: exporter 25 | app.kubernetes.io/name: node-exporter 26 | app.kubernetes.io/part-of: kube-prometheus 27 | policyTypes: 28 | - Egress 29 | - Ingress 30 | -------------------------------------------------------------------------------- /k8sConfig/manifests/nodeExporter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.6.1 9 | name: node-exporter 10 | namespace: monitoring 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: https 15 | port: 9100 16 | targetPort: https 17 | selector: 18 | app.kubernetes.io/component: exporter 19 | app.kubernetes.io/name: node-exporter 20 | app.kubernetes.io/part-of: kube-prometheus 21 | -------------------------------------------------------------------------------- /k8sConfig/manifests/nodeExporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | automountServiceAccountToken: false 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: exporter 7 | app.kubernetes.io/name: node-exporter 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 1.6.1 10 | name: node-exporter 11 | namespace: monitoring 12 | -------------------------------------------------------------------------------- /k8sConfig/manifests/nodeExporter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.6.1 9 | name: node-exporter 10 | namespace: monitoring 11 | spec: 12 | endpoints: 13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 14 | interval: 15s 15 | port: https 16 | relabelings: 17 | - action: replace 18 | regex: (.*) 19 | replacement: $1 20 | sourceLabels: 21 | - __meta_kubernetes_pod_node_name 22 | targetLabel: instance 23 | scheme: https 24 | tlsConfig: 25 | insecureSkipVerify: true 26 | jobLabel: app.kubernetes.io/name 27 | selector: 28 | matchLabels: 29 | app.kubernetes.io/component: exporter 30 | app.kubernetes.io/name: node-exporter 31 | app.kubernetes.io/part-of: kube-prometheus 32 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 10 | name: prometheus-k8s 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - nodes/metrics 16 | verbs: 17 | - get 18 | - nonResourceURLs: 19 | - /metrics 20 | verbs: 21 | - get 22 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 
2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 10 | name: prometheus-k8s 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: prometheus-k8s 15 | subjects: 16 | - kind: ServiceAccount 17 | name: prometheus-k8s 18 | namespace: monitoring 19 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 10 | name: prometheus-k8s 11 | namespace: monitoring 12 | spec: 13 | egress: 14 | - {} 15 | ingress: 16 | - from: 17 | - podSelector: 18 | matchLabels: 19 | app.kubernetes.io/name: prometheus 20 | ports: 21 | - port: 9090 22 | protocol: TCP 23 | - port: 8080 24 | protocol: TCP 25 | - from: 26 | - podSelector: 27 | matchLabels: 28 | app.kubernetes.io/name: prometheus-adapter 29 | ports: 30 | - port: 9090 31 | protocol: TCP 32 | - from: 33 | - podSelector: 34 | matchLabels: 35 | app.kubernetes.io/name: grafana 36 | ports: 37 | - port: 9090 38 | protocol: TCP 39 | podSelector: 40 | matchLabels: 41 | app.kubernetes.io/component: prometheus 42 | app.kubernetes.io/instance: k8s 43 | app.kubernetes.io/name: prometheus 44 | app.kubernetes.io/part-of: kube-prometheus 45 | policyTypes: 46 | - Egress 47 | - Ingress 48 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 10 | name: prometheus-k8s 11 | namespace: monitoring 12 | spec: 13 | minAvailable: 1 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/component: prometheus 17 | app.kubernetes.io/instance: k8s 18 | app.kubernetes.io/name: prometheus 19 | app.kubernetes.io/part-of: kube-prometheus 20 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: Prometheus 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 10 | name: k8s 11 | namespace: monitoring 12 | spec: 13 | alerting: 14 | alertmanagers: 15 | - apiVersion: v2 16 | name: alertmanager-main 17 | namespace: monitoring 18 | port: web 19 | enableFeatures: [] 20 | externalLabels: {} 21 | image: quay.io/prometheus/prometheus:v2.46.0 22 | nodeSelector: 23 | kubernetes.io/os: linux 24 | podMetadata: 25 | labels: 26 | app.kubernetes.io/component: prometheus 27 | app.kubernetes.io/instance: 
k8s 28 | app.kubernetes.io/name: prometheus 29 | app.kubernetes.io/part-of: kube-prometheus 30 | app.kubernetes.io/version: 2.46.0 31 | podMonitorNamespaceSelector: {} 32 | podMonitorSelector: {} 33 | probeNamespaceSelector: {} 34 | probeSelector: {} 35 | replicas: 2 36 | resources: 37 | requests: 38 | memory: 400Mi 39 | ruleNamespaceSelector: {} 40 | ruleSelector: {} 41 | securityContext: 42 | fsGroup: 2000 43 | runAsNonRoot: true 44 | runAsUser: 1000 45 | serviceAccountName: prometheus-k8s 46 | serviceMonitorNamespaceSelector: {} 47 | serviceMonitorSelector: {} 48 | version: 2.46.0 49 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-roleBindingConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 10 | name: prometheus-k8s-config 11 | namespace: monitoring 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: prometheus-k8s-config 16 | subjects: 17 | - kind: ServiceAccount 18 | name: prometheus-k8s 19 | namespace: monitoring 20 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-roleBindingSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | labels: 7 | app.kubernetes.io/component: prometheus 8 | app.kubernetes.io/instance: k8s 9 | app.kubernetes.io/name: prometheus 10 | app.kubernetes.io/part-of: kube-prometheus 11 | app.kubernetes.io/version: 2.46.0 12 | name: prometheus-k8s 13 | namespace: default 14 | roleRef: 15 | apiGroup: rbac.authorization.k8s.io 16 | kind: Role 17 | name: prometheus-k8s 18 | subjects: 19 | - kind: ServiceAccount 20 | name: prometheus-k8s 21 | namespace: monitoring 22 | - apiVersion: rbac.authorization.k8s.io/v1 23 | kind: RoleBinding 24 | metadata: 25 | labels: 26 | app.kubernetes.io/component: prometheus 27 | app.kubernetes.io/instance: k8s 28 | app.kubernetes.io/name: prometheus 29 | app.kubernetes.io/part-of: kube-prometheus 30 | app.kubernetes.io/version: 2.46.0 31 | name: prometheus-k8s 32 | namespace: kube-system 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: Role 36 | name: prometheus-k8s 37 | subjects: 38 | - kind: ServiceAccount 39 | name: prometheus-k8s 40 | namespace: monitoring 41 | - apiVersion: rbac.authorization.k8s.io/v1 42 | kind: RoleBinding 43 | metadata: 44 | labels: 45 | app.kubernetes.io/component: prometheus 46 | app.kubernetes.io/instance: k8s 47 | app.kubernetes.io/name: prometheus 48 | app.kubernetes.io/part-of: kube-prometheus 49 | app.kubernetes.io/version: 2.46.0 50 | name: prometheus-k8s 51 | namespace: monitoring 52 | roleRef: 53 | apiGroup: rbac.authorization.k8s.io 54 | kind: Role 55 | name: prometheus-k8s 56 | subjects: 57 | - kind: ServiceAccount 58 | name: prometheus-k8s 59 | namespace: monitoring 60 | kind: RoleBindingList 61 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-roleConfig.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 10 | name: prometheus-k8s-config 11 | namespace: monitoring 12 | rules: 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - configmaps 17 | verbs: 18 | - get 19 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-roleSpecificNamespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | labels: 7 | app.kubernetes.io/component: prometheus 8 | app.kubernetes.io/instance: k8s 9 | app.kubernetes.io/name: prometheus 10 | app.kubernetes.io/part-of: kube-prometheus 11 | app.kubernetes.io/version: 2.46.0 12 | name: prometheus-k8s 13 | namespace: default 14 | rules: 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - services 19 | - endpoints 20 | - pods 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | - apiGroups: 26 | - extensions 27 | resources: 28 | - ingresses 29 | verbs: 30 | - get 31 | - list 32 | - watch 33 | - apiGroups: 34 | - networking.k8s.io 35 | resources: 36 | - ingresses 37 | verbs: 38 | - get 39 | - list 40 | - watch 41 | - apiVersion: rbac.authorization.k8s.io/v1 42 | kind: Role 43 | metadata: 44 | labels: 45 | app.kubernetes.io/component: prometheus 46 | app.kubernetes.io/instance: k8s 47 | app.kubernetes.io/name: prometheus 48 | app.kubernetes.io/part-of: kube-prometheus 49 | app.kubernetes.io/version: 2.46.0 50 | name: prometheus-k8s 51 | namespace: kube-system 52 | rules: 53 | - apiGroups: 54 | - "" 55 | resources: 56 | - services 57 | - endpoints 58 | - pods 59 | verbs: 60 | - get 61 | - list 62 | - watch 63 | - apiGroups: 64 | - extensions 65 | resources: 66 | - ingresses 67 | verbs: 68 | - get 69 | - list 70 | - watch 71 | - apiGroups: 72 | - networking.k8s.io 73 | resources: 74 | - ingresses 75 | verbs: 76 | - get 77 | - list 78 | - watch 79 | - apiVersion: rbac.authorization.k8s.io/v1 80 | kind: Role 81 | metadata: 82 | labels: 83 | app.kubernetes.io/component: prometheus 84 | app.kubernetes.io/instance: k8s 85 | app.kubernetes.io/name: prometheus 86 | app.kubernetes.io/part-of: kube-prometheus 87 | app.kubernetes.io/version: 2.46.0 88 | name: prometheus-k8s 89 | namespace: monitoring 90 | rules: 91 | - apiGroups: 92 | - "" 93 | resources: 94 | - services 95 | - endpoints 96 | - pods 97 | verbs: 98 | - get 99 | - list 100 | - watch 101 | - apiGroups: 102 | - extensions 103 | resources: 104 | - ingresses 105 | verbs: 106 | - get 107 | - list 108 | - watch 109 | - apiGroups: 110 | - networking.k8s.io 111 | resources: 112 | - ingresses 113 | verbs: 114 | - get 115 | - list 116 | - watch 117 | kind: RoleList 118 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 
10 | name: prometheus-k8s 11 | namespace: monitoring 12 | spec: 13 | ports: 14 | - name: web 15 | port: 9090 16 | targetPort: web 17 | - name: reloader-web 18 | port: 8080 19 | targetPort: reloader-web 20 | selector: 21 | app.kubernetes.io/component: prometheus 22 | app.kubernetes.io/instance: k8s 23 | app.kubernetes.io/name: prometheus 24 | app.kubernetes.io/part-of: kube-prometheus 25 | sessionAffinity: ClientIP 26 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | automountServiceAccountToken: true 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: prometheus 7 | app.kubernetes.io/instance: k8s 8 | app.kubernetes.io/name: prometheus 9 | app.kubernetes.io/part-of: kube-prometheus 10 | app.kubernetes.io/version: 2.46.0 11 | name: prometheus-k8s 12 | namespace: monitoring 13 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheus-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.46.0 10 | name: prometheus-k8s 11 | namespace: monitoring 12 | spec: 13 | endpoints: 14 | - interval: 30s 15 | port: web 16 | - interval: 30s 17 | port: reloader-web 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/component: prometheus 21 | app.kubernetes.io/instance: k8s 22 | app.kubernetes.io/name: prometheus 23 | app.kubernetes.io/part-of: kube-prometheus 24 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-apiService.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: v1beta1.metrics.k8s.io 10 | spec: 11 | group: metrics.k8s.io 12 | groupPriorityMinimum: 100 13 | insecureSkipTLSVerify: true 14 | service: 15 | name: prometheus-adapter 16 | namespace: monitoring 17 | version: v1beta1 18 | versionPriority: 100 19 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: prometheus-adapter 10 | rules: 11 | - apiGroups: 12 | - "" 13 | resources: 14 | - nodes 15 | - namespaces 16 | - pods 17 | - services 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 10 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 11 | rbac.authorization.k8s.io/aggregate-to-view: "true" 12 | name: system:aggregated-metrics-reader 13 | rules: 14 | - apiGroups: 15 | - metrics.k8s.io 16 | resources: 17 | - pods 18 | - nodes 19 | verbs: 20 | - get 21 | - list 22 | - watch 23 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: prometheus-adapter 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: prometheus-adapter 14 | subjects: 15 | - kind: ServiceAccount 16 | name: prometheus-adapter 17 | namespace: monitoring 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-clusterRoleBindingDelegator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: resource-metrics:system:auth-delegator 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: system:auth-delegator 14 | subjects: 15 | - kind: ServiceAccount 16 | name: prometheus-adapter 17 | namespace: monitoring 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-clusterRoleServerResources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: resource-metrics-server-resources 10 | rules: 11 | - apiGroups: 12 | - metrics.k8s.io 13 | resources: 14 | - '*' 15 | verbs: 16 | - '*' 17 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-configMap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | config.yaml: |- 4 | "resourceRules": 5 | "cpu": 6 | "containerLabel": "container" 7 | "containerQuery": | 8 | sum by (<<.GroupBy>>) ( 9 | irate ( 10 | container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="",pod!=""}[120s] 11 | ) 12 | ) 13 | "nodeQuery": | 14 | sum by (<<.GroupBy>>) ( 15 | 1 - irate( 16 | node_cpu_seconds_total{mode="idle"}[60s] 17 | ) 18 | * on(namespace, pod) 
group_left(node) ( 19 | node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>} 20 | ) 21 | ) 22 | or sum by (<<.GroupBy>>) ( 23 | 1 - irate( 24 | windows_cpu_time_total{mode="idle", job="windows-exporter",<<.LabelMatchers>>}[4m] 25 | ) 26 | ) 27 | "resources": 28 | "overrides": 29 | "namespace": 30 | "resource": "namespace" 31 | "node": 32 | "resource": "node" 33 | "pod": 34 | "resource": "pod" 35 | "memory": 36 | "containerLabel": "container" 37 | "containerQuery": | 38 | sum by (<<.GroupBy>>) ( 39 | container_memory_working_set_bytes{<<.LabelMatchers>>,container!="",pod!=""} 40 | ) 41 | "nodeQuery": | 42 | sum by (<<.GroupBy>>) ( 43 | node_memory_MemTotal_bytes{job="node-exporter",<<.LabelMatchers>>} 44 | - 45 | node_memory_MemAvailable_bytes{job="node-exporter",<<.LabelMatchers>>} 46 | ) 47 | or sum by (<<.GroupBy>>) ( 48 | windows_cs_physical_memory_bytes{job="windows-exporter",<<.LabelMatchers>>} 49 | - 50 | windows_memory_available_bytes{job="windows-exporter",<<.LabelMatchers>>} 51 | ) 52 | "resources": 53 | "overrides": 54 | "instance": 55 | "resource": "node" 56 | "namespace": 57 | "resource": "namespace" 58 | "pod": 59 | "resource": "pod" 60 | "window": "5m" 61 | kind: ConfigMap 62 | metadata: 63 | labels: 64 | app.kubernetes.io/component: metrics-adapter 65 | app.kubernetes.io/name: prometheus-adapter 66 | app.kubernetes.io/part-of: kube-prometheus 67 | app.kubernetes.io/version: 0.11.1 68 | name: adapter-config 69 | namespace: monitoring 70 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: prometheus-adapter 10 | namespace: monitoring 11 | spec: 12 | replicas: 2 13 | selector: 14 | matchLabels: 15 | app.kubernetes.io/component: metrics-adapter 16 | app.kubernetes.io/name: prometheus-adapter 17 | app.kubernetes.io/part-of: kube-prometheus 18 | strategy: 19 | rollingUpdate: 20 | maxSurge: 1 21 | maxUnavailable: 1 22 | template: 23 | metadata: 24 | annotations: 25 | checksum.config/md5: 3b1ebf7df0232d1675896f67b66373db 26 | labels: 27 | app.kubernetes.io/component: metrics-adapter 28 | app.kubernetes.io/name: prometheus-adapter 29 | app.kubernetes.io/part-of: kube-prometheus 30 | app.kubernetes.io/version: 0.11.1 31 | spec: 32 | automountServiceAccountToken: true 33 | containers: 34 | - args: 35 | - --cert-dir=/var/run/serving-cert 36 | - --config=/etc/adapter/config.yaml 37 | - --metrics-relist-interval=1m 38 | - --prometheus-url=http://prometheus-k8s.monitoring.svc:9090/ 39 | - --secure-port=6443 40 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA 41 | image: registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.11.1 42 | livenessProbe: 43 | 
failureThreshold: 5 44 | httpGet: 45 | path: /livez 46 | port: https 47 | scheme: HTTPS 48 | periodSeconds: 5 49 | name: prometheus-adapter 50 | ports: 51 | - containerPort: 6443 52 | name: https 53 | readinessProbe: 54 | failureThreshold: 5 55 | httpGet: 56 | path: /readyz 57 | port: https 58 | scheme: HTTPS 59 | periodSeconds: 5 60 | resources: 61 | limits: 62 | cpu: 250m 63 | memory: 180Mi 64 | requests: 65 | cpu: 102m 66 | memory: 180Mi 67 | securityContext: 68 | allowPrivilegeEscalation: false 69 | capabilities: 70 | drop: 71 | - ALL 72 | readOnlyRootFilesystem: true 73 | startupProbe: 74 | failureThreshold: 18 75 | httpGet: 76 | path: /livez 77 | port: https 78 | scheme: HTTPS 79 | periodSeconds: 10 80 | volumeMounts: 81 | - mountPath: /tmp 82 | name: tmpfs 83 | readOnly: false 84 | - mountPath: /var/run/serving-cert 85 | name: volume-serving-cert 86 | readOnly: false 87 | - mountPath: /etc/adapter 88 | name: config 89 | readOnly: false 90 | nodeSelector: 91 | kubernetes.io/os: linux 92 | serviceAccountName: prometheus-adapter 93 | volumes: 94 | - emptyDir: {} 95 | name: tmpfs 96 | - emptyDir: {} 97 | name: volume-serving-cert 98 | - configMap: 99 | name: adapter-config 100 | name: config 101 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: prometheus-adapter 10 | namespace: monitoring 11 | spec: 12 | egress: 13 | - {} 14 | ingress: 15 | - {} 16 | podSelector: 17 | matchLabels: 18 | app.kubernetes.io/component: metrics-adapter 19 | app.kubernetes.io/name: prometheus-adapter 20 | app.kubernetes.io/part-of: kube-prometheus 21 | policyTypes: 22 | - Egress 23 | - Ingress 24 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: prometheus-adapter 10 | namespace: monitoring 11 | spec: 12 | minAvailable: 1 13 | selector: 14 | matchLabels: 15 | app.kubernetes.io/component: metrics-adapter 16 | app.kubernetes.io/name: prometheus-adapter 17 | app.kubernetes.io/part-of: kube-prometheus 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-roleBindingAuthReader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: resource-metrics-auth-reader 10 | namespace: kube-system 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: Role 14 | name: extension-apiserver-authentication-reader 15 | subjects: 16 | - kind: 
ServiceAccount 17 | name: prometheus-adapter 18 | namespace: monitoring 19 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: prometheus-adapter 10 | namespace: monitoring 11 | spec: 12 | ports: 13 | - name: https 14 | port: 443 15 | targetPort: 6443 16 | selector: 17 | app.kubernetes.io/component: metrics-adapter 18 | app.kubernetes.io/name: prometheus-adapter 19 | app.kubernetes.io/part-of: kube-prometheus 20 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | automountServiceAccountToken: false 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: metrics-adapter 7 | app.kubernetes.io/name: prometheus-adapter 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.11.1 10 | name: prometheus-adapter 11 | namespace: monitoring 12 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusAdapter-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: metrics-adapter 6 | app.kubernetes.io/name: prometheus-adapter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.11.1 9 | name: prometheus-adapter 10 | namespace: monitoring 11 | spec: 12 | endpoints: 13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 14 | interval: 30s 15 | metricRelabelings: 16 | - action: drop 17 | regex: (apiserver_client_certificate_.*|apiserver_envelope_.*|apiserver_flowcontrol_.*|apiserver_storage_.*|apiserver_webhooks_.*|workqueue_.*) 18 | sourceLabels: 19 | - __name__ 20 | port: https 21 | scheme: https 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | selector: 25 | matchLabels: 26 | app.kubernetes.io/component: metrics-adapter 27 | app.kubernetes.io/name: prometheus-adapter 28 | app.kubernetes.io/part-of: kube-prometheus 29 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusOperator-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.67.1 9 | name: prometheus-operator 10 | rules: 11 | - apiGroups: 12 | - monitoring.coreos.com 13 | resources: 14 | - alertmanagers 15 | - alertmanagers/finalizers 16 | - alertmanagers/status 17 | - alertmanagerconfigs 18 | - prometheuses 19 | - prometheuses/finalizers 20 | - prometheuses/status 21 | - prometheusagents 22 | - prometheusagents/finalizers 23 | - prometheusagents/status 24 | - thanosrulers 25 | - thanosrulers/finalizers 26 | - thanosrulers/status 27 | - scrapeconfigs 28 | 
- servicemonitors 29 | - podmonitors 30 | - probes 31 | - prometheusrules 32 | verbs: 33 | - '*' 34 | - apiGroups: 35 | - apps 36 | resources: 37 | - statefulsets 38 | verbs: 39 | - '*' 40 | - apiGroups: 41 | - "" 42 | resources: 43 | - configmaps 44 | - secrets 45 | verbs: 46 | - '*' 47 | - apiGroups: 48 | - "" 49 | resources: 50 | - pods 51 | verbs: 52 | - list 53 | - delete 54 | - apiGroups: 55 | - "" 56 | resources: 57 | - services 58 | - services/finalizers 59 | - endpoints 60 | verbs: 61 | - get 62 | - create 63 | - update 64 | - delete 65 | - apiGroups: 66 | - "" 67 | resources: 68 | - nodes 69 | verbs: 70 | - list 71 | - watch 72 | - apiGroups: 73 | - "" 74 | resources: 75 | - namespaces 76 | verbs: 77 | - get 78 | - list 79 | - watch 80 | - apiGroups: 81 | - networking.k8s.io 82 | resources: 83 | - ingresses 84 | verbs: 85 | - get 86 | - list 87 | - watch 88 | - apiGroups: 89 | - authentication.k8s.io 90 | resources: 91 | - tokenreviews 92 | verbs: 93 | - create 94 | - apiGroups: 95 | - authorization.k8s.io 96 | resources: 97 | - subjectaccessreviews 98 | verbs: 99 | - create 100 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusOperator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.67.1 9 | name: prometheus-operator 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: prometheus-operator 14 | subjects: 15 | - kind: ServiceAccount 16 | name: prometheus-operator 17 | namespace: monitoring 18 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusOperator-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.67.1 9 | name: prometheus-operator 10 | namespace: monitoring 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app.kubernetes.io/component: controller 16 | app.kubernetes.io/name: prometheus-operator 17 | app.kubernetes.io/part-of: kube-prometheus 18 | template: 19 | metadata: 20 | annotations: 21 | kubectl.kubernetes.io/default-container: prometheus-operator 22 | labels: 23 | app.kubernetes.io/component: controller 24 | app.kubernetes.io/name: prometheus-operator 25 | app.kubernetes.io/part-of: kube-prometheus 26 | app.kubernetes.io/version: 0.67.1 27 | spec: 28 | automountServiceAccountToken: true 29 | containers: 30 | - args: 31 | - --kubelet-service=kube-system/kubelet 32 | - --prometheus-config-reloader=quay.io/prometheus-operator/prometheus-config-reloader:v0.67.1 33 | image: quay.io/prometheus-operator/prometheus-operator:v0.67.1 34 | name: prometheus-operator 35 | ports: 36 | - containerPort: 8080 37 | name: http 38 | resources: 39 | limits: 40 | cpu: 200m 41 | memory: 200Mi 42 | requests: 43 | cpu: 100m 44 | memory: 100Mi 45 | securityContext: 46 | allowPrivilegeEscalation: false 47 | capabilities: 48 | drop: 49 | - ALL 50 | readOnlyRootFilesystem: true 51 | - args: 52 | - 
--secure-listen-address=:8443 53 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 54 | - --upstream=http://127.0.0.1:8080/ 55 | image: quay.io/brancz/kube-rbac-proxy:v0.14.2 56 | name: kube-rbac-proxy 57 | ports: 58 | - containerPort: 8443 59 | name: https 60 | resources: 61 | limits: 62 | cpu: 20m 63 | memory: 40Mi 64 | requests: 65 | cpu: 10m 66 | memory: 20Mi 67 | securityContext: 68 | allowPrivilegeEscalation: false 69 | capabilities: 70 | drop: 71 | - ALL 72 | readOnlyRootFilesystem: true 73 | runAsGroup: 65532 74 | runAsNonRoot: true 75 | runAsUser: 65532 76 | nodeSelector: 77 | kubernetes.io/os: linux 78 | securityContext: 79 | runAsNonRoot: true 80 | runAsUser: 65534 81 | seccompProfile: 82 | type: RuntimeDefault 83 | serviceAccountName: prometheus-operator 84 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusOperator-networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.67.1 9 | name: prometheus-operator 10 | namespace: monitoring 11 | spec: 12 | egress: 13 | - {} 14 | ingress: 15 | - from: 16 | - podSelector: 17 | matchLabels: 18 | app.kubernetes.io/name: prometheus 19 | ports: 20 | - port: 8443 21 | protocol: TCP 22 | podSelector: 23 | matchLabels: 24 | app.kubernetes.io/component: controller 25 | app.kubernetes.io/name: prometheus-operator 26 | app.kubernetes.io/part-of: kube-prometheus 27 | policyTypes: 28 | - Egress 29 | - Ingress 30 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusOperator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.67.1 9 | name: prometheus-operator 10 | namespace: monitoring 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: https 15 | port: 8443 16 | targetPort: https 17 | selector: 18 | app.kubernetes.io/component: controller 19 | app.kubernetes.io/name: prometheus-operator 20 | app.kubernetes.io/part-of: kube-prometheus 21 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusOperator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | automountServiceAccountToken: false 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/component: controller 7 | app.kubernetes.io/name: prometheus-operator 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.67.1 10 | name: prometheus-operator 11 | namespace: monitoring 12 | -------------------------------------------------------------------------------- /k8sConfig/manifests/prometheusOperator-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.67.1 9 | name: prometheus-operator 10 | namespace: monitoring 11 | spec: 12 | endpoints: 13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 14 | honorLabels: true 15 | port: https 16 | scheme: https 17 | tlsConfig: 18 | insecureSkipVerify: true 19 | selector: 20 | matchLabels: 21 | app.kubernetes.io/component: controller 22 | app.kubernetes.io/name: prometheus-operator 23 | app.kubernetes.io/part-of: kube-prometheus 24 | app.kubernetes.io/version: 0.67.1 25 | -------------------------------------------------------------------------------- /k8sConfig/manifests/setup/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /morpheus/docker-compose-morpheus.yaml: -------------------------------------------------------------------------------- 1 | # version: '3.9' 2 | 3 | services: 4 | # The UI/Backend 5 | morpheus: 6 | build: 7 | context: .. 8 | dockerfile: morpheus/dockerfile.dev 9 | container_name: morpheus 10 | restart: unless-stopped 11 | environment: 12 | - MODE=browser 13 | - STATUS=development 14 | volumes: 15 | # Mounts the Docker socket and creates named volumes for persistent data that can be shared between containers 16 | - /var/run/docker.sock:/var/run/docker.sock 17 | - morpheus-prometheus-files:/prometheus 18 | - morpheus-grafana-files:/grafana 19 | - morpheus-initsql:/postgres 20 | # ports: 21 | # - 3001:3001 22 | 23 | cadvisor: 24 | # Use a pinned cadvisor version instead of latest; latest does not work on macOS 25 | # The previously uploaded morpheus image was based on gcr.io/cadvisor/cadvisor:v0.47.1; this compose file pins v0.49.1 26 | image: gcr.io/cadvisor/cadvisor:v0.49.1 27 | container_name: morpheus-cadvisor 28 | restart: unless-stopped 29 | expose: 30 | - 8080 31 | ports: 32 | - 50000:8080 33 | volumes: 34 | - /var/run:/var/run:ro 35 | - /sys:/sys:ro 36 | - /var/lib/docker/:/var/lib/docker:ro 37 | - /var/run/docker.sock:/var/run/docker.sock:ro 38 | - /etc/machine-id:/etc/machine-id:ro 39 | - /var/lib/dbus/machine-id:/var/lib/dbus/machine-id:ro 40 | - /:/rootfs:ro 41 | - /dev/disk/:/dev/disk:ro 42 | privileged: true 43 | devices: 44 | - /dev/kmsg 45 | 46 | # Collects additional host metrics that Prometheus scrapes 47 | node-exporter: 48 | image: prom/node-exporter:latest 49 | container_name: morpheus-node-exporter 50 | restart: unless-stopped 51 | expose: 52 | - 9100 53 | ports: 54 | - 50001:9100 55 | volumes: 56 | # System info for Linux machines 57 | - /proc:/host/proc:ro 58 | # Also for Linux 59 | - /sys:/host/sys:ro 60 | # Root filesystem 61 | - /:/rootfs:ro 62 | command: 63 | - --path.procfs=/host/proc 64 | - --path.rootfs=/rootfs 65 | - --path.sysfs=/host/sys 66 | - --collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/) 67 | - --log.level=error 68 | 69 | # Collects metrics and serves them on container port 9090 (mapped to localhost:50002 on the host) 70 | prometheus: 71 | image: prom/prometheus:latest 72 | container_name: morpheus-prometheus 73 | restart: unless-stopped 74 | expose: 75 | - 9090 76 | ports: 77 | - 50002:9090 78 | volumes: 79 | - morpheus-prometheus-files:/etc/prometheus 80 | - morpheus-prometheus-data:/prometheus 81 | # Tells prometheus to use the config file 
from ./imageConfigs/prometheus 82 | # Dockerfile handles this if you're using the uploaded images 83 | command: 84 | - --config.file=/etc/prometheus/prometheus.yml 85 | - --log.level=error 86 | depends_on: 87 | - node-exporter 88 | - cadvisor 89 | - morpheus 90 | 91 | # Displays metrics gathered from containers 92 | grafana: 93 | image: grafana/grafana:latest 94 | container_name: morpheus-grafana 95 | restart: unless-stopped 96 | expose: 97 | - 3000 98 | ports: 99 | - 50003:3000 100 | volumes: 101 | - morpheus-grafana-files:/etc/grafana 102 | environment: 103 | GF_PATHS_CONFIG: /etc/grafana/grafana.ini 104 | depends_on: 105 | - prometheus 106 | 107 | # Store metrics gathered from containers at localhost:5432 108 | postgres: 109 | image: postgres:16.4-alpine3.20 110 | container_name: morpheus-postgres 111 | restart: unless-stopped 112 | expose: 113 | - 5432 114 | ports: 115 | - 50005:5432 116 | volumes: 117 | - postgres_volume:/var/lib/postgresql/data 118 | - morpheus-initsql:/docker-entrypoint-initdb.d/ 119 | environment: 120 | - POSTGRES_PASSWORD=admin 121 | - POSTGRES_USER=admin 122 | - POSTGRES_DB=morpheus 123 | healthcheck: 124 | test: ["CMD-SHELL", "pg_isready -U admin -d morpheus"] 125 | interval: 5s 126 | timeout: 5s 127 | retries: 5 128 | depends_on: 129 | - morpheus 130 | 131 | prometheus-to-postgres: 132 | build: 133 | context: ./scripts 134 | dockerfile: dockerfile-p2p.yaml 135 | depends_on: 136 | postgres: 137 | condition: service_healthy 138 | environment: 139 | - PROMETHEUS_URL=http://prometheus:9090 140 | - POSTGRES_HOST=postgres 141 | - POSTGRES_PORT=5432 142 | - POSTGRES_DB=morpheus 143 | - POSTGRES_USER=admin 144 | - POSTGRES_PASSWORD=admin 145 | - LOG_LEVEL=INFO 146 | logging: 147 | driver: "json-file" 148 | options: 149 | max-size: "10m" 150 | max-file: "3" 151 | tag: "{{.Name}}" 152 | 153 | volumes: 154 | morpheus-prometheus-data: 155 | morpheus-prometheus-files: 156 | morpheus-grafana-files: 157 | morpheus-initsql: 158 | postgres_volume: 159 | -------------------------------------------------------------------------------- /morpheus/dockerfile.dev: -------------------------------------------------------------------------------- 1 | 2 | FROM --platform=$BUILDPLATFORM node:18.12-alpine3.16 3 | 4 | # copy sql init file to directory for postgres initialization 5 | COPY imageConfigs/postgres postgres 6 | 7 | # Creates and copies files to folders that docker-compose will use create named volumes from 8 | COPY imageConfigs/prometheus prometheus 9 | COPY imageConfigs/grafana grafana 10 | 11 | # Starts the application 12 | CMD ["tail", "-f", "/dev/null"] -------------------------------------------------------------------------------- /morpheus/scripts/dockerfile-p2p.yaml: -------------------------------------------------------------------------------- 1 | FROM python:3.9-slim 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt . 6 | RUN pip install --no-cache-dir -r requirements.txt 7 | 8 | COPY prometheus_to_postgres.py . 
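# Note: the "-u" flag in the CMD below runs Python unbuffered, so the script's log output shows up immediately in docker/compose logs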
9 | 10 | CMD ["python", "-u", "prometheus_to_postgres.py"] 11 | -------------------------------------------------------------------------------- /morpheus/scripts/requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | psycopg2-binary -------------------------------------------------------------------------------- /nextui/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "next/core-web-vitals" 3 | } 4 | -------------------------------------------------------------------------------- /nextui/.gitignore: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files. 2 | 3 | # dependencies 4 | /node_modules 5 | /.pnp 6 | .pnp.js 7 | .yarn/install-state.gz 8 | 9 | # testing 10 | /coverage 11 | 12 | # next.js 13 | /.next/ 14 | /out/ 15 | 16 | # production 17 | /build 18 | 19 | # misc 20 | .DS_Store 21 | *.pem 22 | 23 | # debug 24 | npm-debug.log* 25 | yarn-debug.log* 26 | yarn-error.log* 27 | 28 | # local env files 29 | .env*.local 30 | 31 | # vercel 32 | .vercel 33 | 34 | # typescript 35 | *.tsbuildinfo 36 | next-env.d.ts 37 | -------------------------------------------------------------------------------- /nextui/README.md: -------------------------------------------------------------------------------- 1 | This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). 2 | 3 | ## Getting Started 4 | 5 | First, run the development server: 6 | 7 | ```bash 8 | npm run dev 9 | # or 10 | yarn dev 11 | # or 12 | pnpm dev 13 | # or 14 | bun dev 15 | ``` 16 | 17 | Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. 18 | 19 | You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. 20 | 21 | This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. 22 | 23 | ## Learn More 24 | 25 | To learn more about Next.js, take a look at the following resources: 26 | 27 | - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. 28 | - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. 29 | 30 | You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! 31 | 32 | ## Deploy on Vercel 33 | 34 | The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. 35 | 36 | Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details. 
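To sanity-check a production build locally before deploying (assuming the default `create-next-app` scripts in `package.json` are unchanged):

```bash
npm run build
npm run start
```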
37 | -------------------------------------------------------------------------------- /nextui/__tests__/Header.test.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { render, screen, fireEvent, waitFor, within, act } from '@testing-library/react'; 3 | import '@testing-library/jest-dom'; 4 | import { ThemeProvider, createTheme } from '@mui/material/styles'; 5 | import { CssBaseline } from '@mui/material'; 6 | import { UserProvider } from '@auth0/nextjs-auth0/client'; 7 | import Header from '../app/components/Header/Header'; 8 | import Router from 'next/router'; 9 | import mockRouter from 'next-router-mock'; 10 | 11 | jest.mock('next/router', () => require('next-router-mock')); 12 | 13 | const theme = createTheme(); 14 | 15 | const renderWithProviders = (component: React.ReactNode) => { 16 | return render( 17 | 18 | 19 | 20 | {component} 21 | 22 | 23 | ); 24 | }; 25 | 26 | describe('Header component', () => { 27 | beforeEach(() => { 28 | mockRouter.setCurrentUrl('/'); 29 | }); 30 | 31 | it('should render the Header component correctly', async () => { 32 | await act(async () => { 33 | renderWithProviders(
<Header />); 34 | }); 35 | 36 | const morpheusLogo = screen.getByText(/Morpheus/i); 37 | expect(morpheusLogo).toBeInTheDocument(); 38 | 39 | const settingsIcon = screen.getByTestId('settings-icon'); 40 | expect(settingsIcon).toBeInTheDocument(); 41 | }); 42 | 43 | it('should open and close the drawer when menu icon is clicked', async () => { 44 | await act(async () => { 45 | renderWithProviders(
<Header />); 46 | }); 47 | 48 | const menuButton = screen.getByAltText('Menu Icon'); 49 | await act(async () => { 50 | fireEvent.click(menuButton); 51 | }); 52 | 53 | const drawer = await screen.findByTestId('sidebar-drawer'); 54 | expect(drawer).toBeInTheDocument(); 55 | 56 | const closeButton = within(drawer).getByTestId('close-drawer-button'); 57 | await act(async () => { 58 | fireEvent.click(closeButton); 59 | }); 60 | 61 | await waitFor(() => { 62 | expect(screen.queryByTestId('sidebar-drawer')).toBeVisible(); 63 | }, { timeout: 3000 }); 64 | }); 65 | 66 | it('should toggle Docker folder when clicked', async () => { 67 | await act(async () => { 68 | renderWithProviders(
<Header />); 69 | }); 70 | 71 | const menuButton = screen.getByAltText('Menu Icon'); 72 | await act(async () => { 73 | fireEvent.click(menuButton); 74 | }); 75 | 76 | const dockerButton = await screen.findByText('Docker'); 77 | await act(async () => { 78 | fireEvent.click(dockerButton); 79 | }); 80 | 81 | expect(await screen.findByText('Docker Controller')).toBeInTheDocument(); 82 | 83 | await act(async () => { 84 | fireEvent.click(dockerButton); 85 | }); 86 | 87 | await waitFor(() => { 88 | expect(screen.queryByText('Docker Controller')).not.toBeInTheDocument(); 89 | }); 90 | }); 91 | 92 | it('should toggle Kubernetes folder when clicked', async () => { 93 | await act(async () => { 94 | renderWithProviders(
<Header />); 95 | }); 96 | 97 | const kubernetesButton = await screen.findByText('Kubernetes'); 98 | await act(async () => { 99 | fireEvent.click(kubernetesButton); 100 | }); 101 | 102 | expect(await screen.findByText('Cluster Visualizer')).toBeInTheDocument(); 103 | 104 | await act(async () => { 105 | fireEvent.click(kubernetesButton); 106 | }); 107 | 108 | await waitFor(() => { 109 | expect(screen.queryByText('Cluster Visualizer')).not.toBeInTheDocument(); 110 | }); 111 | }); 112 | 113 | it('should navigate to correct paths when links are clicked and not show 404', async () => { 114 | mockRouter.setCurrentUrl('/systemData'); 115 | 116 | renderWithProviders(<Header />
); 117 | 118 | const menuButton = screen.getByAltText('Menu Icon'); 119 | fireEvent.click(menuButton); 120 | 121 | const drawer = await screen.findByTestId('sidebar-drawer'); 122 | expect(drawer).toBeInTheDocument(); 123 | 124 | const dataLink = screen.getByText('System Data'); 125 | await act(async () => { 126 | fireEvent.click(dataLink); 127 | }); 128 | 129 | // Check if the URL has changed 130 | await waitFor(() => { 131 | expect(mockRouter.asPath).toBe('/systemData'); 132 | }); 133 | 134 | // Verify that no 404 error is displayed 135 | expect(screen.queryByText(/404/i)).not.toBeInTheDocument(); 136 | expect(screen.queryByText(/not found/i)).not.toBeInTheDocument(); 137 | 138 | // Basic check to ensure some expected content is still present 139 | expect(screen.getByText(/Morpheus/i)).toBeInTheDocument(); 140 | }); 141 | }); -------------------------------------------------------------------------------- /nextui/__tests__/MetricsDisplay.test.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { render, screen, waitFor } from '@testing-library/react'; 3 | import '@testing-library/jest-dom'; 4 | import { MetricsDisplay } from '../app/components/MetricsDisplay'; 5 | 6 | // Mock the fetch function 7 | global.fetch = jest.fn(); 8 | 9 | describe('MetricsDisplay', () => { 10 | const mockMetrics = [ 11 | { 12 | id: 1, 13 | metric_date: '2023-01-01T00:00:00Z', 14 | diskSpace: '1000000000', 15 | memory: '500000000', 16 | swap: '100000000', 17 | CPU_usage: '50', 18 | available_memory: '250000000', 19 | }, 20 | ]; 21 | 22 | beforeEach(() => { 23 | jest.resetAllMocks(); 24 | }); 25 | 26 | it('renders loading state initially', () => { 27 | render(); 28 | expect(screen.getByText('Loading...')).toBeInTheDocument(); 29 | }); 30 | 31 | it('renders error state when fetch fails', async () => { 32 | (global.fetch as jest.Mock).mockRejectedValueOnce(new Error('API Error')); 33 | 34 | render(); 35 | 36 | await waitFor(() => { 37 | expect(screen.getByText('Error: API Error')).toBeInTheDocument(); 38 | }); 39 | }); 40 | 41 | it('renders metrics when fetch is successful', async () => { 42 | (global.fetch as jest.Mock).mockResolvedValueOnce({ 43 | ok: true, 44 | json: async () => ({ data: mockMetrics }), 45 | }); 46 | 47 | render(); 48 | 49 | await waitFor(() => { 50 | expect(screen.getByText('System Metrics Overview')).toBeInTheDocument(); 51 | expect(screen.getAllByText('CPU Usage')).toHaveLength(2); 52 | expect(screen.getAllByText('50.00%')).toHaveLength(2); 53 | expect(screen.getAllByText('Memory Usage')).toHaveLength(2); 54 | expect(screen.getAllByText('476.84 MB')).toHaveLength(2); 55 | }); 56 | }); 57 | 58 | it('renders "No metrics available" when data is empty', async () => { 59 | (global.fetch as jest.Mock).mockResolvedValueOnce({ 60 | ok: true, 61 | json: async () => ({ data: [] }), 62 | }); 63 | 64 | render(); 65 | 66 | await waitFor(() => { 67 | expect(screen.getByText('No metrics available')).toBeInTheDocument(); 68 | }); 69 | }); 70 | 71 | it('formats bytes correctly', async () => { 72 | (global.fetch as jest.Mock).mockResolvedValueOnce({ 73 | ok: true, 74 | json: async () => ({ data: mockMetrics }), 75 | }); 76 | 77 | render(); 78 | 79 | await waitFor(() => { 80 | expect(screen.getAllByText('953.67 MB')).toHaveLength(2); // diskSpace 81 | expect(screen.getAllByText('476.84 MB')).toHaveLength(2); // memory 82 | expect(screen.getAllByText('95.37 MB')).toHaveLength(2); // swap 83 | expect(screen.getAllByText('238.42 
MB')).toHaveLength(2); // available_memory 84 | }); 85 | }); 86 | 87 | it('shows alert for high usage', async () => { 88 | const highUsageMetrics = [ 89 | { 90 | ...mockMetrics[0], 91 | CPU_usage: '90', 92 | memory: '950000000', 93 | }, 94 | ]; 95 | 96 | (global.fetch as jest.Mock).mockResolvedValueOnce({ 97 | ok: true, 98 | json: async () => ({ data: highUsageMetrics }), 99 | }); 100 | 101 | render(); 102 | 103 | await waitFor(() => { 104 | expect(screen.getAllByText('Alert: High usage')).toHaveLength(4); 105 | }); 106 | }); 107 | 108 | it('renders metrics history table', async () => { 109 | (global.fetch as jest.Mock).mockResolvedValueOnce({ 110 | ok: true, 111 | json: async () => ({ data: mockMetrics }), 112 | }); 113 | 114 | render(); 115 | 116 | await waitFor(() => { 117 | expect(screen.getByText('Metrics History')).toBeInTheDocument(); 118 | expect(screen.getByRole('table')).toBeInTheDocument(); 119 | expect(screen.getAllByRole('row')).toHaveLength(2); // Header + 1 data row as im displaying it in the metrics endpoint twice 120 | }); 121 | }); 122 | }); 123 | -------------------------------------------------------------------------------- /nextui/__tests__/Routing.test.tsx: -------------------------------------------------------------------------------- 1 | import { render, screen, waitFor } from '@testing-library/react'; // Ensure waitFor is imported 2 | import userEvent from '@testing-library/user-event'; 3 | import '@testing-library/jest-dom'; 4 | import { useRouter } from 'next/navigation'; 5 | import Header from '../app/components/Header/Header'; 6 | import { act } from 'react-dom/test-utils'; 7 | 8 | jest.mock('next/navigation', () => ({ 9 | useRouter: jest.fn().mockImplementation(() => ({ 10 | push: jest.fn(), 11 | back: jest.fn(), 12 | forward: jest.fn(), 13 | refresh: jest.fn(), 14 | replace: jest.fn(), 15 | prefetch: jest.fn(), 16 | })), 17 | usePathname: jest.fn().mockReturnValue('/'), 18 | })); 19 | 20 | jest.mock('@mui/material/styles', () => ({ 21 | ...jest.requireActual('@mui/material/styles'), 22 | useTheme: jest.fn().mockReturnValue({ 23 | direction: 'ltr', 24 | }), 25 | })); 26 | 27 | describe('Header Routing Tests', () => { 28 | beforeEach(() => { 29 | jest.clearAllMocks(); 30 | }); 31 | 32 | test('Header renders without errors', () => { 33 | render(
<Header />); 34 | expect(screen.getByText(/Morpheus/i)).toBeInTheDocument(); 35 | }); 36 | 37 | const clickLinkAndExpectNavigation = async (name, path) => { 38 | const link = await screen.findByText(name); 39 | expect(link).toBeInTheDocument(); 40 | 41 | // Use userEvent to simulate a click 42 | await userEvent.click(link); 43 | 44 | const { push } = useRouter(); 45 | // Ensure push is called 46 | expect(push).toHaveBeenCalledWith(path); // Check if this is being called 47 | }; 48 | 49 | test('Folder toggles work correctly', async () => { 50 | render(<Header />
); 51 | 52 | const menuButton = screen.getByAltText('Menu Icon'); 53 | await userEvent.click(menuButton); 54 | 55 | const dockerFolder = await screen.findByText('Docker'); 56 | await userEvent.click(dockerFolder); 57 | expect(await screen.findByText('Docker Controller')).toBeVisible(); 58 | 59 | await userEvent.click(dockerFolder); 60 | await waitFor(() => { 61 | expect(screen.queryByText('Docker Controller')).not.toBeInTheDocument(); 62 | }); 63 | }); 64 | 65 | 66 | }); 67 | -------------------------------------------------------------------------------- /nextui/__tests__/SystemData.test.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { render, screen } from '@testing-library/react'; 3 | import '@testing-library/jest-dom'; 4 | import Dashboard from '../app/systemData/Dashboard'; 5 | 6 | jest.mock('next/router', () => require('next-router-mock')); 7 | 8 | describe('SystemData Dashboard', () => { 9 | it('renders without crashing', () => { 10 | render(); 11 | expect(screen.getByText('CPU Usage')).toBeInTheDocument(); 12 | expect(screen.getByText('Memory Usage')).toBeInTheDocument(); 13 | expect(screen.getByText('RAM status')).toBeInTheDocument(); 14 | }); 15 | 16 | it('renders the correct number of Grid containers', () => { 17 | render(); 18 | const gridContainers = document.querySelectorAll('.MuiGrid-container'); 19 | expect(gridContainers.length).toBe(5); 20 | }); 21 | 22 | it('renders iframes for each panel', () => { 23 | render(); 24 | const iframes = document.querySelectorAll('iframe'); 25 | expect(iframes.length).toBe(11); 26 | }); 27 | 28 | it('renders the correct headers for each section', () => { 29 | render(); 30 | expect(screen.getByText('CPU Usage')).toBeInTheDocument(); 31 | expect(screen.getByText('Memory Usage')).toBeInTheDocument(); 32 | expect(screen.getByText('Memory Usage detailed')).toBeInTheDocument(); 33 | expect(screen.getByText('Network data detailed')).toBeInTheDocument(); 34 | expect(screen.getByText('Extended CPU data')).toBeInTheDocument(); 35 | expect(screen.getByText('Cached Mem data')).toBeInTheDocument(); 36 | expect(screen.getByText('Sent packets Data')).toBeInTheDocument(); 37 | expect(screen.getByText('Received Packets Data')).toBeInTheDocument(); 38 | }); 39 | 40 | it('renders the correct number of Paper components', () => { 41 | render(); 42 | const paperComponents = document.querySelectorAll('.MuiPaper-root'); 43 | expect(paperComponents.length).toBe(11); 44 | }); 45 | 46 | it('renders the correct number of iframes with specific sources', () => { 47 | render(); 48 | const systemIframes = document.querySelectorAll('iframe[src*="system?orgId=1"]'); 49 | const dockerIframes = document.querySelectorAll('iframe[src*="docker-container?orgId=1"]'); 50 | expect(systemIframes.length).toBe(7); 51 | expect(dockerIframes.length).toBe(4); 52 | }); 53 | 54 | it('renders the correct classes for grid items', () => { 55 | render(); 56 | const dataCardGrid1Items = document.querySelectorAll('.dataCardGrid1'); 57 | const dataCardGrid2Items = document.querySelectorAll('.dataCardGrid2'); 58 | const thirdGridItems = document.querySelectorAll('.thirdGridItems'); 59 | expect(dataCardGrid1Items.length).toBe(3); 60 | expect(dataCardGrid2Items.length).toBe(2); 61 | expect(thirdGridItems.length).toBe(6); 62 | }); 63 | 64 | it('renders the correct header classes', () => { 65 | render(); 66 | const cpuHeaders = document.querySelectorAll('.cardHeaderCPU'); 67 | const memoryHeaders = 
document.querySelectorAll('.cardHeaderMemory'); 68 | const networkHeaders = document.querySelectorAll('.cardHeaderNetwork'); 69 | expect(cpuHeaders.length).toBe(3); 70 | expect(memoryHeaders.length).toBe(3); 71 | expect(networkHeaders.length).toBe(3); 72 | }); 73 | }); 74 | -------------------------------------------------------------------------------- /nextui/__tests__/getMetrics.test.js: -------------------------------------------------------------------------------- 1 | // import getMetrics from '../app/api/aws-bedrock/getMetrics'; 2 | // import { RateLimiter } from 'limiter'; 3 | 4 | // const mockLimiterRemoveTokens = jest.spyOn( 5 | // RateLimiter.prototype, 6 | // 'removeTokens' 7 | // ); 8 | 9 | // describe('getMetrics', () => { 10 | // afterEach(() => { 11 | // jest.clearAllMocks(); 12 | // }); 13 | 14 | // it('fetches Prometheus data successfully', async () => { 15 | // mockLimiterRemoveTokens.mockResolvedValueOnce(true); 16 | 17 | // global.fetch = jest.fn().mockResolvedValueOnce({ 18 | // ok: true, 19 | // json: async () => ({ 20 | // data: { 21 | // result: [ 22 | // { metric: { name: 'container1' }, value: [1691856162.332, '0.12'] }, 23 | // ], 24 | // }, 25 | // }), 26 | // }); 27 | 28 | // const result = await getMetrics(); 29 | 30 | // expect(fetch).toHaveBeenCalledTimes(4); 31 | // expect(result).toEqual([ 32 | // { 33 | // metric: 'CPU Usage', 34 | // unit: 'cores', 35 | // description: 'CPU usage rate over 5 minutes', 36 | // data: [{ container: 'container1', value: '0.12' }], 37 | // }, 38 | // { 39 | // metric: 'Memory Usage', 40 | // unit: 'bytes', 41 | // description: 'Current memory usage', 42 | // data: [{ container: 'container1', value: '0.12' }], 43 | // }, 44 | // { 45 | // metric: 'Network Receive', 46 | // unit: 'bytes/s', 47 | // description: 'Network receive rate over 5 minutes', 48 | // data: [{ container: 'container1', value: '0.12' }], 49 | // }, 50 | // { 51 | // metric: 'Network Transmit', 52 | // unit: 'bytes/s', 53 | // description: 'Network transmit rate over 5 minutes', 54 | // data: [{ container: 'container1', value: '0.12' }], 55 | // }, 56 | // ]); 57 | // }); 58 | 59 | // it('throws an error when rate limit is exceeded', async () => { 60 | // mockLimiterRemoveTokens.mockResolvedValueOnce(false); 61 | 62 | // await expect(getMetrics()).rejects.toThrow('Rate limit exceeded'); 63 | // }); 64 | 65 | // it('throws an error when fetch fails', async () => { 66 | // mockLimiterRemoveTokens.mockResolvedValueOnce(true); 67 | 68 | // global.fetch = jest.fn().mockResolvedValueOnce({ 69 | // ok: false, 70 | // status: 500, 71 | // }); 72 | 73 | // await expect(getMetrics()).rejects.toThrow( 74 | // 'Failed to fetch Prometheus data' 75 | // ); 76 | // }); 77 | 78 | // it('uses cached data if cache is valid', async () => { 79 | // mockLimiterRemoveTokens.mockResolvedValueOnce(true); 80 | 81 | // const mockCacheData = [ 82 | // { 83 | // metric: 'CPU Usage', 84 | // unit: 'cores', 85 | // description: 'CPU usage rate over 5 minutes', 86 | // data: [{ container: 'container1', value: '0.12' }], 87 | // }, 88 | // ]; 89 | // const now = Date.now(); 90 | // jest.spyOn(Date, 'now').mockReturnValueOnce(now); 91 | // const cache = { data: mockCacheData, timestamp: now - 1000 }; 92 | 93 | // global.cache = cache; 94 | 95 | // const result = await getMetrics(); 96 | 97 | // expect(result).toBe(mockCacheData); 98 | // }); 99 | // }); 100 | -------------------------------------------------------------------------------- /nextui/__tests__/route.test.js: 
-------------------------------------------------------------------------------- 1 | // import { createMocks } from 'node-mocks-http'; 2 | // import { GET } from '../app/api/aws-bedrock/route'; // Adjust the path if necessary 3 | // import pool from '@/db/pgModel'; // Adjust according to your setup 4 | 5 | // jest.mock('@/db/pgModel', () => ({ 6 | // query: jest.fn(), 7 | // })); 8 | 9 | // describe('GET API Route Handler', () => { 10 | // beforeEach(() => { 11 | // jest.clearAllMocks(); 12 | // }); 13 | 14 | // it('responds with metrics when the conversation is found', async () => { 15 | // // Mock the database query for fetching conversation history 16 | // pool.query.mockResolvedValueOnce({ 17 | // rows: [ 18 | // { 19 | // conversation: JSON.stringify([ 20 | // { role: 'user', content: 'Dummy data' }, 21 | // ]), 22 | // }, 23 | // ], 24 | // }); 25 | 26 | // // Mock the database update query 27 | // pool.query.mockResolvedValueOnce({ 28 | // rowCount: 1, // Simulating that an existing conversation was updated 29 | // }); 30 | 31 | // // Mocking the request and response 32 | // const { req, res } = createMocks({ 33 | // method: 'GET', 34 | // }); 35 | 36 | // // Call the GET handler 37 | // await GET(req); 38 | 39 | // // Assert the status code and response 40 | // expect(res._getStatusCode()).toBe(200); 41 | // expect(res._getJSONData()).toEqual({ 42 | // result: expect.any(String), 43 | // }); 44 | // }); 45 | 46 | // it('returns 500 when the database query fails', async () => { 47 | // // Mock the database query failure 48 | // pool.query.mockRejectedValueOnce(new Error('Database query failed')); 49 | 50 | // const { req, res } = createMocks({ 51 | // method: 'GET', 52 | // }); 53 | 54 | // // Call the GET handler 55 | // await GET(req); 56 | 57 | // // Assert the response status and error details 58 | // expect(res._getStatusCode()).toBe(500); 59 | // expect(res._getJSONData()).toEqual({ 60 | // error: 'Internal Server Error', 61 | // details: 'Database query failed', 62 | // }); 63 | // }); 64 | 65 | // it('returns 500 when JSON parsing fails', async () => { 66 | // // Mock the database query to return invalid JSON 67 | // pool.query.mockResolvedValueOnce({ 68 | // rows: [{ conversation: 'invalid JSON' }], 69 | // }); 70 | 71 | // const { req, res } = createMocks({ 72 | // method: 'GET', 73 | // }); 74 | 75 | // // Call the GET handler 76 | // await GET(req); 77 | 78 | // // Assert the response status and error details 79 | // expect(res._getStatusCode()).toBe(500); 80 | // expect(res._getJSONData()).toEqual({ 81 | // error: 'Internal Server Error', 82 | // details: 'Unexpected token i in JSON at position 0', 83 | // }); 84 | // }); 85 | // }); 86 | -------------------------------------------------------------------------------- /nextui/app/api/AIChat/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | import OpenAI from 'openai'; 3 | 4 | // Initialize OpenAI client 5 | const openai = new OpenAI({ 6 | apiKey: process.env.OPENAI_API_KEY, 7 | }); 8 | 9 | // // Function to fetch Kubernetes data 10 | // async function fetchKubernetesData() { 11 | // // TODO: implement this function to fetch Kubernetes-specific data 12 | // // This is placeholder and should be replaced with actual Kubernetes data fetching logic 13 | // return { 14 | // pods: 10, 15 | // services: 5, 16 | // deployments: 3, 17 | // // Add more relevant Kubernetes metrics here 18 | // }; 19 | // } 20 | 21 | // Function to fetch 
Kubernetes data 22 | async function fetchKubernetesData() { 23 | try { 24 | const response = await fetch('http://localhost:3000/api/v1/clusterview'); 25 | if (!response.ok) { 26 | throw new Error('Failed to fetch cluster data'); 27 | } 28 | return await response.json(); 29 | } catch (error) { 30 | console.error('Error fetching cluster data:', error); 31 | return null; 32 | } 33 | } 34 | 35 | export async function POST(request: Request) { 36 | try { 37 | // Extract the prompt and model from the request body 38 | const { prompt, model } = await request.json(); 39 | 40 | // Validate the prompt 41 | if (!prompt || typeof prompt !== 'string') { 42 | return NextResponse.json({ error: 'Invalid prompt' }, { status: 400 }); 43 | } 44 | 45 | // Fetch Kubernetes metrics data 46 | const kubernetesData = await fetchKubernetesData(); 47 | 48 | if (!kubernetesData) { 49 | return NextResponse.json( 50 | { error: 'Failed to fetch Kubernetes data' }, 51 | { status: 500 } 52 | ); 53 | } 54 | 55 | // Combine the user's prompt with the Kubernetes data 56 | const enhancedPrompt = ` 57 | Act as a Kubernetes expert. You will answer questions related to Kubernetes clusters, containers, deployments, or any aspects related to managing and observing these systems. 58 | Here is my question: ${prompt}. If my question is certainly irrelevant to clusters, containers, etc, follow these 3 steps: 59 | 1) Do not answer the question. 60 | 2) Do not provide information related to the Kubernetes data that I will provide to you. 61 | 3) Respond with 'I'm sorry, it appears your question is not relevant to your Kubernetes clusters.' 62 | Only if the question is relevant to Kubernetes, proceed to answer the question using the Kubernetes metrics data I will provide to offer insights and recommendations. (If it is relevant, the fact that it is relevant should not be addressed.) 63 | Here is the Kubernetes data: ${JSON.stringify(kubernetesData, null, 2)}. 64 | Never use placeholders if you do not know specific numbers or information. Be concise yet informative. 65 | Aim to organize the response by metrics and include specific pods/services/deployments and their metrics/numbers, if relevant. Use line breaks, numbering, bullets, and bolding for organization. Limit your response to under 1500 tokens.`; 66 | 67 | //gpt-4 seems to have much better understanding of what information is considered relevant, as well as better consistency in response style and length, so it didn't require these specific instructions. 68 | //However, 3.5-turbo was used because it was much cheaper at less than $0.005 per 1000 tokens. while 4 was over $0.03 per 1000 tokens. 69 | //For container (not k8) query: running Morpheus containers alone, the prometheus data sent is about 4 to 6 thousand tokens. The complexity of the data seems to also increase this, because just the prometheus data, with no true prompt and limited response, would cost 3 to 6 cents per request in gpt-4. 70 | //Used 'opinion' in prompt to get more judgement rather than only objective facts. In some models, disallow certain formatting. Bold can result in '**' and headings in '###'. 71 | //Organization by metric was used for more clarity.Used 'containers/metrics/numbers' to prevent vague or general sounding responses. 72 | //Several questions can be representative of how strict their interpretation of the rules are. From least to most answered given the same rules for interpretation and response: 'how is the weather', 'where is california', 'how are my containers', 'how is my cluster'. 
73 | //For the most strict rules, use terms like 'act strictly as...' and 'without exception'. However, this combo made 3.5 believe questions like 'how is my cluster' not relevant. 74 | 75 | // Send the enhanced prompt to OpenAI for analysis 76 | const completion = await openai.chat.completions.create({ 77 | model: model || 'gpt-3.5-turbo', 78 | messages: [{ role: 'user', content: enhancedPrompt }], 79 | }); 80 | 81 | const result = completion.choices[0].message.content; 82 | 83 | return NextResponse.json({ result }); 84 | } catch (error) { 85 | console.error('Error:', error); 86 | return NextResponse.json( 87 | { error: 'An error occurred while processing your request.' }, 88 | { status: 500 } 89 | ); 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /nextui/app/api/aws-bedrock/getMetrics.ts: -------------------------------------------------------------------------------- 1 | import { RateLimiter } from 'limiter'; 2 | 3 | const PROMETHEUS_URL = process.env.PROMETHEUS_URL || 'http://localhost:50002'; 4 | const limiter = new RateLimiter({ tokensPerInterval: 5, interval: 'second' }); 5 | let cache: { data: any; timestamp: number } | null = null; 6 | const CACHE_DURATION = 60 * 1000; // 1 minute 7 | 8 | const queries = [ 9 | { 10 | name: 'CPU Usage', 11 | query: 12 | 'sum(rate(container_cpu_usage_seconds_total{name=~".+"}[5m])) by (name)', 13 | unit: 'cores', 14 | description: 'CPU usage rate over 5 minutes', 15 | }, 16 | { 17 | name: 'Memory Usage', 18 | query: 'sum(container_memory_usage_bytes{name=~".+"}) by (name)', 19 | unit: 'bytes', 20 | description: 'Current memory usage', 21 | }, 22 | { 23 | name: 'Network Receive', 24 | query: 25 | 'sum(rate(container_network_receive_bytes_total{name=~".+"}[5m])) by (name)', 26 | unit: 'bytes/s', 27 | description: 'Network receive rate over 5 minutes', 28 | }, 29 | { 30 | name: 'Network Transmit', 31 | query: 32 | 'sum(rate(container_network_transmit_bytes_total{name=~".+"}[5m])) by (name)', 33 | unit: 'bytes/s', 34 | description: 'Network transmit rate over 5 minutes', 35 | }, 36 | ]; 37 | 38 | interface PrometheusMetric { 39 | metric: { 40 | [key: string]: string; 41 | }; 42 | value: [number, string]; 43 | } 44 | 45 | interface QueryResult { 46 | name: string; 47 | data: PrometheusMetric[]; 48 | } 49 | 50 | export default async function getMetrics() { 51 | if (!(await limiter.removeTokens(1))) { 52 | throw new Error('Rate limit exceeded'); 53 | } 54 | 55 | if (cache && Date.now() - cache.timestamp < CACHE_DURATION) { 56 | return cache.data; 57 | } 58 | 59 | try { 60 | const results: QueryResult[] = await Promise.all( 61 | queries.map(async ({ name, query }) => { 62 | const url = `${PROMETHEUS_URL}/api/v1/query?query=${encodeURIComponent( 63 | query 64 | )}`; 65 | const response = await fetch(url); 66 | if (!response.ok) { 67 | throw new Error(`HTTP error! 
status: ${response.status}`); 68 | } 69 | const data = await response.json(); 70 | return { name, data: data.data.result }; 71 | }) 72 | ); 73 | 74 | const formattedResults = results.map(({ name, data }) => { 75 | const queryInfo = queries.find((q) => q.name === name); 76 | return { 77 | metric: name, 78 | unit: queryInfo?.unit, 79 | description: queryInfo?.description, 80 | data: data.map((item: PrometheusMetric) => ({ 81 | container: item.metric.name, 82 | value: parseFloat(item.value[1]).toFixed(2), 83 | })), 84 | }; 85 | }); 86 | 87 | cache = { data: formattedResults, timestamp: Date.now() }; 88 | return formattedResults; 89 | } catch (error) { 90 | console.error('Error fetching Prometheus data:', error); 91 | throw new Error('Failed to fetch Prometheus data'); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /nextui/app/api/db-metrics/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | import pool from '@/db/pgModel'; 3 | 4 | // Environment variables for connection details 5 | // const pool = new Pool({ 6 | // host: process.env.POSTGRES_HOST || 'localhost', 7 | // port: parseInt(process.env.POSTGRES_PORT || '5432'), 8 | // database: process.env.POSTGRES_DB || 'morpheus', 9 | // user: process.env.POSTGRES_USER || 'postgres', 10 | // password: process.env.POSTGRES_PASSWORD || 'your_postgres_password', 11 | // }); 12 | 13 | export async function GET(request: Request) { 14 | const { searchParams } = new URL(request.url); 15 | const limit = parseInt(searchParams.get('limit') || '50'); 16 | const offset = parseInt(searchParams.get('offset') || '0'); 17 | 18 | if (isNaN(limit) || isNaN(offset)) { 19 | return NextResponse.json({ error: 'Invalid limit or offset' }, { status: 400 }); 20 | } 21 | 22 | try { 23 | const client = await pool.connect(); 24 | try { 25 | const result = await client.query( 26 | 'SELECT * FROM snapshots ORDER BY metric_date DESC LIMIT $1 OFFSET $2', 27 | [limit, offset] 28 | ); 29 | 30 | if (result.rows.length === 0) { 31 | return NextResponse.json({ message: 'No data found' }, { status: 404 }); 32 | } 33 | 34 | const snapshots = result.rows.map(row => ({ 35 | ...row, 36 | metric_date: row.metric_date.toISOString(), 37 | })); 38 | 39 | return NextResponse.json({ data: snapshots }); 40 | } finally { 41 | client.release(); 42 | } 43 | } catch (error) { 44 | console.error('Error fetching data from database:', error); 45 | return NextResponse.json({ error: 'Internal Server Error', details: error.message }, { status: 500 }); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /nextui/app/api/db-metrics/run-prometheus-to-postgres.ts: -------------------------------------------------------------------------------- 1 | import type { NextApiRequest, NextApiResponse } from 'next'; 2 | import { exec } from 'child_process'; 3 | 4 | export default function handler(req: NextApiRequest, res: NextApiResponse) { 5 | if (req.method !== 'POST') { 6 | return res.status(405).json({ message: 'Method Not Allowed' }); 7 | } 8 | 9 | exec('python /Morpheus/scripts/prometheus_to_postgres.py', (error, stdout, stderr) => { 10 | if (error) { 11 | console.error(`Error: ${error.message}`); 12 | return res.status(500).json({ message: 'Script execution failed', error: error.message }); 13 | } 14 | if (stderr) { 15 | console.error(`stderr: ${stderr}`); 16 | return res.status(500).json({ message: 'Script execution 
failed', error: stderr }); 17 | } 18 | console.log(`stdout: ${stdout}`); 19 | res.status(200).json({ message: 'Script executed successfully', output: stdout }); 20 | }); 21 | } 22 | -------------------------------------------------------------------------------- /nextui/app/api/openai-analyze-metrics/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | import OpenAI from 'openai'; 3 | import { RateLimiter } from 'limiter'; 4 | 5 | // Initialize OpenAI client 6 | const openai = new OpenAI({ 7 | apiKey: process.env.OPENAI_API_KEY, 8 | }); 9 | 10 | // Create a rate limiter: 2 requests per second 11 | const limiter = new RateLimiter({ tokensPerInterval: 2, interval: 'second' }); 12 | 13 | // Function to fetch Prometheus data 14 | async function fetchPrometheusData() { 15 | try { 16 | const response = await fetch('http://localhost:3000/api/prometheus-query'); 17 | if (!response.ok) { 18 | throw new Error(`HTTP error! status: ${response.status}`); 19 | } 20 | return response.json(); 21 | } catch (error) { 22 | console.error('Error fetching Prometheus data:', error); 23 | return null; 24 | } 25 | } 26 | 27 | export async function POST(request: Request) { 28 | // Check rate limit before processing the request 29 | if (!(await limiter.removeTokens(1))) { 30 | return NextResponse.json({ error: 'Rate limit exceeded' }, { status: 429 }); 31 | } 32 | 33 | try { 34 | // Extract the prompt from the request body 35 | const { prompt } = await request.json(); 36 | 37 | // Validate the prompt 38 | if (!prompt || typeof prompt !== 'string') { 39 | return NextResponse.json({ error: 'Invalid prompt' }, { status: 400 }); 40 | } 41 | 42 | // Fetch Prometheus metrics data 43 | const prometheusData = await fetchPrometheusData(); 44 | 45 | // Combine the user's prompt with the Prometheus data 46 | const enhancedPrompt = ` 47 | ${prompt} 48 | These are current metrics data from our containerized system: 49 | ${JSON.stringify(prometheusData, null, 2)} 50 | Analyze this, along with the original prompt to provide insights and your opinions. Never use any bold or heading formatting in your response. 51 | Organize response by the metrics. Add a recommendation section with a list of specific actions. 52 | Aim to specify containers/metrics/numbers. Response should be 1000 to 1500 tokens. 53 | `; 54 | //Model 4 seems to have better consistency in response style and length, so it didn't require these specific instructions. 55 | //However, 3.5-turbo was used because it was much cheaper at less than $0.005 per 1000 tokens. while 4 was over $0.03 per 1000 tokens. 56 | //Currently, running Morpheus containers alone, the prometheus data is about 4 to 6 thousand tokens. The complexity of the data seems to also increase this, because just the prometheus data, with little prompt and very limited token response, would cost 3 to 6 cents per request in gpt-4. 
57 | //Used 'opinion' in prompt to get more judgement rather than only objective facts 58 | //Mentioned formatting because bold results in '**' and headings result in '###' 59 | //Organization by metric was used for more clarity 60 | //Used 'containers/metrics/numbers' to prevent vague or general sounding responses 61 | 62 | // Send the enhanced prompt to OpenAI for analysis 63 | const completion = await openai.chat.completions.create({ 64 | // model: 'gpt-4-0125-preview', 65 | model: 'gpt-3.5-turbo', 66 | messages: [{ role: 'user', content: enhancedPrompt }], 67 | }); 68 | 69 | // Extract the result from the OpenAI response 70 | const result = completion.choices[0].message.content; 71 | 72 | // Log successful analysis (for monitoring purposes) 73 | console.log( 74 | `Successfully analyzed prompt with metrics: "${prompt.substring( 75 | 0, 76 | 50 77 | )}..."` 78 | ); 79 | 80 | // Return the analysis result 81 | return NextResponse.json({ result }); 82 | } catch (error) { 83 | console.error('Error invoking OpenAI model:', error); 84 | 85 | // Handle rate limit errors from OpenAI 86 | if (error instanceof OpenAI.APIError && error.status === 429) { 87 | return NextResponse.json( 88 | { error: 'API rate limit exceeded. Please try again later.' }, 89 | { status: 429 } 90 | ); 91 | } 92 | 93 | // Generic error response for all other errors 94 | return NextResponse.json( 95 | { 96 | error: 97 | 'An error occurred while processing your request. Please try again later.', 98 | }, 99 | { status: 500 } 100 | ); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /nextui/app/api/prometheus-query/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | import { RateLimiter } from 'limiter'; 3 | 4 | // The metrics here are for Prometheus metrics available without Kubernetes. 
See k-prometheus-query for Kubernetes queries 5 | // This queries eight metrics, for whatis usually considered most important: container count, container uptime average, CPU, memory, disk I/O read and write, network receive and transmit 6 | 7 | // Set the Prometheus URL, defaulting to localhost if not provided in environment variables 8 | const PROMETHEUS_URL = process.env.PROMETHEUS_URL || 'http://localhost:50002'; 9 | 10 | // Create a rate limiter: 5 requests per second 11 | // This helps prevent overloading the Prometheus server with too many requests 12 | const limiter = new RateLimiter({ tokensPerInterval: 5, interval: 'second' }); 13 | 14 | // Simple in-memory cache to store the last fetched data 15 | // This reduces the load on Prometheus by serving cached data for a short period 16 | let cache: { data: any; timestamp: number } | null = null; 17 | const CACHE_DURATION = 60 * 1000; // Cache duration: 1 minute 18 | 19 | export async function GET(request: Request) { 20 | // Check rate limit before processing the request 21 | if (!(await limiter.removeTokens(1))) { 22 | return NextResponse.json({ error: 'Rate limit exceeded' }, { status: 429 }); 23 | } 24 | 25 | // Check if we have valid cached data 26 | if (cache && Date.now() - cache.timestamp < CACHE_DURATION) { 27 | return NextResponse.json(cache.data); 28 | } 29 | 30 | try { 31 | // Define the Prometheus queries we want to execute 32 | // Each query is an object with a name (for identification), the PromQL query string, unit, and description 33 | const queries = [ 34 | { 35 | name: 'Container Count', 36 | query: 'count(container_last_seen)', 37 | unit: 'containers', 38 | description: 'Total number of containers', 39 | }, 40 | { 41 | name: 'Container Uptime Average', 42 | query: 'avg(time() - container_start_time_seconds)', 43 | unit: 'seconds', 44 | description: 'Average uptime of containers', 45 | }, 46 | { 47 | name: 'CPU Usage', 48 | query: 49 | 'sum(rate(container_cpu_usage_seconds_total{name=~".+"}[5m])) by (name)', 50 | unit: 'cores', 51 | description: 'CPU usage rate over 5 minutes', 52 | }, 53 | { 54 | name: 'Memory Usage', 55 | query: 'sum(container_memory_usage_bytes{name=~".+"}) by (name)', 56 | unit: 'bytes', 57 | description: 'Current memory usage', 58 | }, 59 | { 60 | name: 'Disk I/O Read', 61 | query: 62 | 'sum(rate(container_fs_reads_bytes_total{name!=""}[5m])) by (name)', 63 | unit: 'bytes/s', 64 | description: 'Disk read rate over 5 minutes', 65 | }, 66 | { 67 | name: 'Disk I/O Write', 68 | query: 69 | 'sum(rate(container_fs_writes_bytes_total{name!=""}[5m])) by (name)', 70 | unit: 'bytes/s', 71 | description: 'Disk write rate over 5 minutes', 72 | }, 73 | { 74 | name: 'Network Receive', 75 | query: 76 | 'sum(rate(container_network_receive_bytes_total{name=~".+"}[5m])) by (name)', 77 | unit: 'bytes/s', 78 | description: 'Network receive rate over 5 minutes', 79 | }, 80 | { 81 | name: 'Network Transmit', 82 | query: 83 | 'sum(rate(container_network_transmit_bytes_total{name=~".+"}[5m])) by (name)', 84 | unit: 'bytes/s', 85 | description: 'Network transmit rate over 5 minutes', 86 | }, 87 | ]; 88 | 89 | // Execute all queries in parallel using Promise.all 90 | // This is more efficient than running queries sequentially 91 | const results = await Promise.all( 92 | queries.map(async ({ name, query }) => { 93 | // Construct the URL for the Prometheus API call 94 | const url = `${PROMETHEUS_URL}/api/v1/query?query=${encodeURIComponent( 95 | query 96 | )}`; 97 | 98 | // Fetch data from Prometheus 99 | const response = 
await fetch(url); 100 | if (!response.ok) { 101 | throw new Error(`HTTP error! status: ${response.status}`); 102 | } 103 | 104 | // Parse the JSON response 105 | const data = await response.json(); 106 | 107 | // Return an object with the query name and the result data 108 | return { name, data: data.data.result }; 109 | }) 110 | ); 111 | 112 | // Format the results into a more usable structure 113 | const formattedResults = results.map(({ name, data }) => { 114 | const queryInfo = queries.find((q) => q.name === name); 115 | return { 116 | metric: name, 117 | unit: queryInfo?.unit, 118 | description: queryInfo?.description, 119 | data: data.map((item: any) => ({ 120 | container: item.metric.name, 121 | value: parseFloat(item.value[1]).toFixed(2), 122 | })), 123 | }; 124 | }); 125 | 126 | // Update the cache with the new data 127 | cache = { data: formattedResults, timestamp: Date.now() }; 128 | 129 | // Return the formatted results as a JSON response 130 | return NextResponse.json(formattedResults); 131 | } catch (error) { 132 | // Log the error for server-side debugging 133 | console.error('Error fetching container Prometheus data:', error); 134 | 135 | // Return a 500 error response to the client 136 | return NextResponse.json( 137 | { error: 'Failed to fetch container Prometheus data' }, 138 | { status: 500 } 139 | ); 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /nextui/app/api/settings/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | import { Pool } from 'pg'; 3 | 4 | const pool = new Pool({ 5 | host: process.env.POSTGRES_HOST || 'localhost', 6 | port: parseInt(process.env.POSTGRES_PORT || '5432'), 7 | database: process.env.POSTGRES_DB || 'morpheus', 8 | user: process.env.POSTGRES_USER || 'postgres', 9 | password: process.env.POSTGRES_PASSWORD || 'your_postgres_password', 10 | }); 11 | 12 | export async function GET(request: Request) { 13 | try { 14 | const client = await pool.connect(); 15 | try { 16 | const result = await client.query('SELECT fetch_interval, run_immediately FROM settings WHERE id = 1'); 17 | if (result.rows.length === 0) { 18 | return NextResponse.json({ message: 'No settings found' }, { status: 404 }); 19 | } 20 | const settings = result.rows[0]; 21 | return NextResponse.json({ data: settings }); 22 | } finally { 23 | client.release(); 24 | } 25 | } catch (error) { 26 | console.error('Error fetching settings from database:', error); 27 | return NextResponse.json({ error: 'Internal Server Error', details: error.message }, { status: 500 }); 28 | } 29 | } 30 | 31 | export async function POST(request: Request) { 32 | try { 33 | const body = await request.json(); 34 | let { fetch_interval, run_immediately } = body; 35 | 36 | // Validate inputs 37 | if (typeof fetch_interval !== 'number' || fetch_interval < 10 || fetch_interval > 3600) { 38 | fetch_interval = 60; // default to 60 seconds 39 | } 40 | if (typeof run_immediately !== 'boolean') { 41 | run_immediately = false; 42 | } 43 | 44 | const client = await pool.connect(); 45 | try { 46 | await client.query('UPDATE settings SET fetch_interval = $1, run_immediately = $2 WHERE id = 1', [ 47 | fetch_interval, 48 | run_immediately, 49 | ]); 50 | return NextResponse.json({ message: 'Settings updated' }); 51 | } finally { 52 | client.release(); 53 | } 54 | } catch (error) { 55 | console.error('Error updating settings in database:', error); 56 | return NextResponse.json({ 
error: 'Internal Server Error', details: error.message }, { status: 500 }); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /nextui/app/api/v1/clusterview/route.ts: -------------------------------------------------------------------------------- 1 | import * as k8s from '@kubernetes/client-node'; 2 | import { NextResponse } from 'next/server'; 3 | 4 | export async function GET(request: Request) { 5 | const kc = new k8s.KubeConfig(); 6 | kc.loadFromDefault(); 7 | 8 | const coreApi = kc.makeApiClient(k8s.CoreV1Api); 9 | const appsApi = kc.makeApiClient(k8s.AppsV1Api); 10 | 11 | try { 12 | const nodes = await coreApi.listNode(); 13 | const pods = await coreApi.listPodForAllNamespaces(); 14 | const services = await coreApi.listServiceForAllNamespaces(); 15 | const deployments = await appsApi.listDeploymentForAllNamespaces(); 16 | 17 | // Create a map of services to pods based on selectors 18 | const serviceToPods = services.body.items.reduce((acc, service) => { 19 | if (service.metadata && service.spec?.selector) { 20 | const matchingPods = pods.body.items 21 | .filter((pod) => matchLabels(pod.metadata?.labels, service.spec.selector)) 22 | .map((pod) => pod.metadata?.name) 23 | .filter(Boolean); // Filter out undefined names 24 | 25 | acc[service.metadata.name] = matchingPods; 26 | } 27 | return acc; 28 | }, {} as Record); 29 | 30 | return NextResponse.json({ 31 | nodes: nodes.body.items.map((node) => node.metadata?.name).filter(Boolean), 32 | pods: pods.body.items 33 | .map((pod) => ({ 34 | name: pod.metadata?.name, 35 | nodeName: pod.spec?.nodeName, 36 | })) 37 | .filter((pod) => pod.name), 38 | services: services.body.items.map((service) => service.metadata?.name).filter(Boolean), 39 | serviceToPods, 40 | deployments: deployments.body.items.map((deployment) => deployment.metadata?.name).filter(Boolean), 41 | }); 42 | } catch (err) { 43 | console.error('Error fetching Kubernetes components:', err); 44 | return NextResponse.json({ error: 'Error fetching Kubernetes components' }); 45 | } 46 | } 47 | 48 | const matchLabels = (podLabels, serviceSelector) => { 49 | if (!podLabels || !serviceSelector) return false; 50 | return Object.keys(serviceSelector).every((key) => serviceSelector[key] === podLabels[key]); 51 | }; 52 | -------------------------------------------------------------------------------- /nextui/app/api/v1/docker/containers/[id]/start/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | 3 | export async function GET(request: Request, { params }: { params: { id: string } }) { 4 | // Dynamic import of Dockerode 5 | const Docker = (await import('dockerode')).default; 6 | 7 | const docker = new Docker(); 8 | const container = await docker.getContainer(params.id); 9 | try { 10 | await container.start(); 11 | return NextResponse.json({ success: true }); 12 | } catch (error) { 13 | return NextResponse.json({ success: false, error: (error as Error).message }, { status: 500 }); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /nextui/app/api/v1/docker/containers/[id]/stats/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | 3 | export async function GET(request: Request, { params }: { params: { id: string } }) { 4 | // Dynamic import of Dockerode 5 | const Docker = (await import('dockerode')).default; 6 | 7 | 
const docker = new Docker(); 8 | const container = await docker.getContainer(params.id); 9 | console.log(container); 10 | const containerStats = await container.stats({ stream: false }); 11 | return NextResponse.json(containerStats); 12 | } 13 | -------------------------------------------------------------------------------- /nextui/app/api/v1/docker/containers/[id]/stop/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | 3 | export async function GET(request: Request, { params }: { params: { id: string } }) { 4 | // Dynamic import of Dockerode 5 | const Docker = (await import('dockerode')).default; 6 | 7 | const docker = new Docker(); 8 | const container = await docker.getContainer(params.id); 9 | try { 10 | await container.stop(); 11 | return NextResponse.json({ success: true }); 12 | } catch (error) { 13 | return NextResponse.json({ success: false, error: (error as Error).message }, { status: 500 }); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /nextui/app/api/v1/docker/containers/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | 3 | export async function GET(request: Request) { 4 | // Dynamic import of Dockerode 5 | const Docker = (await import('dockerode')).default; 6 | 7 | const docker = new Docker(); 8 | const containers = await docker.listContainers({ all: true }); 9 | return NextResponse.json(containers); 10 | } 11 | -------------------------------------------------------------------------------- /nextui/app/api/v1/usersettings/route.ts: -------------------------------------------------------------------------------- 1 | import { NextResponse } from 'next/server'; 2 | import pool from '@/db/pgModel'; 3 | 4 | export async function GET() { 5 | try { 6 | const client = await pool.connect(); 7 | const result = await client.query('SELECT firstname, lastname, email FROM usersettings LIMIT 1'); 8 | client.release(); 9 | 10 | if (result.rows.length > 0) { 11 | return NextResponse.json(result.rows[0]); 12 | } else { 13 | return NextResponse.json({ error: 'No user settings found' }, { status: 404 }); 14 | } 15 | } catch (error) { 16 | console.error('Error fetching user settings:', error); 17 | return NextResponse.json({ error: 'Internal Server Error' }, { status: 500 }); 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /nextui/app/components/Header/Dashboard.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import Link from 'next/link'; 3 | 4 | const Dashboard = () => { 5 | return ( 6 |
7 |       {/* Most of the surrounding JSX markup (original lines 6-98) was lost in this export; only the recoverable text, comments, and container list are kept below. */}
          Morpheus Dashboard
29 |      {/* Docker Management Section */}
          Docker Management
33 |      Start, stop, and monitor Docker containers running on your server.
42 |      {/* Kubernetes Metrics Section */}
          Kubernetes Metrics
46 |      View and analyze metrics for Kubernetes clusters and nodes.
56 |      {/* Docker Containers Table */}
          Docker Containers
60 |      {/* Table Header */}
          Container Name
64 |      Actions
67 |      {/* Table Rows - Dynamically generated */}
69 |      {[
70 |        'k8s-morpheus-worker3',
71 |        'k8s-morpheus-worker',
72 |        'k8s-morpheus-worker2',
73 |        'k8s-morpheus-control-plane',
74 |        'morpheus-grafana',
75 |        'morpheus-prometheus',
76 |        'morpheus-cadvisor',
77 |        'morpheus',
78 |        'morpheus-node-exporter',
79 |      ].map((containerName, index) => (
84 |        {containerName}
94 |      ))}
99 | ); 100 | }; 101 | 102 | export default Dashboard; 103 | -------------------------------------------------------------------------------- /nextui/app/components/Header/page.tsx: -------------------------------------------------------------------------------- 1 | export { default } from './Header'; -------------------------------------------------------------------------------- /nextui/app/components/MetricsDisplay.module.css: -------------------------------------------------------------------------------- 1 | /* MetricsDisplay.module.css */ 2 | 3 | .container { 4 | padding: 20px; 5 | } 6 | 7 | .title { 8 | text-align: center; 9 | } 10 | 11 | .timestamp { 12 | text-align: center; 13 | margin-bottom: 20px; 14 | } 15 | 16 | .grid { 17 | display: flex; 18 | flex-wrap: wrap; 19 | justify-content: center; 20 | } 21 | 22 | .card { 23 | background-color: #f9f9f9; 24 | border-radius: 6px; 25 | padding: 15px; 26 | margin: 10px; 27 | width: 200px; 28 | text-align: center; 29 | } 30 | 31 | .alert { 32 | border: 2px solid red; 33 | } 34 | 35 | .alertText { 36 | color: red; 37 | font-weight: bold; 38 | } 39 | 40 | .chartContainer { 41 | margin: 40px 0; 42 | } 43 | 44 | .tableContainer { 45 | overflow-x: auto; 46 | } 47 | 48 | .metricsTable { 49 | width: 100%; 50 | border-collapse: collapse; 51 | } 52 | 53 | .metricsTable th, 54 | .metricsTable td { 55 | border: 1px solid #ddd; 56 | padding: 8px; 57 | } 58 | 59 | .metricsTable th { 60 | background-color: #f2f2f2; 61 | text-align: left; 62 | } 63 | .controls { 64 | display: flex; 65 | align-items: center; 66 | justify-content: center; 67 | margin-bottom: 20px; 68 | } 69 | 70 | .controls>* { 71 | margin: 0 10px; 72 | } -------------------------------------------------------------------------------- /nextui/app/components/sideBar/page.tsx: -------------------------------------------------------------------------------- 1 | export { default } from './sideBar'; 2 | -------------------------------------------------------------------------------- /nextui/app/dashboard/Dashboard.module.scss: -------------------------------------------------------------------------------- 1 | .topCardsContainer { 2 | display: grid; 3 | grid-template-columns: 1fr; 4 | 5 | @media screen and (min-width: 768px) { 6 | grid-template-columns: 1fr 1fr 1fr; 7 | } 8 | } 9 | 10 | .dataCard { 11 | padding: 20px; 12 | height: 0; 13 | padding-bottom: 100%; 14 | position: relative; 15 | box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2); 16 | border-radius: 8px; 17 | background-color: white; 18 | } 19 | 20 | .dataCard>* { 21 | position: absolute; 22 | top: 50%; 23 | left: 50%; 24 | transform: translate(-50%, -50%); 25 | } -------------------------------------------------------------------------------- /nextui/app/dashboard/page.tsx: -------------------------------------------------------------------------------- 1 | export { default } from '../docker/containers/Dashboard'; 2 | -------------------------------------------------------------------------------- /nextui/app/dashboard/settings/actions.ts: -------------------------------------------------------------------------------- 1 | 'use server'; 2 | 3 | import { revalidatePath } from 'next/cache'; 4 | import pool from '@/db/pgModel'; 5 | 6 | export async function updateUserSettings(prevState: any, formData: FormData) { 7 | const firstname = formData.get('firstname') as string; 8 | const lastname = formData.get('lastname') as string; 9 | const email = formData.get('email') as string; 10 | 11 | try { 12 | const client = await pool.connect(); 13 | 
const result = await client.query('UPDATE usersettings SET firstname = $1, lastname = $2 WHERE email = $3', [ 14 | firstname, 15 | lastname, 16 | email, 17 | ]); 18 | client.release(); 19 | 20 | revalidatePath('/dashboard/settings'); 21 | 22 | if (result.rowCount > 0) { 23 | return { message: 'Settings updated successfully' }; 24 | } else { 25 | return { message: 'Failed to update settings' }; 26 | } 27 | } catch (error) { 28 | console.error('Error updating user settings:', error); 29 | return { message: 'Failed to update settings' }; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /nextui/app/dashboard/settings/page.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import React, { useEffect, useState } from 'react'; 4 | import { CssBaseline, Container, Grid, Card, CardContent, Typography, TextField, Button } from '@mui/material'; 5 | import { useFormState } from 'react-dom'; 6 | import { updateUserSettings } from './actions'; 7 | 8 | export default function Settings() { 9 | const [userData, setUserData] = useState({ firstname: '', lastname: '', email: '' }); 10 | const [state, formAction] = useFormState(updateUserSettings, null); 11 | 12 | useEffect(() => { 13 | async function fetchUserData() { 14 | const response = await fetch('/api/v1/usersettings'); 15 | const data = await response.json(); 16 | setUserData(data); 17 | } 18 | fetchUserData(); 19 | }, []); 20 | 21 | return ( 22 | 23 | 24 | 25 | Settings 26 | 27 | 28 | 29 | 30 |
[lines 31-75: JSX markup was not captured in this text dump. Judging from the imports and state above, it rendered a Card titled 'Profile Settings' containing a form wired to formAction, TextFields for the firstname, lastname, and email values held in userData, a submit Button, a status line that shows {state.message} once state is set, and a '{/* ... (rest of the code remains unchanged) ... */}' comment.]
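{/*
  Editor's sketch (not part of the original file): one plausible shape for the
  stripped form summarized above, inferred from the MUI imports, the userData
  state, and the formData fields read in actions.ts. Exact props, layout, and
  styling are assumptions.

  <form action={formAction}>
    <TextField name='firstname' label='First name' defaultValue={userData.firstname} />
    <TextField name='lastname' label='Last name' defaultValue={userData.lastname} />
    <TextField name='email' label='Email' defaultValue={userData.email} />
    <Button type='submit' variant='contained'>Save</Button>
  </form>
*/}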
76 | ); 77 | } 78 | -------------------------------------------------------------------------------- /nextui/app/docker/containers/ContainerActions.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import { Button, Stack } from '@mui/material'; 4 | import { useState } from 'react'; 5 | 6 | interface ContainerActionsProps { 7 | containerId: string; 8 | state: string; 9 | } 10 | 11 | export default function ContainerActions({ containerId, state }: ContainerActionsProps) { 12 | const [containerState, setContainerState] = useState(state); 13 | 14 | const handleStart = async () => { 15 | await fetch(`/api/v1/docker/containers/${containerId}/start`, { method: 'GET' }); 16 | setContainerState('running'); 17 | }; 18 | 19 | const handleStop = async () => { 20 | await fetch(`/api/v1/docker/containers/${containerId}/stop`, { method: 'GET' }); 21 | setContainerState('exited'); 22 | }; 23 | 24 | return ( 25 | 26 | 29 | 32 | 33 | ); 34 | } 35 | -------------------------------------------------------------------------------- /nextui/app/docker/containers/Dashboard.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Box, Typography, Container, Grid } from '@mui/material'; 3 | 4 | const Dashboard: React.FC = () => { 5 | return ( 6 | 20 | 24 | Docker Dashboard 📊 25 | 26 | 27 | 32 | 33 | 46 | 47 | CPU Usage per Container 🖥️ 48 | 49 | 55 | 56 | 57 | 58 | 71 | 72 | Network Traffic 🌐 73 | 74 | 80 | 81 | 82 | 83 | 96 | 97 | Disk I/O 💾 98 | 99 | 105 | 106 | 107 | 108 | 121 | 122 | Used Disk Space 📦 123 | 124 | 130 | 131 | 132 | 133 | 134 | ); 135 | }; 136 | 137 | export default Dashboard; 138 | -------------------------------------------------------------------------------- /nextui/app/docker/containers/dashboard.css: -------------------------------------------------------------------------------- 1 | /* *, 2 | *::before, 3 | *::after { 4 | box-sizing: border-box; 5 | margin: 0; 6 | padding: 0; 7 | font-family: Arial, sans-serif; 8 | } 9 | 10 | .dashboard-full-container { 11 | width: 100vw; 12 | height: 100vh; 13 | display: flex; 14 | flex-direction: column; 15 | justify-content: flex-start; 16 | align-items: center; 17 | background-color: #1a202c; 18 | overflow: auto; 19 | padding: 16px; 20 | } 21 | 22 | .header { 23 | display: flex; 24 | justify-content: space-between; 25 | align-items: center; 26 | background-color: #2b6cb0; 27 | padding: 16px; 28 | border-radius: 8px; 29 | width: 100%; 30 | max-width: 1200px; 31 | margin-bottom: 32px; 32 | } 33 | 34 | .dashboard-title { 35 | font-size: 2.5rem; 36 | font-weight: bold; 37 | text-align: center; 38 | color: white; 39 | margin-bottom: 24px; 40 | } 41 | 42 | .dashboard-grid { 43 | display: grid; 44 | grid-template-columns: 1fr; 45 | gap: 16px; 46 | width: 100%; 47 | max-width: 1200px; 48 | } 49 | 50 | @media (min-width: 768px) { 51 | .dashboard-grid { 52 | grid-template-columns: repeat(2, 1fr); 53 | } 54 | } 55 | 56 | .dashboard-card { 57 | background-color: #2d3748; 58 | border-radius: 8px; 59 | padding: 24px; 60 | box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); 61 | transition: transform 0.3s ease; 62 | } 63 | 64 | .dashboard-card:hover { 65 | transform: scale(1.05); 66 | } 67 | 68 | .border-blue { 69 | border: 2px solid #3182ce; 70 | } 71 | 72 | .border-green { 73 | border: 2px solid #38a169; 74 | } 75 | 76 | .border-yellow { 77 | border: 2px solid #ecc94b; 78 | } 79 | 80 | .border-red { 81 | border: 2px solid #e53e3e; 82 | } 83 | 
84 | .card-title { 85 | color: white; 86 | font-size: 1.25rem; 87 | margin-bottom: 16px; 88 | } 89 | 90 | .iframe { 91 | border-radius: 8px; 92 | border: none; 93 | } 94 | 95 | .footer { 96 | text-align: center; 97 | color: #a0aec0; 98 | margin-top: 32px; 99 | width: 100%; 100 | max-width: 1200px; 101 | } */ 102 | -------------------------------------------------------------------------------- /nextui/app/docker/containers/page.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { 3 | Container, 4 | List, 5 | ListItem, 6 | ListItemText, 7 | Typography, 8 | Paper, 9 | Box, 10 | } from '@mui/material'; 11 | import ContainerActions from './ContainerActions'; 12 | 13 | async function getContainers() { 14 | const Docker = (await import('dockerode')).default; 15 | const docker = new Docker(); 16 | const containers = await docker.listContainers({ all: true }); 17 | return containers; 18 | } 19 | 20 | export default async function ContainersPage() { 21 | const containers = await getContainers(); 22 | 23 | return ( 24 | 30 | 31 | 44 | Docker Containers 45 | 46 | 47 | 48 | {containers.map((container) => ( 49 | 59 | 65 | 69 | 70 | ))} 71 | 72 | 73 | 74 | 75 | ); 76 | } 77 | -------------------------------------------------------------------------------- /nextui/app/docker/login.tsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const page = () => { 4 | return ( 5 |

Dashboard login

6 | ) 7 | } 8 | 9 | export default page; -------------------------------------------------------------------------------- /nextui/app/docker/page.tsx: -------------------------------------------------------------------------------- 1 | export {default} from './login' -------------------------------------------------------------------------------- /nextui/app/globals.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | 5 | :root { 6 | --foreground-rgb: 0, 0, 0; 7 | --background-start-rgb: 214, 219, 220; 8 | --background-end-rgb: 255, 255, 255; 9 | } 10 | 11 | /* @media (prefers-color-scheme: dark) { 12 | :root { 13 | --foreground-rgb: 255, 255, 255; 14 | --background-start-rgb: 0, 0, 0; 15 | --background-end-rgb: 0, 0, 0; 16 | } 17 | } */ 18 | 19 | body { 20 | color: rgb(var(--foreground-rgb)); 21 | background: linear-gradient( 22 | to bottom, 23 | transparent, 24 | rgb(var(--background-end-rgb)) 25 | ) 26 | rgb(var(--background-start-rgb)); 27 | } 28 | 29 | @layer utilities { 30 | .text-balance { 31 | text-wrap: balance; 32 | } 33 | } 34 | 35 | html, 36 | body { 37 | height: 100%; 38 | margin: 0; 39 | padding: 0; 40 | } 41 | 42 | body { 43 | display: flex; 44 | flex-direction: column; 45 | min-height: 100vh; 46 | } 47 | 48 | main { 49 | flex: 1; 50 | } 51 | -------------------------------------------------------------------------------- /nextui/app/kubernetes/Dashboard.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import React, { useState, useEffect } from 'react'; 4 | import Graph from 'react-graph-vis'; 5 | import nodeImage from '../../public/node-128.png'; 6 | import podImage from '../../public/pod-128.png'; 7 | import serviceImage from '../../public/svc-128.png'; 8 | import deploymentImage from '../../public/deploy-128.png'; 9 | import './clusterView.css'; 10 | 11 | const Dashboard = () => { 12 | const [graphData, setGraphData] = useState({ nodes: [], edges: [] }); 13 | const [loading, setLoading] = useState(true); 14 | const [clusterData, setclusterData] = useState({}); 15 | 16 | useEffect(() => { 17 | fetch('/api/v1/clusterview') 18 | .then((response) => response.json()) 19 | .then((data) => { 20 | const { nodes, edges } = processClusterData(data); 21 | setclusterData(data); 22 | 23 | setGraphData({ nodes, edges }); 24 | setLoading(false); 25 | }) 26 | .catch((error) => { 27 | console.error('Error fetching cluster data:', error); 28 | setLoading(false); 29 | }); 30 | }, []); 31 | 32 | const processClusterData = (clusterData) => { 33 | const nodes = []; 34 | const edges = []; 35 | 36 | // Create nodes for each Kubernetes Node 37 | clusterData.nodes.forEach((node, index) => { 38 | nodes.push({ 39 | id: `node-${index}`, 40 | label: `${node}`, // label would be the same thing 41 | title: `Node: ${node}`, // label would be the same thing 42 | shape: 'image', 43 | image: nodeImage.src, 44 | size: 40, 45 | }); 46 | }); 47 | 48 | // Create nodes for each Pod 49 | clusterData.pods.forEach((pod, index) => { 50 | nodes.push({ 51 | id: `pod-${index}`, 52 | label: `${pod.name}`, 53 | title: `Pod: ${pod.name}`, 54 | shape: 'image', 55 | image: podImage.src, 56 | }); 57 | 58 | // Create edges from Node to Pod 59 | const nodeIndex = clusterData.nodes.indexOf(pod.nodeName); 60 | console.log(nodeIndex); 61 | if (nodeIndex !== -1) { 62 | edges.push({ 63 | from: `node-${nodeIndex}`, 64 | to: `pod-${index}`, 65 | length: 200, 66 | 
arrows: 'to', 67 | }); 68 | } 69 | }); 70 | 71 | // Map Services to graph nodes 72 | clusterData.services.forEach((service, index) => { 73 | nodes.push({ 74 | id: `service-${index}`, 75 | label: `${service}`, 76 | title: `Service: ${service}`, 77 | shape: 'image', 78 | image: serviceImage.src, 79 | }); 80 | 81 | if (clusterData.serviceToPods[service]) { 82 | // Create edges from Service to Pod 83 | clusterData.serviceToPods[service].forEach((podName) => { 84 | const podIndex = clusterData.pods.findIndex( 85 | (pod) => pod.name === podName 86 | ); 87 | if (podIndex !== -1) { 88 | edges.push({ 89 | from: `service-${index}`, 90 | to: `pod-${podIndex}`, 91 | length: 160, 92 | arrows: 'to', 93 | }); 94 | } 95 | }); 96 | } 97 | }); 98 | 99 | // Create nodes for each Deployment 100 | clusterData.deployments.forEach((deployment, index) => { 101 | nodes.push({ 102 | id: `deployment-${index}`, 103 | label: `${deployment}`, 104 | title: `Deployment: ${deployment}`, 105 | shape: 'image', 106 | image: deploymentImage.src, 107 | }); 108 | 109 | // Create edges from Deployment to Pod (based on naming convention or //label matching) 110 | clusterData.pods.forEach((pod, podIndex) => { 111 | if (pod.name.includes(deployment)) { 112 | edges.push({ 113 | from: `deployment-${index}`, 114 | to: `pod-${podIndex}`, 115 | length: 150, 116 | arrows: 'to', 117 | }); 118 | } 119 | }); 120 | }); 121 | return { nodes, edges }; 122 | }; 123 | 124 | const graphOptions = { 125 | layout: { 126 | hierarchical: false, // try switching to true 127 | }, 128 | edges: { 129 | color: '#000000', 130 | }, 131 | height: '1400px', 132 | width: '2400px', 133 | interaction: { 134 | hover: true, 135 | // tooltipDelay: 250, 136 | }, 137 | }; 138 | 139 | const events = { 140 | select: function (event) { 141 | var { nodes, edges } = event; 142 | }, 143 | }; 144 | 145 | if (loading) { 146 | return
Loading...
; 147 | } 148 | return ( 149 |
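{/*
  Editor's sketch (not part of the original file): the stripped return markup below
  almost certainly wrapped a single react-graph-vis Graph element fed by the state
  and options defined above; the wrapper element and its class name are assumptions.

  <div className='cluster-container'>
    <Graph graph={graphData} options={graphOptions} events={events} />
  </div>
*/}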
150 | 151 |
152 | ); 153 | }; 154 | 155 | export default Dashboard; 156 | -------------------------------------------------------------------------------- /nextui/app/kubernetes/aichat-api.tsx: -------------------------------------------------------------------------------- 1 | import React, { useState } from 'react'; 2 | import { 3 | Box, 4 | Typography, 5 | FormControl, 6 | InputLabel, 7 | Select, 8 | MenuItem, 9 | TextField, 10 | Button, 11 | } from '@mui/material'; 12 | import AIChat from './aichat-format'; 13 | 14 | const AIChatApi: React.FC = () => { 15 | const [messages, setMessages] = useState<{ role: string; content: string }[]>( 16 | [] 17 | ); 18 | const [model, setModel] = useState('gpt-4o'); 19 | const [input, setInput] = useState(''); 20 | const [isLoading, setIsLoading] = useState(false); 21 | 22 | const handleSubmit = async (event: React.FormEvent) => { 23 | event.preventDefault(); 24 | if (!input.trim() || isLoading) return; 25 | 26 | const userMessage = { role: 'user', content: input }; 27 | setMessages([...messages, userMessage]); 28 | setInput(''); 29 | setIsLoading(true); 30 | 31 | try { 32 | const response = await fetch('/api/AIChat', { 33 | method: 'POST', 34 | headers: { 'Content-Type': 'application/json' }, 35 | body: JSON.stringify({ prompt: input, model: model }), 36 | }); 37 | 38 | if (!response.ok) { 39 | throw new Error('Failed to get response from AI'); 40 | } 41 | 42 | const data = await response.json(); 43 | const assistantMessage = { role: 'assistant', content: data.result }; 44 | setMessages((prevMessages) => [...prevMessages, assistantMessage]); 45 | } catch (error) { 46 | console.error('Error:', error); 47 | const errorMessage = { 48 | role: 'assistant', 49 | content: 'Sorry, there was an error processing your request.', 50 | }; 51 | setMessages((prevMessages) => [...prevMessages, errorMessage]); 52 | } finally { 53 | setIsLoading(false); 54 | } 55 | }; 56 | 57 | return ( 58 | 66 | {/* 67 | AI Analysis 68 | */} 69 | 70 | 71 | OpenAI Model 72 | 82 | 83 |
87 | setInput(e.target.value)} 91 | placeholder='Ask a question...' 92 | variant='outlined' 93 | size='small' 94 | sx={{ 95 | mr: 1, 96 | '& .MuiInputBase-root': { 97 | height: '100%', 98 | alignItems: 'flex-start', 99 | }, 100 | '& .MuiInputBase-input': { 101 | height: '100%', 102 | overflow: 'auto', 103 | '&::-webkit-scrollbar': { 104 | width: '8px', 105 | }, 106 | '&::-webkit-scrollbar-thumb': { 107 | backgroundColor: 'rgba(0,0,0,.2)', 108 | borderRadius: '4px', 109 | }, 110 | }, 111 | }} 112 | disabled={isLoading} 113 | multiline 114 | maxRows={1} 115 | /> 116 | 126 | 127 | 128 |
129 | ); 130 | }; 131 | 132 | export default AIChatApi; 133 | -------------------------------------------------------------------------------- /nextui/app/kubernetes/aichat-format.tsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Box, Typography, Paper } from '@mui/material'; 3 | import ReactMarkdown from 'react-markdown'; 4 | 5 | interface AIChatProps { 6 | messages: { role: string; content: string }[]; 7 | } 8 | 9 | const AIChat: React.FC = ({ messages }) => { 10 | return ( 11 | 23 | 33 | {messages.length === 0 ? ( 34 | 42 | 46 | Your results will appear here. 47 | 48 | 49 | ) : ( 50 | messages.map((message, index) => ( 51 | 52 | 53 | {message.role === 'user' ? 'You' : 'AI'}: 54 | 55 | {message.content} 56 | 57 | )) 58 | )} 59 | 60 | 61 | ); 62 | }; 63 | 64 | export default AIChat; 65 | -------------------------------------------------------------------------------- /nextui/app/kubernetes/clusterView.css: -------------------------------------------------------------------------------- 1 | .cluster-container { 2 | display: flex; 3 | /* width: 100vw; 4 | height: 96vh; */ 5 | font-family: Verdana, Geneva, Tahoma, sans-serif; 6 | /* gap: 20px; */ 7 | } 8 | 9 | .clusterDisplay { 10 | display: flex; 11 | flex-direction: column; 12 | background-color: #fafaf8; 13 | /* height: 96vh; 14 | width: 55vw; */ 15 | margin-top: 10px; 16 | border: 1px; 17 | border-radius: 20px; 18 | margin-left: 20px; 19 | } 20 | -------------------------------------------------------------------------------- /nextui/app/kubernetes/kubernetes.module.scss: -------------------------------------------------------------------------------- 1 | .topCardsContainer { 2 | display: grid; 3 | grid-template-columns: 1fr; 4 | 5 | @media screen and (min-width: 768px) { 6 | grid-template-columns: 1fr 1fr 1fr; 7 | } 8 | } 9 | 10 | .dataCard { 11 | padding: 20px; 12 | height: 0; 13 | padding-bottom: 100%; 14 | position: relative; 15 | box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2); 16 | border-radius: 8px; 17 | background-color: white; 18 | } 19 | 20 | .dataCard > * { 21 | position: absolute; 22 | top: 50%; 23 | left: 50%; 24 | transform: translate(-50%, -50%); 25 | } 26 | -------------------------------------------------------------------------------- /nextui/app/kubernetes/page.tsx: -------------------------------------------------------------------------------- 1 | // export { default } from './Dashboard'; 2 | export { default } from './DashboardAichat'; 3 | -------------------------------------------------------------------------------- /nextui/app/kubernetes/react-graph-vis.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'react-graph-vis' { 2 | import { Network, NetworkEvents, Options, Node, Edge, DataSet, Data } from 'vis'; 3 | import { Component } from 'react'; 4 | 5 | export { Network, NetworkEvents, Options, Node, Edge, DataSet, Data } from 'vis'; 6 | 7 | export type GraphEvents = { 8 | [event in NetworkEvents]?: (params?: any) => void; 9 | }; 10 | 11 | export interface NetworkGraphProps { 12 | graph: Data; 13 | options?: Options; 14 | events?: GraphEvents; 15 | getNetwork?: (network: Network) => void; 16 | identifier?: string; 17 | style?: React.CSSProperties; 18 | getNodes?: (nodes: DataSet) => void; 19 | getEdges?: (edges: DataSet) => void; 20 | } 21 | 22 | export interface NetworkGraphState { 23 | identifier: string; 24 | } 25 | 26 | export default class NetworkGraph extends Component { 27 | 
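  // Editor note (not in the original file): the angle-bracketed type arguments on
  // Component were lost when markup-like text was stripped from this dump; given the
  // interfaces declared above, the declaration was presumably
  // `extends Component<NetworkGraphProps, NetworkGraphState>`. Treat this as a hedged
  // reconstruction rather than the verbatim source.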
render(); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /nextui/app/layout.tsx: -------------------------------------------------------------------------------- 1 | import * as React from 'react'; 2 | import { Suspense } from 'react'; 3 | import type { Metadata } from 'next'; 4 | import { Inter } from 'next/font/google'; 5 | import dynamic from 'next/dynamic'; 6 | 7 | import './globals.css'; 8 | 9 | const inter = Inter({ subsets: ['latin'] }); 10 | 11 | const Header = dynamic(() => import('./components/Header/Header'), { 12 | loading: () =>
, // Adjust the height as needed 13 | ssr: false, 14 | }); 15 | 16 | // const Header = React.lazy(() => import('./components/Header/Header')); 17 | 18 | export const metadata: Metadata = { 19 | title: 'Morpheus', 20 | description: 21 | 'A Docker, kubernetes visualizer and dashboard tool with AI integration', 22 | icons: { 23 | icon: [{ url: '/morpheus-logo.png', sizes: '64x64', type: 'image/png' }], 24 | }, 25 | }; 26 | 27 | export default function RootLayout({ 28 | children, 29 | }: Readonly<{ 30 | children: React.ReactNode; 31 | }>) { 32 | return ( 33 | 34 | 35 |
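{/*
  Editor's sketch (not part of the original file): the body of RootLayout was
  stripped from this dump. Judging by the imports and the {children} reference that
  survives below, it most likely wrapped the page roughly like this; treat it as a
  hedged guess, not the original markup.

  <html lang='en'>
    <body className={inter.className}>
      <Header />
      <main>{children}</main>
    </body>
  </html>
*/}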
36 | {/* Use Suspense to handle the lazy loading */} 37 | {/* Loading...
}> */} 38 |
39 | {/* */} 40 |
{children}
41 |
42 | 43 | 44 | ); 45 | } 46 | -------------------------------------------------------------------------------- /nextui/app/metrics/Dashboard.tsx: -------------------------------------------------------------------------------- 1 | 'use client'; 2 | 3 | import React from 'react'; 4 | import { MetricsDisplay } from '../components/MetricsDisplay'; 5 | 6 | export default function MetricsPage() { 7 | return ( 8 |
[lines 9-13: JSX markup was not captured in this text dump. It rendered a wrapper element with a 'Database Metrics' heading and the imported MetricsDisplay component.]
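{/*
  Editor's sketch (not part of the original file): a minimal reconstruction of the
  stripped markup summarized above; the actual tags and class names are assumptions.

  <div>
    <h1>Database Metrics</h1>
    <MetricsDisplay />
  </div>
*/}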
14 | ); 15 | } 16 | -------------------------------------------------------------------------------- /nextui/app/metrics/page.tsx: -------------------------------------------------------------------------------- 1 | export { default } from './Dashboard' -------------------------------------------------------------------------------- /nextui/app/navbar.module.scss: -------------------------------------------------------------------------------- 1 | .container { 2 | padding: 0; // Remove padding 3 | } 4 | 5 | .dataCard { 6 | padding: 20px; 7 | height: 0; 8 | padding-bottom: 100%; 9 | position: relative; 10 | box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2); 11 | border-radius: 8px; 12 | background-color: white; 13 | } 14 | 15 | .dataCard > * { 16 | position: absolute; 17 | top: 50%; 18 | left: 50%; 19 | transform: translate(-50%, -50%); 20 | } 21 | -------------------------------------------------------------------------------- /nextui/app/systemData/Dashboard.tsx: -------------------------------------------------------------------------------- 1 | // 'use client'; 2 | import React, { useState, useEffect } from 'react'; 3 | import { Box, Grid, Paper } from '@mui/material'; 4 | import { CssBaseline } from '@mui/material'; 5 | import styles from './systemData.module.scss'; 6 | 7 | interface IframeProps { 8 | panelId: number; 9 | // timeRange?: number; 10 | } 11 | 12 | const Dashboard: React.FC = () => { 13 | // const [timestamp, setTimestamp] = useState(Date.now()); 14 | 15 | // useEffect(() => { 16 | // const interval = setInterval(() => { 17 | // setTimestamp(Date.now()); 18 | // }, 60000); //this is the refresh interval 60000 is 60seconds etc 19 | // return () => clearInterval(interval); 20 | // }, []); 21 | 22 | const getIframeSource = ({ panelId }: IframeProps): string => { 23 | // const to = timestamp; 24 | // const from = to - timeRange * 1000; &from=${from}&to=${to} 25 | return `http://localhost:50003/d-solo/h5LcytHGz/system?orgId=1&refresh=10s&panelId=${panelId}`; 26 | }; 27 | const getIframeSource2 = ({ panelId }: IframeProps): string => { 28 | // const to = timestamp; 29 | // const from = to - timeRange * 1000; &from=${from}&to=${to} 30 | return `http://localhost:50003/d-solo/4dMaCsRZz/docker-container?orgId=1&refresh=5s&panelId=${panelId}`; 31 | }; 32 | return ( 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 |
[lines 49-111: JSX markup was not captured in this text dump. It rendered a CssBaseline plus a Grid of Paper cards, each pairing a header ('RAM status', 'CPU Usage', 'Memory Usage', 'Memory Usage detailed', 'Network data detailed', 'Extended CPU data', 'Cached Mem data', 'Sent packets Data', 'Received Packets Data') with an embedded Grafana panel iframe whose src comes from getIframeSource or getIframeSource2.]
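{/*
  Editor's sketch (not part of the original file): one card from the stripped grid
  probably looked roughly like this. The panelId value and grid sizing are
  illustrative assumptions, since the real panel IDs were lost along with the markup.

  <Grid item xs={12} md={4}>
    <Paper className={styles.thirdGridItems}>
      <div className={styles.cardHeaderCPU}>CPU Usage</div>
      <iframe src={getIframeSource({ panelId: 3 })} />
    </Paper>
  </Grid>
*/}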
112 | ); 113 | }; 114 | 115 | export default Dashboard; 116 | -------------------------------------------------------------------------------- /nextui/app/systemData/page.tsx: -------------------------------------------------------------------------------- 1 | export {default} from './Dashboard' -------------------------------------------------------------------------------- /nextui/app/systemData/systemData.module.scss: -------------------------------------------------------------------------------- 1 | .pageBackground { 2 | background-color: #DADFDF; 3 | color: #ffffff; 4 | min-height: 100vh; 5 | margin: 0; // Ensure no margin 6 | padding: 0; // Ensure no padding 7 | } 8 | 9 | .topCardsContainer { 10 | // margin-top: 10px; 11 | margin-bottom: 20px; // Add some bottom margin to the container 12 | } 13 | 14 | .dataCard { 15 | padding: 20px; 16 | position: relative; 17 | box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2); 18 | border-radius: 8px; 19 | background-color: #DADFDF;; 20 | } 21 | 22 | .dataCardGrid1, .dataCardGrid2, .thirdGridItems { 23 | padding: 0; 24 | box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2); 25 | border-radius: 8px; 26 | background-color: #DADFDF; 27 | overflow: hidden; 28 | display: flex; 29 | flex-direction: column; 30 | } 31 | 32 | .dataCardGrid1 iframe, .dataCardGrid2 iframe, .thirdGridItems iframe { 33 | width: 100%; 34 | height: 100%; 35 | border: none; 36 | display: block; 37 | } 38 | 39 | .dataCardGrid1 { 40 | height: 120%; 41 | margin-bottom: 20px; // Add bottom margin to create space 42 | } 43 | 44 | .dataCardGrid2 { 45 | height: 100%; 46 | margin-top: 20px; // Add top margin to create space 47 | margin-bottom: 20px; // Add bottom margin for consistency 48 | } 49 | 50 | .thirdGridItems { 51 | margin-top: 20px; 52 | margin-bottom: -10px; 53 | height: 400px; 54 | } 55 | 56 | .cardHeaderCPU, .cardHeaderMemory, .cardHeaderNetwork { 57 | color: white; 58 | font-size: 1.125rem; 59 | font-weight: 600; 60 | padding: 0.75rem; 61 | text-align: center; 62 | border-top-left-radius: 8px; 63 | border-top-right-radius: 8px; 64 | } 65 | 66 | .cardHeaderCPU { 67 | background: linear-gradient(to right, #3b82f6, #4f46e5); 68 | } 69 | 70 | .cardHeaderMemory { 71 | background: linear-gradient(to right, #f6803b, #fb7405); 72 | } 73 | 74 | .cardHeaderNetwork { 75 | background: linear-gradient(to right, #b723f1, #a905fb); 76 | } -------------------------------------------------------------------------------- /nextui/app/theme/darkTheme.ts: -------------------------------------------------------------------------------- 1 | import { ThemeOptions } from "@mui/material"; 2 | 3 | const darkTheme: ThemeOptions = { 4 | palette: { 5 | mode: 'dark', 6 | primary: { 7 | main: '#ff0000', 8 | }, 9 | secondary: { 10 | main: '#7000ff', 11 | }, 12 | } 13 | } 14 | export default darkTheme; -------------------------------------------------------------------------------- /nextui/app/theme/lightTheme.ts: -------------------------------------------------------------------------------- 1 | import { ThemeOptions } from "@mui/material"; 2 | 3 | const lightTheme: ThemeOptions = { 4 | palette: { 5 | mode: 'light', 6 | primary: { 7 | main: '#ffff00', 8 | }, 9 | secondary: { 10 | main: '#ff8400', 11 | }, 12 | } 13 | } 14 | export default lightTheme; -------------------------------------------------------------------------------- /nextui/db/pgModel.ts: -------------------------------------------------------------------------------- 1 | import pg from 'pg'; 2 | 3 | const config: pg.PoolConfig = { 4 | user: 'admin', 5 | 
password: 'admin', 6 | database: 'morpheus', 7 | host: 'localhost', 8 | port: 50005, 9 | }; 10 | 11 | const pool = new pg.Pool(config); 12 | 13 | export default pool; 14 | -------------------------------------------------------------------------------- /nextui/jest.config.js: -------------------------------------------------------------------------------- 1 | const nextJest = require('next/jest') 2 | 3 | const createJestConfig = nextJest({ 4 | dir: './', 5 | }) 6 | 7 | const customJestConfig = { 8 | setupFilesAfterEnv: ['/jest.setup.js'], 9 | testEnvironment: 'jest-environment-jsdom', 10 | moduleDirectories: ['node_modules', '/'], 11 | testPathIgnorePatterns: ['/.next/', '/node_modules/'], 12 | moduleNameMapper: { 13 | '^@/(.*)$': '/$1', 14 | }, 15 | } 16 | 17 | module.exports = createJestConfig(customJestConfig) 18 | -------------------------------------------------------------------------------- /nextui/jest.setup.js: -------------------------------------------------------------------------------- 1 | import '@testing-library/jest-dom' -------------------------------------------------------------------------------- /nextui/next.config.mjs: -------------------------------------------------------------------------------- 1 | /** @type {import('next').NextConfig} */ 2 | const nextConfig = { 3 | swcMinify: false, 4 | reactStrictMode: false, 5 | webpack: (config, { dev, isServer, webpack, nextRuntime }) => { 6 | if (!isServer) { 7 | config.optimization.splitChunks = { 8 | chunks: 'all', 9 | minSize: 30000, 10 | maxSize: 250000, 11 | maxInitialRequests: 10, 12 | }; 13 | } 14 | config.module.rules.push({ 15 | test: /\.node$/, 16 | use: [ 17 | { 18 | loader: 'nextjs-node-loader', 19 | }, 20 | ], 21 | }); 22 | return config; 23 | }, 24 | }; 25 | 26 | export default nextConfig; 27 | -------------------------------------------------------------------------------- /nextui/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nextui", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "dev": "next dev", 7 | "build": "next build", 8 | "start": "next start", 9 | "lint": "next lint", 10 | "test": "jest", 11 | "test:watch": "jest --watch" 12 | }, 13 | "dependencies": { 14 | "@auth0/nextjs-auth0": "^3.5.0", 15 | "@aws-sdk/client-bedrock-runtime": "^3.645.0", 16 | "@emotion/react": "^11.13.3", 17 | "@emotion/styled": "^11.13.0", 18 | "@kubernetes/client-node": "^0.21.0", 19 | "@mui/icons-material": "^5.16.7", 20 | "@mui/material": "^5.16.7", 21 | "axios": "^1.7.5", 22 | "dockerode": "^4.0.2", 23 | "framer-motion": "^11.3.30", 24 | "limiter": "^2.1.0", 25 | "next": "^14.2.6", 26 | "next-router-mock": "^0.9.13", 27 | "openai": "^4.56.0", 28 | "pg": "^8.12.0", 29 | "react": "^18", 30 | "react-dom": "^18", 31 | "react-graph-vis": "^1.0.7", 32 | "react-icons": "^5.3.0", 33 | "react-is": "^18.3.1", 34 | "react-markdown": "^9.0.1", 35 | "recharts": "^2.12.7", 36 | "sass": "^1.77.8", 37 | "sharp": "^0.33.5" 38 | }, 39 | "devDependencies": { 40 | "@testing-library/jest-dom": "^6.5.0", 41 | "@testing-library/react": "^14.0.0", 42 | "@tsconfig/recommended": "^1.0.7", 43 | "@types/d3": "^7.4.3", 44 | "@types/jest": "^29.5.12", 45 | "@types/jsonwebtoken": "^9.0.6", 46 | "@types/next": "^8.0.7", 47 | "@types/node": "^20", 48 | "@types/pg": "^8.11.6", 49 | "@types/react": "^18.3.4", 50 | "@types/react-dom": "^18", 51 | "@types/react-modal": "^3.16.3", 52 | "@types/vis": "^4.21.27", 53 | "eslint": "^8", 54 | "eslint-config-next": "14.2.5", 55 | 
"jest": "^29.7.0", 56 | "jest-environment-jsdom": "^29.7.0", 57 | "nextjs-node-loader": "^1.1.5", 58 | "postcss": "^8", 59 | "tailwindcss": "^3.4.1", 60 | "typescript": "5.5.4" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /nextui/postcss.config.mjs: -------------------------------------------------------------------------------- 1 | /** @type {import('postcss-load-config').Config} */ 2 | const config = { 3 | plugins: { 4 | tailwindcss: {}, 5 | }, 6 | }; 7 | 8 | export default config; 9 | -------------------------------------------------------------------------------- /nextui/public/aws-bedrock-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/nextui/public/aws-bedrock-logo.png -------------------------------------------------------------------------------- /nextui/public/deploy-128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/nextui/public/deploy-128.png -------------------------------------------------------------------------------- /nextui/public/morpheus-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/nextui/public/morpheus-logo.png -------------------------------------------------------------------------------- /nextui/public/node-128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/nextui/public/node-128.png -------------------------------------------------------------------------------- /nextui/public/pod-128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/nextui/public/pod-128.png -------------------------------------------------------------------------------- /nextui/public/sidebarIcon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/nextui/public/sidebarIcon.png -------------------------------------------------------------------------------- /nextui/public/svc-128.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oslabs-beta/Morpheus/6cf99995a07eca80304ded831bd1aaec22731b22/nextui/public/svc-128.png -------------------------------------------------------------------------------- /nextui/tailwind.config.ts: -------------------------------------------------------------------------------- 1 | import type { Config } from 'tailwindcss'; 2 | 3 | const config: Config = { 4 | content: [ 5 | './pages/**/*.{js,ts,jsx,tsx,mdx}', 6 | './components/**/*.{js,ts,jsx,tsx,mdx}', 7 | './app/**/*.{js,ts,jsx,tsx,mdx}', 8 | './app/grafanaDashboard/**/*.{js,ts,jsx,tsx,mdx}', 9 | ], 10 | theme: { 11 | extend: { 12 | backgroundImage: { 13 | 'gradient-radial': 'radial-gradient(var(--tw-gradient-stops))', 14 | 'gradient-conic': 15 | 'conic-gradient(from 180deg at 50% 50%, var(--tw-gradient-stops))', 16 | 'gradient-animate': 17 | 'linear-gradient(270deg, #ffafbd, #ffc3a0, #2193b0, 
#6dd5ed, #cc2b5e, #753a88, #ee9ca7, #ffdde1)', 18 | }, 19 | animation: { 20 | 'gradient-x': 'gradient-x 15s ease infinite', 21 | }, 22 | keyframes: { 23 | 'gradient-x': { 24 | '0%, 100%': { 'background-position': '0% 50%' }, 25 | '50%': { 'background-position': '100% 50%' }, 26 | }, 27 | }, 28 | }, 29 | }, 30 | plugins: [], 31 | }; 32 | export default config; 33 | -------------------------------------------------------------------------------- /nextui/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "lib": ["dom", "dom.iterable", "esnext"], 4 | "allowJs": true, 5 | "skipLibCheck": true, 6 | "strict": false, 7 | "noEmit": true, 8 | "esModuleInterop": true, 9 | "module": "esnext", 10 | "moduleResolution": "bundler", 11 | "resolveJsonModule": true, 12 | "isolatedModules": true, 13 | "jsx": "preserve", 14 | "incremental": true, 15 | "noImplicitAny": false, 16 | "plugins": [ 17 | { 18 | "name": "next" 19 | } 20 | ], 21 | "paths": { 22 | "@/*": ["./*"] 23 | } 24 | }, 25 | "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "app/kubernetes/react-graph-vis.d.ts"], 26 | "exclude": ["node_modules"] 27 | } 28 | --------------------------------------------------------------------------------
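Editor's note (appended; not part of the repository dump): nextui/db/pgModel.ts hard-codes the Postgres user, password, database, host, and port. A minimal sketch of reading the same pg.PoolConfig from environment variables instead; the variable names are the conventional libpq/node-postgres ones and the fallbacks simply mirror the current hard-coded values, so treat it as an illustration rather than the project's actual configuration:

import pg from 'pg';

// Fall back to the values currently hard-coded in nextui/db/pgModel.ts
const config: pg.PoolConfig = {
  user: process.env.PGUSER ?? 'admin',
  password: process.env.PGPASSWORD ?? 'admin',
  database: process.env.PGDATABASE ?? 'morpheus',
  host: process.env.PGHOST ?? 'localhost',
  port: Number(process.env.PGPORT ?? 50005),
};

// node-postgres also reads the PG* variables on its own, so an empty config object
// would work too; the explicit fallbacks just preserve today's defaults.
const pool = new pg.Pool(config);

export default pool;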