├── .cargo-husky └── hooks │ ├── pre-commit │ └── pre-push ├── .dockerignore ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml ├── grcov.yml └── workflows │ ├── cd.yml │ ├── ci.yml │ └── cicd-test.yml ├── .gitignore ├── .vscode └── launch.json ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── SECURITY.md ├── artwork ├── icon-2x.png ├── icon.jpg ├── icon.png ├── logo-2x.png ├── logo-icon.svg ├── logo.jpg ├── logo.png └── logo.svg ├── deployment ├── chocolatey │ ├── chocolateyinstall.ps1.template │ ├── kdash.nuspec.template │ └── packager.py ├── getLatest.sh ├── homebrew │ ├── kdash.rb.template │ └── packager.py └── scoop │ ├── kdash.json.template │ └── packager.py ├── k8s.yml ├── rustfmt.toml ├── screenshots ├── contexts.png ├── describe.png ├── logs.png ├── overview.png ├── ui.gif └── utilization.png ├── src ├── app │ ├── configmaps.rs │ ├── contexts.rs │ ├── cronjobs.rs │ ├── daemonsets.rs │ ├── deployments.rs │ ├── dynamic.rs │ ├── ingress.rs │ ├── jobs.rs │ ├── key_binding.rs │ ├── metrics.rs │ ├── mod.rs │ ├── models.rs │ ├── network_policies.rs │ ├── nodes.rs │ ├── ns.rs │ ├── pods.rs │ ├── pvcs.rs │ ├── pvs.rs │ ├── replicasets.rs │ ├── replication_controllers.rs │ ├── roles.rs │ ├── secrets.rs │ ├── serviceaccounts.rs │ ├── statefulsets.rs │ ├── storageclass.rs │ ├── svcs.rs │ └── utils.rs ├── banner.rs ├── cmd │ └── mod.rs ├── event │ ├── events.rs │ ├── key.rs │ └── mod.rs ├── handlers │ └── mod.rs ├── main.rs ├── network │ ├── mod.rs │ └── stream.rs └── ui │ ├── help.rs │ ├── mod.rs │ ├── overview.rs │ ├── resource_tabs.rs │ └── utils.rs └── test_data ├── clusterrole_binding.yaml ├── clusterroles.yaml ├── cm.yaml ├── cronjobs.yaml ├── daemonsets.yaml ├── deployments.yaml ├── dynamic_resource.yaml ├── ingress.yaml ├── jobs.yaml ├── network_policy.yaml ├── node_metrics.yaml ├── nodes.yaml ├── ns.yaml ├── pods.yaml ├── pvcs.yaml ├── pvs.yaml ├── replicasets.yaml ├── replication_controllers.yaml ├── role_bindings.yaml ├── roles.yaml ├── secrets.yaml ├── serviceaccounts.yaml ├── stfs.yaml ├── storageclass.yaml ├── svcs.yaml └── test.yaml /.cargo-husky/hooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | echo "Running pre-commit hook:" 6 | 7 | echo "Executing: cargo fmt" 8 | cargo fmt 9 | 10 | echo "Executing: make lint" 11 | make lint 12 | -------------------------------------------------------------------------------- /.cargo-husky/hooks/pre-push: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | echo "Running pre-push hook:" 6 | 7 | echo "Executing: make lint" 8 | make lint 9 | 10 | echo "Executing: cargo test" 11 | cargo test -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .github 2 | .git 3 | .vscode 4 | /screenshots 5 | /snap 6 | /target 7 | README.md 8 | CHANGELOG.md 9 | LICENSE 10 | ui.gif -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [deepu105] -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior or a screencast 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS and OS version: [e.g. macOS 10.1 / Fedora 34 / WIndows 10] 28 | - Terminal name and version [e.g. Tilix, Yakuake, Gnome Terminal, Konsole, iTerm, Windows CMD] 29 | - Shell name and version [e.g. bash, zsh, fish, powershell] 30 | - Kubectl Client Version [e.g. 1.19] 31 | - Kubectl Server Version [e.g. 1.19] 32 | - KDash version [e.g. 0.0.8] 33 | 34 | **Additional context** 35 | Add any other context about the problem here. 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /.github/grcov.yml: -------------------------------------------------------------------------------- 1 | branch: true 2 | ignore-not-existing: true 3 | llvm: true 4 | filter: covered 5 | ignore: 6 | - "/*" 7 | - "C:/*" 8 | - "../*" 9 | keep-only: 10 | - "./src/*" 11 | excl-line: "#\\[derive\\(" 12 | excl-br-line: "#\\[derive\\(" 13 | excl-start: "mod tests \\{" 14 | excl-br-start: "mod tests \\{" 15 | parallel: true -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | push: 4 | branches: [main] 5 | 6 | name: Continuous Integration 7 | 8 | jobs: 9 | # Workaround for making Github Actions skip based on commit message `[skip ci]` 10 | # Source https://gist.github.com/ybiquitous/c80f15c18319c63cae8447a3be341267 11 | prepare: 12 | runs-on: ubuntu-latest 13 | if: | 14 | !contains(format('{0} {1} {2}', github.event.head_commit.message, github.event.pull_request.title, github.event.pull_request.body), '[skip ci]') 15 | steps: 16 | - run: | 17 | cat <<'MESSAGE' 18 | github.event_name: ${{ toJson(github.event_name) }} 19 | github.event: 20 | ${{ toJson(github.event) }} 21 | MESSAGE 22 | check: 23 | name: Check 24 | runs-on: ubuntu-latest 25 | needs: prepare 26 | steps: 27 | - uses: actions/checkout@v4 28 | - uses: actions/cache@v3 29 | name: Cache Cargo registry 30 | with: 31 | path: ~/.cargo/registry 32 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('Cargo.lock') }} 33 | - uses: dtolnay/rust-toolchain@stable 34 | name: Set Rust toolchain 35 | - run: cargo check --all --all-targets --workspace 36 | 37 | test: 38 | name: Test Suite 39 | runs-on: ${{ matrix.os }} 40 | strategy: 41 | fail-fast: false 42 | matrix: 43 | os: 44 | - "macOS-latest" 45 | - "windows-latest" 46 | - "ubuntu-latest" 47 | rust: [stable, nightly] 48 | needs: prepare 49 | steps: 50 | - uses: actions/checkout@v4 51 | - uses: actions/cache@v3 52 | name: Cache Cargo registry 53 | with: 54 | path: ~/.cargo/registry 55 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('Cargo.lock') }} 56 | - uses: dtolnay/rust-toolchain@master 57 | name: Set Rust toolchain 58 | with: 59 | toolchain: ${{ matrix.rust }} 60 | components: rustc, rust-std, cargo, llvm-tools, llvm-tools-preview 61 | 62 | - run: cargo clean 63 | if: matrix.os == 'ubuntu-latest' && matrix.rust == 'nightly' 64 | - run: mkdir -p ./target/debug/coverage 65 | if: matrix.os == 'ubuntu-latest' && matrix.rust == 'nightly' 66 | - run: cargo install grcov 67 | if: matrix.os == 'ubuntu-latest' && matrix.rust == 'nightly' 68 | - name: cargo test nightly 69 | run: cargo test --all-features 70 | if: matrix.rust == 'nightly' 71 | env: 72 | CARGO_INCREMENTAL: "0" 73 | LLVM_PROFILE_FILE: 'target/debug/coverage/dcaf-%p-%m.profraw' 74 | RUSTFLAGS: '-Cinstrument-coverage -Cpanic=abort -Zpanic_abort_tests' 75 | RUSTDOCFLAGS: '-C instrument-coverage -Cpanic=abort -Zpanic_abort_tests -Z unstable-options --persist-doctests target/debug/' 76 | - run: zip ./target/debug/coverage/files.zip 
./target/debug/coverage/dcaf-*.profraw 77 | if: matrix.os == 'ubuntu-latest' && matrix.rust == 'nightly' 78 | 79 | - name: cargo test stable 80 | run: cargo test --all --all-features --all-targets --workspace 81 | if: matrix.rust == 'stable' 82 | - id: coverage 83 | if: matrix.os == 'ubuntu-latest' && matrix.rust == 'nightly' 84 | run: | 85 | grcov ./target/debug/coverage/files.zip -s . --binary-path ./target/debug/ --service-job-id ${{ github.job }} --service-name 'Continuous Integration' --commit-sha ${{ github.sha }} -o ./target/debug/coverage/ --branch --ignore-not-existing --llvm --filter covered --ignore '/*' --ignore 'C:/*' --ignore '../*' -t coveralls 86 | - name: Push grcov results to Coveralls 87 | if: matrix.os == 'ubuntu-latest' && matrix.rust == 'nightly' 88 | uses: coverallsapp/github-action@v2 89 | with: 90 | github-token: ${{ secrets.GITHUB_TOKEN }} 91 | file: ./target/debug/coverage/coveralls 92 | 93 | fmt: 94 | name: Rustfmt 95 | runs-on: ubuntu-latest 96 | needs: prepare 97 | steps: 98 | - uses: actions/checkout@v4 99 | - uses: actions/cache@v3 100 | name: Cache Cargo registry 101 | with: 102 | path: ~/.cargo/registry 103 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('Cargo.lock') }} 104 | - uses: dtolnay/rust-toolchain@stable 105 | name: Set Rust toolchain 106 | with: 107 | components: rustfmt 108 | - run: cargo fmt --all -- --check 109 | 110 | clippy: 111 | name: Clippy 112 | runs-on: ubuntu-latest 113 | needs: prepare 114 | steps: 115 | - uses: actions/checkout@v4 116 | - uses: actions/cache@v3 117 | name: Cache Cargo registry 118 | with: 119 | path: ~/.cargo/registry 120 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('Cargo.lock') }} 121 | - uses: dtolnay/rust-toolchain@stable 122 | name: Set Rust toolchain 123 | with: 124 | components: clippy 125 | - run: cargo clippy --all --all-features --all-targets --workspace -- -D warnings 126 | env: 127 | CARGO_INCREMENTAL: "0" 128 | -------------------------------------------------------------------------------- /.github/workflows/cicd-test.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [cdtest] 4 | 5 | name: CICD test 6 | 7 | jobs: 8 | 9 | build-release-artifacts: 10 | name: build-release 11 | runs-on: ${{ matrix.job.os }} 12 | env: 13 | RUST_BACKTRACE: 1 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | # prettier-ignore 18 | job: 19 | - { name: "macOS-arm64", os: "macOS-latest", target: "aarch64-apple-darwin", artifact_suffix: "macos-arm64", use-cross: true } 20 | - { name: "macOS-amd64", os: "macOS-latest", target: "x86_64-apple-darwin", artifact_suffix: "macos" } 21 | - { name: "windows-amd64", os: "windows-latest", target: "x86_64-pc-windows-msvc", artifact_suffix: "windows" } 22 | - { name: "windows-aarch64", os: "windows-latest", target: "aarch64-pc-windows-msvc", artifact_suffix: "windows-aarch64", use-cross: true } 23 | - { name: "linux-gnu", os: "ubuntu-latest", target: "x86_64-unknown-linux-gnu", artifact_suffix: "linux" } 24 | - { name: "linux-musl", os: "ubuntu-latest", target: "x86_64-unknown-linux-musl", artifact_suffix: "linux-musl", use-cross: true, } 25 | - { name: "linux-aarch64-gnu", os: "ubuntu-latest", target: "aarch64-unknown-linux-gnu", artifact_suffix: "aarch64-gnu", use-cross: true, test-bin: "--bin kdash" } 26 | - { name: "linux-aarch64-musl", os: "ubuntu-latest", target: "aarch64-unknown-linux-musl", artifact_suffix: "aarch64-musl", use-cross: true, test-bin: "--bin kdash" } 27 | - { name: "linux-arm-gnu", 
os: "ubuntu-latest", target: "arm-unknown-linux-gnueabi", artifact_suffix: "armv6-gnu", use-cross: true, test-bin: "--bin kdash" } 28 | - { name: "linux-arm-musl", os: "ubuntu-latest", target: "arm-unknown-linux-musleabihf", artifact_suffix: "armv6-musl", use-cross: true, test-bin: "--bin kdash" } 29 | - { name: "linux-armv7-gnu", os: "ubuntu-latest", target: "armv7-unknown-linux-gnueabihf", artifact_suffix: "armv7-gnu", use-cross: true, test-bin: "--bin kdash" } 30 | - { name: "linux-armv7-musl", os: "ubuntu-latest", target: "armv7-unknown-linux-musleabihf", artifact_suffix: "armv7-musl", use-cross: true, test-bin: "--bin kdash" } 31 | rust: [stable] 32 | 33 | steps: 34 | - name: Checkout repository 35 | uses: actions/checkout@v4 36 | with: 37 | fetch-depth: 1 38 | - uses: actions/cache@v3 39 | name: Cache Cargo registry 40 | with: 41 | path: ~/.cargo/registry 42 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('Cargo.lock') }} 43 | - uses: actions/cache@v3 44 | if: startsWith(matrix.job.name, 'linux-') 45 | with: 46 | path: ~/.cargo/bin 47 | key: ${{ runner.os }}-cargo-bin-${{ hashFiles('.github/workflows/cd.yml') }} 48 | 49 | - uses: dtolnay/rust-toolchain@stable 50 | name: Set Rust toolchain 51 | with: 52 | targets: ${{ matrix.job.target }} 53 | - uses: taiki-e/setup-cross-toolchain-action@v1 54 | with: 55 | # NB: sets CARGO_BUILD_TARGET evar - do not need --target flag in build 56 | target: ${{ matrix.job.target }} 57 | - uses: taiki-e/install-action@cross 58 | if: ${{ matrix.job.use-cross }} 59 | 60 | - name: Installing needed Ubuntu dependencies 61 | if: matrix.job.os == 'ubuntu-latest' 62 | shell: bash 63 | run: | 64 | sudo apt-get -y update 65 | case ${{ matrix.job.target }} in 66 | arm*-linux-*) sudo apt-get -y install gcc-arm-linux-gnueabihf ;; 67 | aarch64-*-linux-*) sudo apt-get -y install gcc-aarch64-linux-gnu ;; 68 | esac 69 | 70 | - name: Build 71 | run: cargo build --release --verbose --target=${{ matrix.job.target }} --locked 72 | - name: Verify file 73 | shell: bash 74 | run: | 75 | file target/${{ matrix.job.target }}/release/kdash 76 | - name: Test 77 | if: matrix.job.target != 'aarch64-apple-darwin' && matrix.job.target != 'aarch64-pc-windows-msvc' 78 | run: cargo test --release --verbose --target=${{ matrix.job.target }} ${{ matrix.job.test-bin }} 79 | 80 | - name: Packaging final binary (Windows) 81 | if: matrix.job.os == 'windows-latest' 82 | shell: bash 83 | run: | 84 | cd target/${{ matrix.job.target }}/release 85 | BINARY_NAME=kdash.exe 86 | if [ "${{ matrix.job.target }}" != "aarch64-pc-windows-msvc" ]; then 87 | # strip the binary 88 | strip $BINARY_NAME 89 | fi 90 | RELEASE_NAME=kdash-${{ matrix.job.artifact_suffix }} 91 | tar czvf $RELEASE_NAME.tar.gz $BINARY_NAME 92 | # create sha checksum files 93 | certutil -hashfile $RELEASE_NAME.tar.gz sha256 | grep -E [A-Fa-f0-9]{64} > $RELEASE_NAME.sha256 94 | echo "RELEASE_NAME=$RELEASE_NAME" >> $GITHUB_ENV 95 | 96 | - name: Packaging final binary (macOS and Linux) 97 | if: matrix.job.os != 'windows-latest' 98 | shell: bash 99 | run: | 100 | # set the right strip executable 101 | STRIP="strip"; 102 | case ${{ matrix.job.target }} in 103 | arm*-linux-*) STRIP="arm-linux-gnueabihf-strip" ;; 104 | aarch64-*-linux-*) STRIP="aarch64-linux-gnu-strip" ;; 105 | esac; 106 | cd target/${{ matrix.job.target }}/release 107 | BINARY_NAME=kdash 108 | # strip the binary 109 | "$STRIP" "$BINARY_NAME" 110 | RELEASE_NAME=kdash-${{ matrix.job.artifact_suffix }} 111 | tar czvf $RELEASE_NAME.tar.gz $BINARY_NAME 112 | # create sha 
checksum files 113 | shasum -a 256 $RELEASE_NAME.tar.gz > $RELEASE_NAME.sha256 114 | echo "RELEASE_NAME=$RELEASE_NAME" >> $GITHUB_ENV 115 | 116 | publish-cargo: 117 | name: Publishing to Cargo 118 | runs-on: ubuntu-latest 119 | steps: 120 | - uses: actions/checkout@v4 121 | - uses: actions/cache@v3 122 | name: Cache Cargo registry 123 | with: 124 | path: ~/.cargo/registry 125 | key: ${{ runner.os }}-cargo-registry-${{ hashFiles('Cargo.lock') }} 126 | - uses: actions/cache@v3 127 | with: 128 | path: ~/.cargo/bin 129 | key: ${{ runner.os }}-cargo-bin-${{ hashFiles('.github/workflows/cd.yml') }} 130 | - uses: dtolnay/rust-toolchain@stable 131 | - run: cargo publish --token ${{ secrets.CARGO_API_KEY }} --allow-dirty --dry-run 132 | 133 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /old 3 | .vscode/* 4 | !.vscode/launch.json 5 | .idea/* 6 | rustc-ice-* 7 | .github/workflows/ci-test.yml.bak -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Windows - Debug executable 'kdash'", 9 | "type": "lldb", 10 | "request": "launch", 11 | "program": "${workspaceFolder}\\target\\debug\\kdash.exe", 12 | "args": [], 13 | "sourceLanguages": [ "rust" ], 14 | "cwd": "${workspaceFolder}" 15 | }, 16 | { 17 | "type": "lldb", 18 | "request": "launch", 19 | "name": "Debug executable 'kdash'", 20 | "cargo": { 21 | "args": [ 22 | "build", 23 | "--bin=kdash", 24 | "--package=kdash" 25 | ], 26 | "filter": { 27 | "name": "kdash", 28 | "kind": "bin" 29 | } 30 | }, 31 | "args": [], 32 | "cwd": "${workspaceFolder}" 33 | }, 34 | { 35 | "type": "lldb", 36 | "request": "launch", 37 | "name": "Debug unit tests in executable 'kdash'", 38 | "cargo": { 39 | "args": [ 40 | "test", 41 | "--no-run", 42 | "--bin=kdash", 43 | "--package=kdash" 44 | ], 45 | "filter": { 46 | "name": "kdash", 47 | "kind": "bin" 48 | } 49 | }, 50 | "args": [], 51 | "cwd": "${workspaceFolder}" 52 | } 53 | ] 54 | } -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## [0.6.2] - 2025-03-05 4 | 5 | - Dependency updates 6 | - Minor fixes 7 | 8 | ## [0.6.1] - 2024-08-27 9 | 10 | - Dependency updates 11 | 12 | ## [0.6.0] - 2024-01-25 13 | 14 | - Describe syntax highlighting 15 | - Library updates 16 | - Add debug mode 17 | - Sock5 proxy support 18 | 19 | ## [0.5.0] - 2024-01-22 20 | 21 | - Linux musl builds as default 22 | - Aarch64 build for windows 23 | - ARM v7 builds for Linux 24 | 25 | ## [0.4.9] - 2024-01-22 26 | 27 | - bug fixes 28 | - library updates 29 | 30 | ## [0.4.8] - 2024-01-18 31 | 32 | - macOS ARM64 release 33 | 34 | ## [0.4.7] - 2024-01-15 35 | 36 | - UI improvements 37 | 38 | ## [0.4.6] - 2024-01-08 39 | 40 | - Fix keypress issues on windows (https://github.com/kdash-rs/kdash/issues/390) 41 | - Upgrade libraries 42 | 43 | ## [0.4.5] - 2024-01-05 44 | 45 | - YAML syntax highlighting 46 | - Upgrade libraries 47 | 48 | ## [0.4.4] - 2023-10-02 49 | 50 | - Patch 
release for missing MUSL builds 51 | 52 | ## [0.4.3] - 2023-09-19 53 | 54 | - Global Filtering support (https://github.com/kdash-rs/kdash/pull/383) 55 | 56 | ## [0.4.2] - 2023-08-24 57 | 58 | - Allow HTTPS connection (https://github.com/kdash-rs/kdash/pull/372) 59 | - Upgrade libraries 60 | 61 | ## [0.4.0] - 2023-07-04 62 | 63 | - Add basic support for Custom Resource Definitions (https://github.com/kdash-rs/kdash/commit/fb4b4c4451fca5e5ad62dd93e658dd8009e8c659) 64 | - Add support for NetworkPolicy 65 | - Upgrade libraries 66 | 67 | ## [0.3.7] - 2023-06-30 68 | 69 | - Add support for Ingress (https://github.com/kdash-rs/kdash/pull/316) 70 | - Add support for PersistentVolumeClaims 71 | - Add support for PersistentVolumes 72 | - Add support for ServiceAccounts 73 | - Upgrade libraries 74 | 75 | ## [0.3.6] - 2022-12-21 76 | 77 | - Hide managedFields in YAML view by default (https://github.com/kdash-rs/kdash/pull/296) 78 | - Fail gracefully when context not found (https://github.com/kdash-rs/kdash/issues/280) 79 | - Improve cache (https://github.com/kdash-rs/kdash/issues/271) 80 | - Upgrade libraries 81 | 82 | ## [0.3.5] - 2022-08-31 83 | 84 | - Add support for RoleBindings (https://github.com/kdash-rs/kdash/pull/245) 85 | - Add support for ClusterRoleBindings (https://github.com/kdash-rs/kdash/pull/249) 86 | - Upgrade libraries 87 | 88 | ## [0.3.4] - 2022-08-18 89 | 90 | - Add support for Cluster Roles (https://github.com/kdash-rs/kdash/pull/236) 91 | - Add support for decoding secrets (https://github.com/kdash-rs/kdash/pull/242) 92 | - Upgrade libraries 93 | 94 | ## [0.3.3] - 2022-08-01 95 | 96 | - Upgrade libraries 97 | - Add sponsors 98 | - Add support for Roles (https://github.com/kdash-rs/kdash/pull/224) 99 | - Add support for Storage classes (https://github.com/kdash-rs/kdash/pull/222) 100 | - Add support for ARM and aarch64 binaries 101 | 102 | ## [0.3.1] - 2022-04-06 103 | 104 | - Upgrade libraries trying to fix cargo install issue 105 | 106 | ## [0.3.0] - 2022-02-05 107 | 108 | - UI updates (https://github.com/kdash-rs/kdash/pull/157) 109 | - Fix stack overflow error (https://github.com/kdash-rs/kdash/issues/160) 110 | - Color contrast improvements (fix https://github.com/kdash-rs/kdash/issues/162) 111 | 112 | ## [0.2.7] - 2022-01-20 113 | 114 | - Fix crashes when memory and/or cpu usages are higher than 100% 115 | - Improve cache 116 | 117 | ## [0.2.6] - 2022-01-19 118 | 119 | - Fix status color of pods not ready 120 | 121 | ## [0.2.5] - 2021-12-21 122 | 123 | - Fix help screen which was not rendered 124 | - Fix status color of pods not ready 125 | - Update dependencies 126 | 127 | ## [0.2.4] - 2021-09-27 128 | 129 | - Update dependencies 130 | - Fix crash on cargo install 131 | 132 | ## [0.2.3] - 2021-08-02 133 | 134 | - Add support for ReplicationControllers 135 | - Fix issue with table overflow crash 136 | 137 | ## [0.2.2] - 2021-07-20 138 | 139 | - Add support for page up and page down on tables and text views 140 | - Fix crash on escape 141 | 142 | ## [0.2.0] - 2021-07-12 143 | 144 | - Add support for Job resource - [#42](https://github.com/kdash-rs/kdash/pull/42), Thanks to [somayaj](https://github.com/somayaj) 145 | - Add support for CronJob resource - [#69](https://github.com/kdash-rs/kdash/pull/69), Thanks to [somayaj](https://github.com/somayaj) 146 | - Add support for DaemonSets 147 | - Add support for Secrets 148 | - Add more resources tab and menu 149 | - Show init containers in container view 150 | - Internal optimizations 151 | 152 | ## [0.1.2] - 2021-06-12 153 | 154 
| - Add human friendly crash messages 155 | - Add Tab keybinding to cycle through views 156 | - Migrate to kubectl-view-allocations library 157 | 158 | ## [0.1.1] - 2021-06-04 159 | 160 | - Fix a small bug that crashes the app in certain terminal size 161 | 162 | ## [0.1.0] - 2021-05-17 163 | 164 | - Stable release 165 | - Minor bug fixes 166 | - Add vim key bindings for arrow keys 167 | - Chocolatey deployment for Windows 10 168 | 169 | ## [0.0.9] - 2021-05-10 170 | 171 | - Improved error handling and error display 172 | - Minor bug fixes and improvements 173 | 174 | ## [0.0.8] - 2021-05-04 175 | 176 | ### Added 177 | 178 | - Get YAML for all resources (pod, svc, node, statefulset, replicaset, configmap, deployment) 179 | - Describe for all remaining resources (svc, statefulset, replicaset, configmap, deployment) 180 | 181 | ### Changed 182 | 183 | - Table scrolling doesn't circle back now. This seems to be better UX when having long lists 184 | 185 | ### Fixed 186 | 187 | - Describe view spacing 188 | 189 | ## [0.0.7] - 2021-05-03 190 | 191 | ### Added 192 | 193 | - Container ports and probes 194 | 195 | ### Fixed 196 | 197 | - Library updates 198 | - Scroll improvements 199 | - More tests 200 | - Show containers for failing pods 201 | 202 | ## [0.0.6] - 2021-04-27 203 | 204 | ### Added 205 | 206 | - Switch k8s contexts from the all contexts view 207 | 208 | ## [0.0.5] - 2021-04-27 209 | 210 | ### Fixed 211 | 212 | - Scrolling issues 213 | - Log streaming discrepancy 214 | - CLI versions UI glitch 215 | 216 | ## [0.0.4] - 2021-04-26 217 | 218 | ### Added 219 | 220 | - Homebrew installation 221 | - Docker installation 222 | 223 | ## [0.0.3] - 2021-04-25 224 | 225 | ### Fixed 226 | 227 | - Minor bug fixes 228 | - Refactor and polish 229 | 230 | ### Added 231 | 232 | - Resource utilization view with grouping 233 | - Select/copy text in logs and describe view 234 | - Config map tab 235 | - Statefulsets tab 236 | - Replicasets tab 237 | - Deployments tab 238 | 239 | ## [0.0.2] - 2021-04-22 240 | 241 | ### Fixed 242 | 243 | - Pod status fix 244 | - Switch to API for metrics 245 | - Various bug fixes 246 | - Update key bindings 247 | - Update theme consistency 248 | 249 | ### Added 250 | 251 | - Containers view 252 | - Container logs 253 | - Pod describe 254 | - Node describe 255 | 256 | ## [0.0.1] - 2021-04-18 257 | 258 | - Initial beta release 259 | 260 | --- 261 | 262 | # What is this? 263 | 264 | All notable changes to this project will be documented in this file. 265 | 266 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 267 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 268 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | d4udts@gmail.com. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. 
Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributions are welcome 🙏 2 | 3 | You need the Rust toolchain to develop KDash 4 | 5 | Install the Rust toolchain by following [this](https://www.rust-lang.org/tools/install). This will install `rustup`, `rustc` and `cargo` 6 | 7 | ## Other requirements 8 | 9 | - kubectl for local testing 10 | 11 | ## Setup workspace 12 | 13 | 1. Clone this repo 14 | 1. Run `cargo test` to set up the Git hooks 15 | 1. Make changes 16 | 1. Run the application using `make run` or `cargo run` 17 | 1. Commit changes. This will trigger the pre-commit hook, which runs format and lint. If there are errors or warnings from Clippy, fix them 18 | 1. Push to your clone. This will trigger the pre-push hook, which runs lint and test 19 | 1. Create a PR 20 | 21 | - There are other commands configured in the Makefile.
If you have make installed then you can use those directly 22 | - For `make test-cov` you need to install tarpaulin with `cargo install cargo-tarpaulin` 23 | - For `make analyse` you need to install geiger with `cargo install cargo-geiger` 24 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "kdash" 3 | version = "0.6.2" 4 | authors = ["Deepu K Sasidharan "] 5 | description = """ 6 | A fast and simple dashboard for Kubernetes 7 | """ 8 | documentation = "https://github.com/kdash-rs/kdash" 9 | keywords = ["kdash", "kubernetes", "dashboard"] 10 | repository = "https://github.com/kdash-rs/kdash" 11 | homepage = "https://github.com/kdash-rs/kdash" 12 | readme = "README.md" 13 | license = "MIT" 14 | exclude = [ 15 | "assets/*", 16 | ".github", 17 | "Makefile.toml", 18 | "CONTRIBUTING.md", 19 | "*.log", 20 | "tags", 21 | ] 22 | edition = "2021" 23 | 24 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 25 | 26 | [badges] 27 | 28 | [dependencies] 29 | openssl-probe = "0.1.5" 30 | crossterm = "0.28.1" 31 | ratatui = { version = "0.29.0", default-features = false, features = [ 32 | 'crossterm', 33 | ] } 34 | serde = { version = "1.0", default-features = false, features = ["derive"] } 35 | serde_json = "1.0" 36 | serde_yaml = "0.9" 37 | syntect = "5.2.0" 38 | syntect-tui = "3.0.5" 39 | clap = { version = "4.5.23", default-features = false, features = [ 40 | "help", 41 | "usage", 42 | "error-context", 43 | "derive", 44 | ] } 45 | tokio = { version = "1.42.0", default-features = false, features = [ 46 | "macros", 47 | "rt-multi-thread", 48 | ] } 49 | tokio-stream = { version = "0.1.17", default-features = false, features = [ 50 | "time", 51 | ] } 52 | futures = "0.3.31" 53 | tui-input = "0.11.1" 54 | duct = "0.13" 55 | anyhow = "1.0.94" 56 | backtrace = { version = "0.3.69", features = ["gimli-symbolize"] } 57 | textwrap = "0.16.1" 58 | regex = "1.11.1" 59 | kube = { version = "0.98.0", default-features = false, features = [ 60 | "socks5", 61 | "client", 62 | "rustls-tls", 63 | "oidc", 64 | "oauth", 65 | "ws", 66 | ] } 67 | k8s-openapi = { version = "0.24.0", default-features = false, features = [ 68 | "v1_29", 69 | ] } 70 | base64 = "0.22.1" 71 | human-panic = "2.0.2" 72 | kubectl-view-allocations = { version = "0.20.3", default-features = false } 73 | async-trait = "0.1.83" 74 | glob-match = "0.2.1" 75 | rand = "0.8" 76 | copypasta = "0.10.1" 77 | log = "0.4.22" 78 | simplelog = { version = "0.12.2", default-features = false } 79 | 80 | [dev-dependencies.cargo-husky] 81 | version = "1" 82 | default-features = false 83 | features = ["user-hooks"] 84 | 85 | [[bin]] 86 | bench = false 87 | path = "src/main.rs" 88 | name = "kdash" 89 | 90 | [profile.release] 91 | lto = true 92 | codegen-units = 1 93 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # ----------------------------- 2 | # Build Kdash base image 3 | # ----------------------------- 4 | 5 | FROM clux/muslrust:stable AS builder 6 | WORKDIR /usr/src 7 | 8 | # Download and compile Rust dependencies in an empty project and cache as a separate Docker layer 9 | RUN USER=root cargo new --bin kdash-temp 10 | 11 | WORKDIR /usr/src/kdash-temp 12 | COPY Cargo.* . 
13 | RUN cargo build --release --target x86_64-unknown-linux-musl 14 | # remove src from empty project 15 | RUN rm -r src 16 | # Copy actual source files and Build the app binary 17 | COPY src ./src 18 | # remove previous deps 19 | RUN rm ./target/x86_64-unknown-linux-musl/release/deps/kdash* 20 | 21 | RUN --mount=type=cache,target=/volume/target \ 22 | --mount=type=cache,target=/root/.cargo/registry \ 23 | cargo build --release --target x86_64-unknown-linux-musl --bin kdash 24 | RUN mv target/x86_64-unknown-linux-musl/release/kdash . 25 | 26 | # ----------------------------- 27 | # build final Kdash image 28 | # ----------------------------- 29 | FROM debian:stable-slim 30 | 31 | ARG KUBECTL_VERSION="v1.29.0" 32 | # Copy the compiled binary from the builder container 33 | COPY --from=builder --chown=nonroot:nonroot /usr/src/kdash-temp/kdash /usr/local/bin 34 | 35 | # Install dependencies like kubectl 36 | RUN apt-get update && \ 37 | apt-get dist-upgrade -y && \ 38 | apt-get install -y -qq libxcb1 curl vim && \ 39 | curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && \ 40 | chmod +x /usr/local/bin/kubectl && \ 41 | apt-get autoremove && apt-get autoclean 42 | 43 | RUN /usr/local/bin/kdash -h 44 | 45 | ENTRYPOINT [ "/usr/local/bin/kdash" ] 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Deepu K Sasidharan 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VERSION := latest 2 | IMG_NAME := deepu105/kdash 3 | IMAGE := ${IMG_NAME}:${VERSION} 4 | 5 | default: run 6 | 7 | ## Run all tests 8 | test: 9 | @make lint && cargo test 10 | 11 | ## Run all tests with coverage - `cargo install cargo-tarpaulin` 12 | test-cov: 13 | @cargo tarpaulin 14 | 15 | ## Builds the app for current os-arch 16 | build: 17 | @make test && cargo build --release 18 | 19 | ## Runs the app 20 | run: 21 | @cargo fmt && make lint && CARGO_INCREMENTAL=1 cargo run -- $(filter-out $@, $(MAKECMDGOALS)) 22 | 23 | ## Run clippy 24 | lint: 25 | @CARGO_INCREMENTAL=0 cargo clippy --all --all-features --all-targets --workspace -- -D warnings 26 | 27 | ## Fix lint 28 | lint-fix: 29 | @cargo fix --allow-staged 30 | 31 | ## Run format 32 | fmt: 33 | @cargo fmt 34 | 35 | ## Build a Docker Image 36 | docker: 37 | @DOCKER_BUILDKIT=1 docker build --progress=plain --rm -t ${IMAGE} . 38 | 39 | ## Run Docker Image locally 40 | docker-run: 41 | @docker run --network host --rm -it -v ~/.kube/config:/root/.kube/config ${IMAGE} 42 | 43 | ## Analyse for unsafe usage - `cargo install cargo-geiger` 44 | analyse: 45 | @cargo geiger 46 | 47 | ## Release tag 48 | release: 49 | @git tag -a ${V} -m "Release ${V}" && git push origin ${V} 50 | 51 | ## Delete tag 52 | delete-tag: 53 | @git tag -d ${V} && git push --delete origin ${V} -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Only the latest version of the software will be supported with security patches. 6 | 7 | | Version | Supported | 8 | | ------- | ------------------ | 9 | | latest | :white_check_mark: | 10 | 11 | ## Reporting a Vulnerability 12 | 13 | If you find a vulnerability, please reach out to me via email (d4udts@gmail.com) or via Twitter DM ([@deepu105](https://twitter.com/deepu105)) for responsible disclosure. 14 | Please don't create GitHub issues publicly so that I can try to fix the issue and release a patch first.
15 | -------------------------------------------------------------------------------- /artwork/icon-2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/artwork/icon-2x.png -------------------------------------------------------------------------------- /artwork/icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/artwork/icon.jpg -------------------------------------------------------------------------------- /artwork/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/artwork/icon.png -------------------------------------------------------------------------------- /artwork/logo-2x.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/artwork/logo-2x.png -------------------------------------------------------------------------------- /artwork/logo-icon.svg: -------------------------------------------------------------------------------- 1 | 2 | 22 | 24 | 25 | 27 | image/svg+xml 28 | 30 | 31 | 32 | 33 | 53 | 55 | 57 | 61 | 65 | 66 | 76 | 86 | 87 | 95 | 99 | 100 | 105 | 112 | 113 | -------------------------------------------------------------------------------- /artwork/logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/artwork/logo.jpg -------------------------------------------------------------------------------- /artwork/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/artwork/logo.png -------------------------------------------------------------------------------- /artwork/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 21 | 23 | 24 | 26 | image/svg+xml 27 | 29 | 30 | 31 | 32 | 52 | 54 | 56 | 60 | 64 | 65 | 66 | 71 | 74 | 75 | 80 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /deployment/chocolatey/chocolateyinstall.ps1.template: -------------------------------------------------------------------------------- 1 | $ErrorActionPreference = 'Stop'; 2 | 3 | $PackageName = 'kdash' 4 | $toolsDir = "$(Split-Path -parent $MyInvocation.MyCommand.Definition)" 5 | $url64 = 'https://github.com/kdash-rs/kdash/releases/download/v$version/kdash-windows.tar.gz' 6 | $checksum64 = '$hash_64' 7 | 8 | $packageArgs = @{ 9 | packageName = $packageName 10 | softwareName = $packageName 11 | unzipLocation = $toolsDir 12 | fileType = 'exe' 13 | url = $url64 14 | checksum = $checksum64 15 | checksumType = 'sha256' 16 | 17 | } 18 | Install-ChocolateyZipPackage @packageArgs 19 | $File = Get-ChildItem -File -Path $env:ChocolateyInstall\lib\$packageName\tools\ -Filter *.tar 20 | Get-ChocolateyUnzip -fileFullPath $File.FullName -destination $env:ChocolateyInstall\lib\$packageName\tools\ -------------------------------------------------------------------------------- /deployment/chocolatey/kdash.nuspec.template: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | kdash 23 | $version 24 | 25 | 26 | 27 | KDash 28 | Deepu K Sasidharan 29 | https://github.com/kdash-rs/kdash 30 | https://github.com/kdash-rs/kdash/blob/main/LICENSE 31 | true 32 | https://github.com/kdash-rs/kdash 33 | https://github.com/kdash-rs/kdash/blob/main/README.md 34 | https://github.com/kdash-rs/kdash/issues 35 | cli cross-platform terminal k8s tui monitoring kdash kubernetes rust 36 | A fast and simple dashboard for Kubernetes written in Rust. 37 | 38 | A fast and simple dashboard for Kubernetes written in Rust. 39 | 40 | **Usage** 41 | To use, run `kdash` in a terminal. 42 | 43 | For more [documentation and usage](https://github.com/kdash-rs/kdash/blob/main/README.md), see the [official repo](https://github.com/kdash-rs/kdash). 44 | 45 | 46 | https://github.com/kdash-rs/kdash/releases/tag/v$version/ 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /deployment/chocolatey/packager.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import sys 3 | from string import Template 4 | 5 | args = sys.argv 6 | version = args[1].replace("v", "") 7 | template_file_path = args[2] 8 | generated_file_path = args[3] 9 | 10 | # Deployment files 11 | hash_64 = args[4].strip() 12 | 13 | print("Generating formula") 14 | print(" VERSION: %s" % version) 15 | print(" TEMPLATE PATH: %s" % template_file_path) 16 | print(" SAVING AT: %s" % generated_file_path) 17 | print(" HASH: %s" % hash_64) 18 | 19 | with open(template_file_path, "r") as template_file: 20 | template = Template(template_file.read()) 21 | substitute = template.safe_substitute(version=version, hash_64=hash_64) 22 | print("\n================== Generated package file ==================\n") 23 | print(substitute) 24 | print("\n============================================================\n") 25 | 26 | with open(generated_file_path, "w") as generated_file: 27 | generated_file.write(substitute) -------------------------------------------------------------------------------- /deployment/getLatest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The install script is licensed under the CC-0 1.0 license. 4 | 5 | # See https://github.com/kdash-rs/kdash/blob/main/LICENSE for more details. 6 | # 7 | # To run this script execute: 8 | # `curl https://raw.githubusercontent.com/kdash-rs/kdash/main/deployment/getLatest.sh | sh` 9 | 10 | GITHUB_REPO="kdash" 11 | GITHUB_USER="kdash-rs" 12 | EXE_FILENAME="kdash" 13 | EXE_DEST_DIR="/usr/local/bin" 14 | 15 | bye() { 16 | result=$? 
17 | if [ "$result" != "0" ]; then 18 | echo "Fail to install ${GITHUB_USER}/${GITHUB_REPO}" 19 | fi 20 | exit $result 21 | } 22 | 23 | fail() { 24 | echo "$1" 25 | exit 1 26 | } 27 | 28 | find_download_url() { 29 | local SUFFIX=$1 30 | local LATEST_URL="https://api.github.com/repos/${GITHUB_USER}/${GITHUB_REPO}/releases/latest" 31 | local URL=$(curl -s "${LATEST_URL}" | grep "browser_download_url.*${SUFFIX}" | cut -d : -f 2,3 | tr -d \" | head -n 1) 32 | echo "${URL//[[:space:]]/}" 33 | } 34 | 35 | find_arch() { 36 | local ARCH=$(uname -m) 37 | 38 | case $ARCH in 39 | armv*) ARCH="arm" ;; 40 | arm64) ARCH="arm64" ;; 41 | aarch64) ARCH="aarch64" ;; 42 | x86_64) ARCH="amd64" ;; 43 | *) fail "Your system architecture is not supported: $ARCH" ;; 44 | esac 45 | 46 | echo $ARCH 47 | } 48 | 49 | find_os() { 50 | local OS=$(echo $(uname) | tr '[:upper:]' '[:lower:]') 51 | 52 | case "$OS" in 53 | # Minimalist GNU for Windows 54 | mingw*) OS='windows' ;; 55 | msys*) OS='windows' ;; 56 | esac 57 | 58 | echo $OS 59 | } 60 | 61 | find_suffix() { 62 | local ARCH=$1 63 | local OS=$2 64 | local SUFFIX="$OS.tar.gz" 65 | 66 | case "$OS" in 67 | "darwin") 68 | case "$ARCH" in 69 | "arm64") SUFFIX="macos-arm64.tar.gz" ;; 70 | *) SUFFIX='macos.tar.gz' ;; 71 | esac 72 | ;; 73 | "windows") SUFFIX='windows.tar.gz' ;; 74 | *) 75 | case "$ARCH" in 76 | "arm") SUFFIX="arm-gnu.tar.gz" ;; 77 | "aarch64") SUFFIX="aarch64-gnu.tar.gz" ;; 78 | *) SUFFIX='linux.tar.gz' ;; 79 | esac 80 | ;; 81 | esac 82 | 83 | echo $SUFFIX 84 | } 85 | 86 | download_file() { 87 | local FILE_URL="$1" 88 | local FILE_PATH="$2" 89 | echo "Getting $FILE_URL ....." 90 | httpStatusCode=$(curl -s -w '%{http_code}' -L "$FILE_URL" -o "$FILE_PATH") 91 | if [ "$httpStatusCode" != 200 ]; then 92 | echo "failed to download '${URL}'" 93 | fail "Request fail with http status code $httpStatusCode" 94 | fi 95 | } 96 | 97 | find_exec_dest_path() { 98 | if [ ! -w $EXE_DEST_DIR ]; then 99 | echo "Cannot write to ${EXE_DEST_DIR}. Run with 'sudo' to install to ${EXE_DEST_DIR}. Installing to current directory now ....." 100 | EXE_DEST_DIR=$(pwd) 101 | fi 102 | } 103 | 104 | install_file() { 105 | local FILE_PATH=$1 106 | local EXE_DEST_FILE=$2 107 | TMP_DIR="/tmp/${GITHUB_USER}_${GITHUB_REPO}" 108 | mkdir -p "$TMP_DIR" || true 109 | tar xf "$FILE_PATH" -C "$TMP_DIR" 110 | cp "$TMP_DIR/${EXE_FILENAME}" "${EXE_DEST_FILE}" 111 | chmod +x "${EXE_DEST_FILE}" 112 | rm -rf "$TMP_DIR" 113 | } 114 | 115 | main() { 116 | find_exec_dest_path 117 | local EXE_DEST_FILE="${EXE_DEST_DIR}/${EXE_FILENAME}" 118 | local ARCH=$(find_arch) 119 | local OS=$(find_os) 120 | local SUFFIX=$(find_suffix $ARCH $OS) 121 | local FILE_URL=$(find_download_url $SUFFIX) 122 | if [ -z "${FILE_URL}" ]; then 123 | fail "Did not find a latest release for your system: $OS $ARCH ($SUFFIX)" 124 | fi 125 | local FILE_PATH="/tmp/${GITHUB_USER}-${GITHUB_REPO}-latest-${SUFFIX}" 126 | download_file "${FILE_URL}" "${FILE_PATH}" 127 | install_file "${FILE_PATH}" "${EXE_DEST_FILE}" 128 | rm -Rf ${FILE_PATH} 129 | echo "executable installed at ${EXE_DEST_FILE}" 130 | bye 131 | } 132 | 133 | #TODO check bash is used `readlink /proc/$$/exe` 134 | # because the script is not compatible with dash (default sh on ubuntu), other posix only shell,... 
135 | 136 | #Stop execution on any error 137 | trap "bye" EXIT 138 | set -e 139 | # set -x 140 | main 141 | -------------------------------------------------------------------------------- /deployment/homebrew/kdash.rb.template: -------------------------------------------------------------------------------- 1 | 2 | # Documentation: https://docs.brew.sh/Formula-Cookbook 3 | # https://rubydoc.brew.sh/Formula 4 | class Kdash < Formula 5 | desc "A fast and simple dashboard for Kubernetes written in Rust" 6 | homepage "https://github.com/kdash-rs/kdash" 7 | if OS.mac? and Hardware::CPU.arm? 8 | url "https://github.com/kdash-rs/kdash/releases/download/$version/kdash-macos-arm64.tar.gz" 9 | sha256 "$hash_mac_arm" 10 | elsif OS.mac? and Hardware::CPU.intel? 11 | url "https://github.com/kdash-rs/kdash/releases/download/$version/kdash-macos.tar.gz" 12 | sha256 "$hash_mac" 13 | else 14 | url "https://github.com/kdash-rs/kdash/releases/download/$version/kdash-linux-musl.tar.gz" 15 | sha256 "$hash_linux" 16 | end 17 | version "$version" 18 | license "MIT" 19 | 20 | def install 21 | bin.install "kdash" 22 | ohai "You're done! Run with \"kdash\"" 23 | ohai "For runtime flags, see \"kdash --help\"" 24 | end 25 | end -------------------------------------------------------------------------------- /deployment/homebrew/packager.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import sys 3 | from string import Template 4 | 5 | args = sys.argv 6 | version = args[1] 7 | template_file_path = args[2] 8 | generated_file_path = args[3] 9 | 10 | # Deployment files 11 | hash_mac = args[4].strip() 12 | hash_mac_arm = args[5].strip() 13 | hash_linux = args[6].strip() 14 | 15 | print("Generating formula") 16 | print(" VERSION: %s" % version) 17 | print(" TEMPLATE PATH: %s" % template_file_path) 18 | print(" SAVING AT: %s" % generated_file_path) 19 | print(" MAC HASH: %s" % hash_mac) 20 | print(" MAC ARM HASH: %s" % hash_mac_arm) 21 | print(" LINUX HASH: %s" % hash_linux) 22 | 23 | with open(template_file_path, "r") as template_file: 24 | template = Template(template_file.read()) 25 | substitute = template.safe_substitute(version=version, hash_mac=hash_mac, hash_mac_arm=hash_mac_arm, hash_linux=hash_linux) 26 | print("\n================== Generated package file ==================\n") 27 | print(substitute) 28 | print("\n============================================================\n") 29 | 30 | with open(generated_file_path, "w") as generated_file: 31 | generated_file.write(substitute) -------------------------------------------------------------------------------- /deployment/scoop/kdash.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "homepage": "https://github.com/kdash-rs/kdash", 3 | "description": "A fast and simple dashboard for Kubernetes", 4 | "version": "$version64", 5 | "license": "MIT", 6 | "architecture": { 7 | "64bit": { 8 | "url": "https://github.com/kdash-rs/kdash/releases/download/v$version64/kdash-windows.tar.gz", 9 | "hash": "$hash_64" 10 | } 11 | }, 12 | "bin": "kdash.exe", 13 | "checkver": "github", 14 | "autoupdate": { 15 | "architecture": { 16 | "64bit": { 17 | "url": "https://github.com/kdash-rs/kdash/releases/download/v$version/kdash-windows.tar.gz", 18 | "hash": { 19 | "url": "https://github.com/kdash-rs/kdash/releases/download/v$version/kdash-windows.sha256", 20 | "regex": "$sha256" 21 | } 22 | } 23 | } 24 | } 25 | } 26 | 
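The Scoop manifest above is generated from this template by the packager script that follows: it substitutes the $version64 and $hash_64 placeholders, and because it uses Python's Template.safe_substitute, the $version and $sha256 placeholders in the autoupdate block are deliberately left in place for Scoop's own autoupdate mechanism. The exact invocation used by the release workflow is not shown in this section, so the following is only a sketch with assumed file names and values:

    # Hypothetical invocation (paths, version and hash source are assumptions, not the actual CD step):
    # take the checksum produced for the Windows release archive
    HASH_64="$(cut -d' ' -f1 kdash-windows.sha256)"
    # render kdash.json from the template; the script strips the leading "v" from the version argument
    python3 deployment/scoop/packager.py "v0.6.2" deployment/scoop/kdash.json.template kdash.json "$HASH_64"

The generated manifest is printed for inspection and written to the output path given as the third argument.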
-------------------------------------------------------------------------------- /deployment/scoop/packager.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | import sys 3 | from string import Template 4 | 5 | args = sys.argv 6 | version64 = args[1].replace("v", "") 7 | template_file_path = args[2] 8 | generated_file_path = args[3] 9 | 10 | # Deployment files 11 | hash_64 = args[4].strip() 12 | 13 | print("Generating formula") 14 | print(" VERSION: %s" % version64) 15 | print(" TEMPLATE PATH: %s" % template_file_path) 16 | print(" SAVING AT: %s" % generated_file_path) 17 | print(" HASH: %s" % hash_64) 18 | 19 | with open(template_file_path, "r") as template_file: 20 | template = Template(template_file.read()) 21 | substitute = template.safe_substitute(version64=version64, hash_64=hash_64) 22 | print("\n================== Generated package file ==================\n") 23 | print(substitute) 24 | print("\n============================================================\n") 25 | 26 | with open(generated_file_path, "w") as generated_file: 27 | generated_file.write(substitute) -------------------------------------------------------------------------------- /k8s.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: nginx-new 5 | spec: 6 | replicas: 3 7 | selector: 8 | app: nginx 9 | template: 10 | metadata: 11 | name: nginx 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx 18 | ports: 19 | - containerPort: 80 20 | - name: nginx2 21 | image: nginx 22 | ports: 23 | - containerPort: 80 24 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | tab_spaces=2 2 | edition = "2021" 3 | reorder_imports = true 4 | imports_granularity = "Crate" 5 | group_imports = "StdExternalCrate" 6 | reorder_modules = true 7 | merge_derives = true 8 | use_field_init_shorthand = true 9 | format_macro_matchers = true 10 | format_macro_bodies = true -------------------------------------------------------------------------------- /screenshots/contexts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/screenshots/contexts.png -------------------------------------------------------------------------------- /screenshots/describe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/screenshots/describe.png -------------------------------------------------------------------------------- /screenshots/logs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/screenshots/logs.png -------------------------------------------------------------------------------- /screenshots/overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/screenshots/overview.png -------------------------------------------------------------------------------- /screenshots/ui.gif: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/screenshots/ui.gif -------------------------------------------------------------------------------- /screenshots/utilization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kdash-rs/kdash/a09eee25dcaeeba56eb660ca39d6ae34e981453b/screenshots/utilization.png -------------------------------------------------------------------------------- /src/app/contexts.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use kube::config::{Context, Kubeconfig, NamedContext}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row, Table}, 6 | Frame, 7 | }; 8 | 9 | use super::{models::AppResource, ActiveBlock, App}; 10 | use crate::{ 11 | network::Network, 12 | ui::{ 13 | utils::{ 14 | layout_block_active, loading, style_highlight, style_primary, style_secondary, 15 | table_header_style, 16 | }, 17 | HIGHLIGHT, 18 | }, 19 | }; 20 | 21 | #[derive(Clone, Default)] 22 | pub struct KubeContext { 23 | pub name: String, 24 | pub cluster: String, 25 | pub user: Option, 26 | // pub namespace: Option, 27 | pub is_active: bool, 28 | } 29 | 30 | impl KubeContext { 31 | pub fn from_api(ctx: &NamedContext, is_active: bool) -> Self { 32 | let def_context = Context::default(); 33 | let context = ctx.context.as_ref().unwrap_or(&def_context); 34 | KubeContext { 35 | name: ctx.name.clone(), 36 | cluster: context.cluster.clone(), 37 | user: context.user.clone(), 38 | // namespace: context.namespace.clone(), 39 | is_active, 40 | } 41 | } 42 | } 43 | 44 | pub fn get_contexts(config: &Kubeconfig, selected_ctx: Option) -> Vec { 45 | config 46 | .contexts 47 | .iter() 48 | .map(|ctx| { 49 | KubeContext::from_api( 50 | ctx, 51 | is_active_context(&ctx.name, &config.current_context, selected_ctx.to_owned()), 52 | ) 53 | }) 54 | .collect::>() 55 | } 56 | 57 | fn is_active_context( 58 | name: &str, 59 | current_ctx: &Option, 60 | selected_ctx: Option, 61 | ) -> bool { 62 | match selected_ctx { 63 | Some(ctx) => name == ctx, 64 | None => match current_ctx { 65 | Some(ctx) => name == ctx, 66 | None => false, 67 | }, 68 | } 69 | } 70 | 71 | pub struct ContextResource {} 72 | 73 | #[async_trait] 74 | impl AppResource for ContextResource { 75 | fn render(_block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 76 | let title = format!(" Contexts [{}] ", app.data.contexts.items.len()); 77 | let block = layout_block_active(title.as_str(), app.light_theme); 78 | 79 | if !app.data.contexts.items.is_empty() { 80 | let rows = app.data.contexts.items.iter().map(|c| { 81 | let style = if c.is_active { 82 | style_secondary(app.light_theme) 83 | } else { 84 | style_primary(app.light_theme) 85 | }; 86 | Row::new(vec![ 87 | Cell::from(c.name.to_owned()), 88 | Cell::from(c.cluster.to_owned()), 89 | Cell::from(c.user.clone().unwrap_or("".to_string())), 90 | ]) 91 | .style(style) 92 | }); 93 | 94 | let table = Table::new( 95 | rows, 96 | [ 97 | Constraint::Percentage(34), 98 | Constraint::Percentage(33), 99 | Constraint::Percentage(33), 100 | ], 101 | ) 102 | .header(table_header_style( 103 | vec!["Context", "Cluster", "User"], 104 | app.light_theme, 105 | )) 106 | .block(block) 107 | .row_highlight_style(style_highlight()) 108 | .highlight_symbol(HIGHLIGHT); 109 | 110 | f.render_stateful_widget(table, area, &mut 
app.data.contexts.state); 111 | } else { 112 | loading(f, block, area, app.is_loading, app.light_theme); 113 | } 114 | } 115 | 116 | async fn get_resource(_nw: &Network<'_>) { 117 | // not required 118 | unimplemented!() 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/app/cronjobs.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{api::batch::v1::CronJob, chrono::Utc}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row}, 6 | Frame, 7 | }; 8 | 9 | use super::{ 10 | models::{AppResource, KubeResource}, 11 | utils, ActiveBlock, App, 12 | }; 13 | use crate::{ 14 | draw_resource_tab, 15 | network::Network, 16 | ui::utils::{ 17 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 18 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 19 | DESCRIBE_YAML_AND_ESC_HINT, 20 | }, 21 | }; 22 | 23 | #[derive(Clone, Debug, PartialEq)] 24 | pub struct KubeCronJob { 25 | pub name: String, 26 | pub namespace: String, 27 | pub schedule: String, 28 | pub last_schedule: String, 29 | pub suspend: bool, 30 | pub active: usize, 31 | pub age: String, 32 | k8s_obj: CronJob, 33 | } 34 | 35 | impl From for KubeCronJob { 36 | fn from(cronjob: CronJob) -> Self { 37 | let (last_schedule, active) = match &cronjob.status { 38 | Some(cjs) => ( 39 | utils::to_age_secs(cjs.last_schedule_time.as_ref(), Utc::now()), 40 | cjs.active.clone().unwrap_or_default().len(), 41 | ), 42 | None => ("".to_string(), 0), 43 | }; 44 | 45 | let (schedule, suspend) = match &cronjob.spec { 46 | Some(cjs) => (cjs.schedule.clone(), cjs.suspend.unwrap_or_default()), 47 | None => ("".to_string(), false), 48 | }; 49 | 50 | KubeCronJob { 51 | name: cronjob.metadata.name.clone().unwrap_or_default(), 52 | namespace: cronjob.metadata.namespace.clone().unwrap_or_default(), 53 | schedule, 54 | suspend, 55 | last_schedule, 56 | active, 57 | age: utils::to_age(cronjob.metadata.creation_timestamp.as_ref(), Utc::now()), 58 | k8s_obj: utils::sanitize_obj(cronjob), 59 | } 60 | } 61 | } 62 | 63 | impl KubeResource for KubeCronJob { 64 | fn get_name(&self) -> &String { 65 | &self.name 66 | } 67 | fn get_k8s_obj(&self) -> &CronJob { 68 | &self.k8s_obj 69 | } 70 | } 71 | 72 | static CRON_JOBS_TITLE: &str = "CronJobs"; 73 | 74 | pub struct CronJobResource {} 75 | 76 | #[async_trait] 77 | impl AppResource for CronJobResource { 78 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 79 | draw_resource_tab!( 80 | CRON_JOBS_TITLE, 81 | block, 82 | f, 83 | app, 84 | area, 85 | Self::render, 86 | draw_block, 87 | app.data.cronjobs 88 | ); 89 | } 90 | 91 | async fn get_resource(nw: &Network<'_>) { 92 | let items: Vec = nw.get_namespaced_resources(CronJob::into).await; 93 | 94 | let mut app = nw.app.lock().await; 95 | app.data.cronjobs.set_items(items); 96 | } 97 | } 98 | 99 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 100 | let title = get_resource_title(app, CRON_JOBS_TITLE, "", app.data.cronjobs.items.len()); 101 | 102 | draw_resource_block( 103 | f, 104 | area, 105 | ResourceTableProps { 106 | title, 107 | inline_help: DESCRIBE_YAML_AND_ESC_HINT.into(), 108 | resource: &mut app.data.cronjobs, 109 | table_headers: vec![ 110 | "Namespace", 111 | "Name", 112 | "Schedule", 113 | "Last Scheduled", 114 | "Suspend", 115 | "Active", 116 | "Age", 117 | ], 118 | column_widths: vec![ 119 | 
Constraint::Percentage(20), 120 | Constraint::Percentage(25), 121 | Constraint::Percentage(15), 122 | Constraint::Percentage(10), 123 | Constraint::Percentage(10), 124 | Constraint::Percentage(10), 125 | Constraint::Percentage(10), 126 | ], 127 | }, 128 | |c| { 129 | Row::new(vec![ 130 | Cell::from(c.namespace.to_owned()), 131 | Cell::from(c.name.to_owned()), 132 | Cell::from(c.schedule.to_owned()), 133 | Cell::from(c.last_schedule.to_string()), 134 | Cell::from(c.suspend.to_string()), 135 | Cell::from(c.active.to_string()), 136 | Cell::from(c.age.to_owned()), 137 | ]) 138 | .style(style_primary(app.light_theme)) 139 | }, 140 | app.light_theme, 141 | app.is_loading, 142 | app.data.selected.filter.to_owned(), 143 | ); 144 | } 145 | 146 | #[cfg(test)] 147 | mod tests { 148 | use super::*; 149 | use crate::app::test_utils::{convert_resource_from_file, get_time}; 150 | 151 | #[test] 152 | fn test_cronjobs_from_api() { 153 | let (jobs, jobs_list): (Vec, Vec<_>) = convert_resource_from_file("cronjobs"); 154 | 155 | assert_eq!(jobs.len(), 1); 156 | assert_eq!( 157 | jobs[0], 158 | KubeCronJob { 159 | name: "hello".into(), 160 | namespace: "default".into(), 161 | schedule: "*/1 * * * *".into(), 162 | suspend: false, 163 | active: 0, 164 | last_schedule: utils::to_age_secs(Some(&get_time("2021-07-05T09:39:00Z")), Utc::now()), 165 | age: utils::to_age(Some(&get_time("2021-07-05T09:37:21Z")), Utc::now()), 166 | k8s_obj: jobs_list[0].clone(), 167 | } 168 | ); 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /src/app/daemonsets.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{api::apps::v1::DaemonSet, chrono::Utc}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row}, 6 | Frame, 7 | }; 8 | 9 | use super::{ 10 | models::{AppResource, KubeResource}, 11 | utils, ActiveBlock, App, 12 | }; 13 | use crate::{ 14 | draw_resource_tab, 15 | network::Network, 16 | ui::utils::{ 17 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 18 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 19 | DESCRIBE_AND_YAML_HINT, 20 | }, 21 | }; 22 | 23 | #[derive(Clone, Debug, PartialEq)] 24 | pub struct KubeDaemonSet { 25 | pub name: String, 26 | pub namespace: String, 27 | pub desired: i32, 28 | pub current: i32, 29 | pub ready: i32, 30 | pub up_to_date: i32, 31 | pub available: i32, 32 | pub age: String, 33 | k8s_obj: DaemonSet, 34 | } 35 | impl From for KubeDaemonSet { 36 | fn from(ds: DaemonSet) -> Self { 37 | let (desired, current, ready, up_to_date, available) = match ds.status.as_ref() { 38 | Some(s) => ( 39 | s.desired_number_scheduled, 40 | s.current_number_scheduled, 41 | s.number_ready, 42 | s.updated_number_scheduled.unwrap_or_default(), 43 | s.number_available.unwrap_or_default(), 44 | ), 45 | _ => (0, 0, 0, 0, 0), 46 | }; 47 | 48 | KubeDaemonSet { 49 | name: ds.metadata.name.clone().unwrap_or_default(), 50 | namespace: ds.metadata.namespace.clone().unwrap_or_default(), 51 | age: utils::to_age(ds.metadata.creation_timestamp.as_ref(), Utc::now()), 52 | desired, 53 | current, 54 | ready, 55 | up_to_date, 56 | available, 57 | k8s_obj: utils::sanitize_obj(ds), 58 | } 59 | } 60 | } 61 | 62 | impl KubeResource for KubeDaemonSet { 63 | fn get_name(&self) -> &String { 64 | &self.name 65 | } 66 | fn get_k8s_obj(&self) -> &DaemonSet { 67 | &self.k8s_obj 68 | } 69 | } 70 | 71 | static 
DAEMON_SETS_TITLE: &str = "DaemonSets"; 72 | 73 | pub struct DaemonSetResource {} 74 | 75 | #[async_trait] 76 | impl AppResource for DaemonSetResource { 77 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 78 | draw_resource_tab!( 79 | DAEMON_SETS_TITLE, 80 | block, 81 | f, 82 | app, 83 | area, 84 | Self::render, 85 | draw_block, 86 | app.data.daemon_sets 87 | ); 88 | } 89 | 90 | async fn get_resource(nw: &Network<'_>) { 91 | let items: Vec = nw.get_namespaced_resources(DaemonSet::into).await; 92 | 93 | let mut app = nw.app.lock().await; 94 | app.data.daemon_sets.set_items(items); 95 | } 96 | } 97 | 98 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 99 | let title = get_resource_title(app, DAEMON_SETS_TITLE, "", app.data.daemon_sets.items.len()); 100 | 101 | draw_resource_block( 102 | f, 103 | area, 104 | ResourceTableProps { 105 | title, 106 | inline_help: DESCRIBE_AND_YAML_HINT.into(), 107 | resource: &mut app.data.daemon_sets, 108 | table_headers: vec![ 109 | "Namespace", 110 | "Name", 111 | "Desired", 112 | "Current", 113 | "Ready", 114 | "Up-to-date", 115 | "Available", 116 | "Age", 117 | ], 118 | column_widths: vec![ 119 | Constraint::Percentage(20), 120 | Constraint::Percentage(20), 121 | Constraint::Percentage(10), 122 | Constraint::Percentage(10), 123 | Constraint::Percentage(10), 124 | Constraint::Percentage(10), 125 | Constraint::Percentage(10), 126 | Constraint::Percentage(10), 127 | ], 128 | }, 129 | |c| { 130 | Row::new(vec![ 131 | Cell::from(c.namespace.to_owned()), 132 | Cell::from(c.name.to_owned()), 133 | Cell::from(c.desired.to_string()), 134 | Cell::from(c.current.to_string()), 135 | Cell::from(c.ready.to_string()), 136 | Cell::from(c.up_to_date.to_string()), 137 | Cell::from(c.available.to_string()), 138 | Cell::from(c.age.to_owned()), 139 | ]) 140 | .style(style_primary(app.light_theme)) 141 | }, 142 | app.light_theme, 143 | app.is_loading, 144 | app.data.selected.filter.to_owned(), 145 | ); 146 | } 147 | 148 | #[cfg(test)] 149 | mod tests { 150 | use super::*; 151 | use crate::app::test_utils::*; 152 | 153 | #[test] 154 | fn test_daemon_sets_from_api() { 155 | let (dss, dss_list): (Vec, Vec<_>) = convert_resource_from_file("daemonsets"); 156 | 157 | assert_eq!(dss.len(), 1); 158 | assert_eq!( 159 | dss[0], 160 | KubeDaemonSet { 161 | name: "svclb-traefik".into(), 162 | namespace: "kube-system".into(), 163 | age: utils::to_age(Some(&get_time("2021-07-05T09:36:45Z")), Utc::now()), 164 | k8s_obj: dss_list[0].clone(), 165 | desired: 1, 166 | current: 1, 167 | ready: 1, 168 | up_to_date: 1, 169 | available: 1, 170 | } 171 | ); 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /src/app/deployments.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{api::apps::v1::Deployment, chrono::Utc}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row}, 6 | Frame, 7 | }; 8 | 9 | use super::{ 10 | models::{AppResource, KubeResource}, 11 | utils, ActiveBlock, App, 12 | }; 13 | use crate::{ 14 | draw_resource_tab, 15 | network::Network, 16 | ui::utils::{ 17 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 18 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 19 | DESCRIBE_AND_YAML_HINT, 20 | }, 21 | }; 22 | 23 | #[derive(Clone, Debug, PartialEq)] 24 | pub struct KubeDeployment { 25 | pub name: String, 26 | 
pub namespace: String, 27 | pub ready: String, 28 | pub updated: i32, 29 | pub available: i32, 30 | pub age: String, 31 | k8s_obj: Deployment, 32 | } 33 | 34 | impl From for KubeDeployment { 35 | fn from(deployment: Deployment) -> Self { 36 | let (ready, available, updated) = match &deployment.status { 37 | Some(s) => ( 38 | format!( 39 | "{}/{}", 40 | s.available_replicas.unwrap_or_default(), 41 | s.replicas.unwrap_or_default() 42 | ), 43 | s.available_replicas.unwrap_or_default(), 44 | s.updated_replicas.unwrap_or_default(), 45 | ), 46 | _ => ("".into(), 0, 0), 47 | }; 48 | 49 | Self { 50 | name: deployment.metadata.name.clone().unwrap_or_default(), 51 | namespace: deployment.metadata.namespace.clone().unwrap_or_default(), 52 | age: utils::to_age(deployment.metadata.creation_timestamp.as_ref(), Utc::now()), 53 | available, 54 | updated, 55 | ready, 56 | k8s_obj: utils::sanitize_obj(deployment), 57 | } 58 | } 59 | } 60 | 61 | impl KubeResource for KubeDeployment { 62 | fn get_name(&self) -> &String { 63 | &self.name 64 | } 65 | fn get_k8s_obj(&self) -> &Deployment { 66 | &self.k8s_obj 67 | } 68 | } 69 | 70 | static DEPLOYMENTS_TITLE: &str = "Deployments"; 71 | 72 | pub struct DeploymentResource {} 73 | 74 | #[async_trait] 75 | impl AppResource for DeploymentResource { 76 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 77 | draw_resource_tab!( 78 | DEPLOYMENTS_TITLE, 79 | block, 80 | f, 81 | app, 82 | area, 83 | Self::render, 84 | draw_block, 85 | app.data.deployments 86 | ); 87 | } 88 | 89 | async fn get_resource(nw: &Network<'_>) { 90 | let items: Vec = nw.get_namespaced_resources(Deployment::into).await; 91 | 92 | let mut app = nw.app.lock().await; 93 | app.data.deployments.set_items(items); 94 | } 95 | } 96 | 97 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 98 | let title = get_resource_title(app, DEPLOYMENTS_TITLE, "", app.data.deployments.items.len()); 99 | 100 | draw_resource_block( 101 | f, 102 | area, 103 | ResourceTableProps { 104 | title, 105 | inline_help: DESCRIBE_AND_YAML_HINT.into(), 106 | resource: &mut app.data.deployments, 107 | table_headers: vec![ 108 | "Namespace", 109 | "Name", 110 | "Ready", 111 | "Up-to-date", 112 | "Available", 113 | "Age", 114 | ], 115 | column_widths: vec![ 116 | Constraint::Percentage(25), 117 | Constraint::Percentage(35), 118 | Constraint::Percentage(10), 119 | Constraint::Percentage(10), 120 | Constraint::Percentage(10), 121 | Constraint::Percentage(10), 122 | ], 123 | }, 124 | |c| { 125 | Row::new(vec![ 126 | Cell::from(c.namespace.to_owned()), 127 | Cell::from(c.name.to_owned()), 128 | Cell::from(c.ready.to_owned()), 129 | Cell::from(c.updated.to_string()), 130 | Cell::from(c.available.to_string()), 131 | Cell::from(c.age.to_owned()), 132 | ]) 133 | .style(style_primary(app.light_theme)) 134 | }, 135 | app.light_theme, 136 | app.is_loading, 137 | app.data.selected.filter.to_owned(), 138 | ); 139 | } 140 | 141 | #[cfg(test)] 142 | mod tests { 143 | use super::*; 144 | use crate::app::test_utils::*; 145 | 146 | #[test] 147 | fn test_deployments_from_api() { 148 | let (deployments, deployment_list): (Vec, Vec<_>) = 149 | convert_resource_from_file("deployments"); 150 | 151 | assert_eq!(deployments.len(), 4); 152 | assert_eq!( 153 | deployments[0], 154 | KubeDeployment { 155 | name: "metrics-server".into(), 156 | namespace: "kube-system".into(), 157 | age: utils::to_age(Some(&get_time("2021-05-10T21:48:06Z")), Utc::now()), 158 | k8s_obj: deployment_list[0].clone(), 159 | available: 1, 160 | 
updated: 1, 161 | ready: "1/1".into(), 162 | } 163 | ); 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /src/app/dynamic.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use async_trait::async_trait; 3 | use k8s_openapi::chrono::Utc; 4 | use kube::{ 5 | core::DynamicObject, 6 | discovery::{ApiResource, Scope}, 7 | Api, ResourceExt, 8 | }; 9 | use ratatui::{ 10 | layout::{Constraint, Rect}, 11 | widgets::{Cell, Row}, 12 | Frame, 13 | }; 14 | 15 | use super::{ 16 | models::{AppResource, KubeResource}, 17 | utils, ActiveBlock, App, 18 | }; 19 | use crate::{ 20 | draw_resource_tab, 21 | network::Network, 22 | ui::utils::{ 23 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 24 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 25 | DESCRIBE_YAML_AND_ESC_HINT, 26 | }, 27 | }; 28 | 29 | #[derive(Clone, Debug)] 30 | pub struct KubeDynamicKind { 31 | // pub name: String, 32 | // pub group: String, 33 | // pub version: String, 34 | // pub api_version: String, 35 | pub kind: String, 36 | pub scope: Scope, 37 | pub api_resource: ApiResource, 38 | } 39 | 40 | impl KubeDynamicKind { 41 | pub fn new(ar: ApiResource, scope: Scope) -> Self { 42 | KubeDynamicKind { 43 | api_resource: ar.clone(), 44 | // name: ar.plural, 45 | // group: ar.group, 46 | // version: ar.version, 47 | // api_version: ar.api_version, 48 | kind: ar.kind, 49 | scope, 50 | } 51 | } 52 | } 53 | 54 | #[derive(Clone, Debug, PartialEq)] 55 | pub struct KubeDynamicResource { 56 | pub name: String, 57 | pub namespace: Option, 58 | pub age: String, 59 | k8s_obj: DynamicObject, 60 | } 61 | 62 | impl From for KubeDynamicResource { 63 | fn from(item: DynamicObject) -> Self { 64 | KubeDynamicResource { 65 | name: item.name_any(), 66 | namespace: item.clone().metadata.namespace, 67 | age: utils::to_age(item.metadata.creation_timestamp.as_ref(), Utc::now()), 68 | k8s_obj: item, 69 | } 70 | } 71 | } 72 | 73 | impl KubeResource for KubeDynamicResource { 74 | fn get_name(&self) -> &String { 75 | &self.name 76 | } 77 | fn get_k8s_obj(&self) -> &DynamicObject { 78 | &self.k8s_obj 79 | } 80 | } 81 | 82 | pub struct DynamicResource {} 83 | 84 | #[async_trait] 85 | impl AppResource for DynamicResource { 86 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 87 | let title = if let Some(res) = &app.data.selected.dynamic_kind { 88 | res.kind.as_str() 89 | } else { 90 | "" 91 | }; 92 | draw_resource_tab!( 93 | title, 94 | block, 95 | f, 96 | app, 97 | area, 98 | Self::render, 99 | draw_block, 100 | app.data.dynamic_resources 101 | ); 102 | } 103 | 104 | /// fetch entries for a custom resource from the cluster 105 | async fn get_resource(nw: &Network<'_>) { 106 | let mut app = nw.app.lock().await; 107 | 108 | if let Some(drs) = &app.data.selected.dynamic_kind { 109 | let api: Api = if drs.scope == Scope::Cluster { 110 | Api::all_with(nw.client.clone(), &drs.api_resource) 111 | } else { 112 | match &app.data.selected.ns { 113 | Some(ns) => Api::namespaced_with(nw.client.clone(), ns, &drs.api_resource), 114 | None => Api::all_with(nw.client.clone(), &drs.api_resource), 115 | } 116 | }; 117 | 118 | let items = match api.list(&Default::default()).await { 119 | Ok(list) => list 120 | .items 121 | .iter() 122 | .map(|item| KubeDynamicResource::from(item.clone())) 123 | .collect::>(), 124 | Err(e) => { 125 | nw.handle_error(anyhow!("Failed to get 
dynamic resources. {:?}", e)) 126 | .await; 127 | return; 128 | } 129 | }; 130 | app.data.dynamic_resources.set_items(items); 131 | } 132 | } 133 | } 134 | 135 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 136 | let (title, scope) = if let Some(res) = &app.data.selected.dynamic_kind { 137 | (res.kind.as_str(), res.scope.clone()) 138 | } else { 139 | ("", Scope::Cluster) 140 | }; 141 | let title = get_resource_title(app, title, "", app.data.dynamic_resources.items.len()); 142 | 143 | let (table_headers, column_widths) = if scope == Scope::Cluster { 144 | ( 145 | vec!["Name", "Age"], 146 | vec![Constraint::Percentage(70), Constraint::Percentage(30)], 147 | ) 148 | } else { 149 | ( 150 | vec!["Namespace", "Name", "Age"], 151 | vec![ 152 | Constraint::Percentage(30), 153 | Constraint::Percentage(50), 154 | Constraint::Percentage(20), 155 | ], 156 | ) 157 | }; 158 | 159 | draw_resource_block( 160 | f, 161 | area, 162 | ResourceTableProps { 163 | title, 164 | inline_help: DESCRIBE_YAML_AND_ESC_HINT.into(), 165 | resource: &mut app.data.dynamic_resources, 166 | table_headers, 167 | column_widths, 168 | }, 169 | |c| { 170 | let rows = if scope == Scope::Cluster { 171 | Row::new(vec![ 172 | Cell::from(c.name.to_owned()), 173 | Cell::from(c.age.to_owned()), 174 | ]) 175 | } else { 176 | Row::new(vec![ 177 | Cell::from(c.namespace.clone().unwrap_or_default()), 178 | Cell::from(c.name.to_owned()), 179 | Cell::from(c.age.to_owned()), 180 | ]) 181 | }; 182 | rows.style(style_primary(app.light_theme)) 183 | }, 184 | app.light_theme, 185 | app.is_loading, 186 | app.data.selected.filter.to_owned(), 187 | ); 188 | } 189 | 190 | #[cfg(test)] 191 | mod tests { 192 | use super::*; 193 | use crate::app::test_utils::*; 194 | 195 | #[test] 196 | fn test_dynamic_resource_from_api() { 197 | let (dynamic_resource, res_list): (Vec, Vec<_>) = 198 | convert_resource_from_file("dynamic_resource"); 199 | 200 | assert_eq!(dynamic_resource.len(), 6); 201 | assert_eq!( 202 | dynamic_resource[0], 203 | KubeDynamicResource { 204 | name: "consul-5bb65dd4c8".into(), 205 | namespace: Some("jhipster".into()), 206 | age: utils::to_age(Some(&get_time("2023-06-30T17:27:23Z")), Utc::now()), 207 | k8s_obj: res_list[0].clone(), 208 | } 209 | ); 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /src/app/jobs.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{api::batch::v1::Job, chrono::Utc}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row}, 6 | Frame, 7 | }; 8 | 9 | use super::{ 10 | models::{AppResource, KubeResource}, 11 | utils, ActiveBlock, App, 12 | }; 13 | use crate::{ 14 | draw_resource_tab, 15 | network::Network, 16 | ui::utils::{ 17 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 18 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 19 | DESCRIBE_AND_YAML_HINT, 20 | }, 21 | }; 22 | 23 | #[derive(Clone, Debug, PartialEq)] 24 | pub struct KubeJob { 25 | pub name: String, 26 | pub namespace: String, 27 | pub completions: String, 28 | pub duration: String, 29 | pub age: String, 30 | k8s_obj: Job, 31 | } 32 | 33 | impl From for KubeJob { 34 | fn from(job: Job) -> Self { 35 | let completions = match (job.spec.as_ref(), job.status.as_ref()) { 36 | (Some(spc), Some(stat)) => match spc.completions { 37 | Some(c) => format!("{:?}/{:?}", stat.succeeded.unwrap_or_default(), c), 38 
| None => match spc.parallelism { 39 | Some(p) => format!("{:?}/1 of {}", stat.succeeded.unwrap_or_default(), p), 40 | None => format!("{:?}/1", stat.succeeded), 41 | }, 42 | }, 43 | (None, Some(stat)) => format!("{:?}/1", stat.succeeded.unwrap_or_default()), 44 | _ => "".into(), 45 | }; 46 | 47 | let duration = match job.status.as_ref() { 48 | Some(stat) => match stat.start_time.as_ref() { 49 | Some(st) => match stat.completion_time.as_ref() { 50 | Some(ct) => { 51 | let duration = ct.0.signed_duration_since(st.0); 52 | utils::duration_to_age(duration, true) 53 | } 54 | None => utils::to_age(stat.start_time.as_ref(), Utc::now()), 55 | }, 56 | None => "".to_string(), 57 | }, 58 | None => "".to_string(), 59 | }; 60 | 61 | Self { 62 | name: job.metadata.name.clone().unwrap_or_default(), 63 | namespace: job.metadata.namespace.clone().unwrap_or_default(), 64 | completions, 65 | duration, 66 | age: utils::to_age(job.metadata.creation_timestamp.as_ref(), Utc::now()), 67 | k8s_obj: utils::sanitize_obj(job), 68 | } 69 | } 70 | } 71 | 72 | impl KubeResource for KubeJob { 73 | fn get_name(&self) -> &String { 74 | &self.name 75 | } 76 | fn get_k8s_obj(&self) -> &Job { 77 | &self.k8s_obj 78 | } 79 | } 80 | 81 | static JOBS_TITLE: &str = "Jobs"; 82 | 83 | pub struct JobResource {} 84 | 85 | #[async_trait] 86 | impl AppResource for JobResource { 87 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 88 | draw_resource_tab!( 89 | JOBS_TITLE, 90 | block, 91 | f, 92 | app, 93 | area, 94 | Self::render, 95 | draw_block, 96 | app.data.jobs 97 | ); 98 | } 99 | 100 | async fn get_resource(nw: &Network<'_>) { 101 | let items: Vec = nw.get_namespaced_resources(Job::into).await; 102 | 103 | let mut app = nw.app.lock().await; 104 | app.data.jobs.set_items(items); 105 | } 106 | } 107 | 108 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 109 | let title = get_resource_title(app, JOBS_TITLE, "", app.data.jobs.items.len()); 110 | 111 | draw_resource_block( 112 | f, 113 | area, 114 | ResourceTableProps { 115 | title, 116 | inline_help: DESCRIBE_AND_YAML_HINT.into(), 117 | resource: &mut app.data.jobs, 118 | table_headers: vec!["Namespace", "Name", "Completions", "Duration", "Age"], 119 | column_widths: vec![ 120 | Constraint::Percentage(25), 121 | Constraint::Percentage(40), 122 | Constraint::Percentage(15), 123 | Constraint::Percentage(10), 124 | Constraint::Percentage(10), 125 | ], 126 | }, 127 | |c| { 128 | Row::new(vec![ 129 | Cell::from(c.namespace.to_owned()), 130 | Cell::from(c.name.to_owned()), 131 | Cell::from(c.completions.to_owned()), 132 | Cell::from(c.duration.to_string()), 133 | Cell::from(c.age.to_owned()), 134 | ]) 135 | .style(style_primary(app.light_theme)) 136 | }, 137 | app.light_theme, 138 | app.is_loading, 139 | app.data.selected.filter.to_owned(), 140 | ); 141 | } 142 | 143 | #[cfg(test)] 144 | mod tests { 145 | use super::*; 146 | use crate::app::test_utils::{convert_resource_from_file, get_time}; 147 | 148 | #[test] 149 | fn test_jobs_from_api() { 150 | let (jobs, jobs_list): (Vec, Vec<_>) = convert_resource_from_file("jobs"); 151 | 152 | assert_eq!(jobs.len(), 3); 153 | assert_eq!( 154 | jobs[0], 155 | KubeJob { 156 | name: "helm-install-traefik".into(), 157 | namespace: "kube-system".into(), 158 | age: utils::to_age(Some(&get_time("2021-06-11T13:49:45Z")), Utc::now()), 159 | k8s_obj: jobs_list[0].clone(), 160 | completions: "1/1".into(), 161 | duration: "39m44s".into() 162 | } 163 | ); 164 | assert_eq!( 165 | jobs[1], 166 | KubeJob { 167 | name: 
"helm-install-traefik-2".into(), 168 | namespace: "kube-system".into(), 169 | age: utils::to_age(Some(&get_time("2021-06-11T13:49:45Z")), Utc::now()), 170 | k8s_obj: jobs_list[1].clone(), 171 | completions: "1/1 of 1".into(), 172 | duration: "39m44s".into() 173 | } 174 | ); 175 | assert_eq!( 176 | jobs[2], 177 | KubeJob { 178 | name: "helm-install-traefik-3".into(), 179 | namespace: "kube-system".into(), 180 | age: utils::to_age(Some(&get_time("2021-06-11T13:49:45Z")), Utc::now()), 181 | k8s_obj: jobs_list[2].clone(), 182 | completions: "1/1".into(), 183 | duration: "39m44s".into() 184 | } 185 | ); 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /src/app/network_policies.rs: -------------------------------------------------------------------------------- 1 | use std::vec; 2 | 3 | use async_trait::async_trait; 4 | use k8s_openapi::{api::networking::v1::NetworkPolicy, chrono::Utc}; 5 | use ratatui::{ 6 | layout::{Constraint, Rect}, 7 | widgets::{Cell, Row}, 8 | Frame, 9 | }; 10 | 11 | use super::{ 12 | models::{AppResource, KubeResource}, 13 | utils, ActiveBlock, App, 14 | }; 15 | use crate::{ 16 | draw_resource_tab, 17 | network::Network, 18 | ui::utils::{ 19 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 20 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 21 | DESCRIBE_YAML_AND_ESC_HINT, 22 | }, 23 | }; 24 | 25 | #[derive(Clone, Debug, PartialEq)] 26 | pub struct KubeNetworkPolicy { 27 | pub name: String, 28 | pub namespace: String, 29 | pub pod_selector: String, 30 | pub policy_types: String, 31 | pub age: String, 32 | k8s_obj: NetworkPolicy, 33 | } 34 | 35 | impl From for KubeNetworkPolicy { 36 | fn from(nw_policy: NetworkPolicy) -> Self { 37 | let pod_selector = match &nw_policy.spec { 38 | Some(s) => { 39 | let mut pod_selector = vec![]; 40 | if let Some(match_labels) = &s.pod_selector.match_labels { 41 | for (k, v) in match_labels { 42 | pod_selector.push(format!("{}={}", k, v)); 43 | } 44 | } 45 | pod_selector 46 | } 47 | _ => vec![], 48 | }; 49 | 50 | Self { 51 | name: nw_policy.metadata.name.clone().unwrap_or_default(), 52 | namespace: nw_policy.metadata.namespace.clone().unwrap_or_default(), 53 | age: utils::to_age(nw_policy.metadata.creation_timestamp.as_ref(), Utc::now()), 54 | pod_selector: pod_selector.join(","), 55 | policy_types: nw_policy.spec.as_ref().map_or_else( 56 | || "".into(), 57 | |s| s.policy_types.clone().unwrap_or_default().join(","), 58 | ), 59 | k8s_obj: utils::sanitize_obj(nw_policy), 60 | } 61 | } 62 | } 63 | 64 | impl KubeResource for KubeNetworkPolicy { 65 | fn get_name(&self) -> &String { 66 | &self.name 67 | } 68 | fn get_k8s_obj(&self) -> &NetworkPolicy { 69 | &self.k8s_obj 70 | } 71 | } 72 | 73 | static NW_POLICY_TITLE: &str = "NetworkPolicies"; 74 | 75 | pub struct NetworkPolicyResource {} 76 | 77 | #[async_trait] 78 | impl AppResource for NetworkPolicyResource { 79 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 80 | draw_resource_tab!( 81 | NW_POLICY_TITLE, 82 | block, 83 | f, 84 | app, 85 | area, 86 | Self::render, 87 | draw_block, 88 | app.data.nw_policies 89 | ); 90 | } 91 | 92 | async fn get_resource(nw: &Network<'_>) { 93 | let items: Vec = nw.get_namespaced_resources(NetworkPolicy::into).await; 94 | 95 | let mut app = nw.app.lock().await; 96 | app.data.nw_policies.set_items(items); 97 | } 98 | } 99 | 100 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 101 | let title 
= get_resource_title(app, NW_POLICY_TITLE, "", app.data.nw_policies.items.len()); 102 | 103 | draw_resource_block( 104 | f, 105 | area, 106 | ResourceTableProps { 107 | title, 108 | inline_help: DESCRIBE_YAML_AND_ESC_HINT.into(), 109 | resource: &mut app.data.nw_policies, 110 | table_headers: vec!["Namespace", "Name", "Pod Selector", "Policy Types", "Age"], 111 | column_widths: vec![ 112 | Constraint::Percentage(20), 113 | Constraint::Percentage(20), 114 | Constraint::Percentage(30), 115 | Constraint::Percentage(20), 116 | Constraint::Percentage(10), 117 | ], 118 | }, 119 | |c| { 120 | Row::new(vec![ 121 | Cell::from(c.namespace.to_owned()), 122 | Cell::from(c.name.to_owned()), 123 | Cell::from(c.pod_selector.to_owned()), 124 | Cell::from(c.policy_types.to_owned()), 125 | Cell::from(c.age.to_owned()), 126 | ]) 127 | .style(style_primary(app.light_theme)) 128 | }, 129 | app.light_theme, 130 | app.is_loading, 131 | app.data.selected.filter.to_owned(), 132 | ); 133 | } 134 | 135 | #[cfg(test)] 136 | mod tests { 137 | use super::*; 138 | use crate::app::test_utils::*; 139 | 140 | #[test] 141 | fn test_nw_policys_from_api() { 142 | let (nw_policys, nw_policy_list): (Vec, Vec<_>) = 143 | convert_resource_from_file("network_policy"); 144 | 145 | assert_eq!(nw_policys.len(), 4); 146 | assert_eq!( 147 | nw_policys[3], 148 | KubeNetworkPolicy { 149 | name: "sample-network-policy-4".into(), 150 | namespace: "default".into(), 151 | age: utils::to_age(Some(&get_time("2023-07-04T17:04:33Z")), Utc::now()), 152 | k8s_obj: nw_policy_list[3].clone(), 153 | pod_selector: "app=webapp,app3=webapp3".into(), 154 | policy_types: "Egress,Ingress".into(), 155 | } 156 | ); 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /src/app/ns.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use async_trait::async_trait; 3 | use k8s_openapi::api::core::v1::Namespace; 4 | use kube::{api::ListParams, Api}; 5 | use ratatui::{ 6 | layout::{Constraint, Rect}, 7 | widgets::{Cell, Row, Table}, 8 | Frame, 9 | }; 10 | 11 | use super::{ 12 | key_binding::DEFAULT_KEYBINDING, 13 | models::{AppResource, KubeResource}, 14 | utils::{self, UNKNOWN}, 15 | ActiveBlock, App, 16 | }; 17 | use crate::{ 18 | network::Network, 19 | ui::{ 20 | utils::{ 21 | filter_by_resource_name, layout_block_default, loading, style_highlight, style_primary, 22 | style_secondary, table_header_style, 23 | }, 24 | HIGHLIGHT, 25 | }, 26 | }; 27 | 28 | #[derive(Clone, Debug, PartialEq, Default)] 29 | pub struct KubeNs { 30 | pub name: String, 31 | pub status: String, 32 | k8s_obj: Namespace, 33 | } 34 | 35 | impl From for KubeNs { 36 | fn from(ns: Namespace) -> Self { 37 | let status = match &ns.status { 38 | Some(stat) => match &stat.phase { 39 | Some(phase) => phase.clone(), 40 | _ => UNKNOWN.into(), 41 | }, 42 | _ => UNKNOWN.into(), 43 | }; 44 | 45 | KubeNs { 46 | name: ns.metadata.name.clone().unwrap_or_default(), 47 | status, 48 | k8s_obj: utils::sanitize_obj(ns), 49 | } 50 | } 51 | } 52 | 53 | impl KubeResource for KubeNs { 54 | fn get_name(&self) -> &String { 55 | &self.name 56 | } 57 | fn get_k8s_obj(&self) -> &Namespace { 58 | &self.k8s_obj 59 | } 60 | } 61 | 62 | pub struct NamespaceResource {} 63 | 64 | #[async_trait] 65 | impl AppResource for NamespaceResource { 66 | fn render(_block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 67 | let title = format!( 68 | " Namespaces {} (all: {}) ", 69 | 
DEFAULT_KEYBINDING.jump_to_namespace.key, DEFAULT_KEYBINDING.select_all_namespace.key 70 | ); 71 | let mut block = layout_block_default(title.as_str()); 72 | 73 | if app.get_current_route().active_block == ActiveBlock::Namespaces { 74 | block = block.style(style_secondary(app.light_theme)) 75 | } 76 | 77 | if !app.data.namespaces.items.is_empty() { 78 | let rows = app.data.namespaces.items.iter().filter_map(|s| { 79 | let style = if Some(s.name.clone()) == app.data.selected.ns { 80 | style_secondary(app.light_theme) 81 | } else { 82 | style_primary(app.light_theme) 83 | }; 84 | 85 | let mapper = row_cell_mapper(s).style(style); 86 | // return only rows that match filter if filter is set 87 | filter_by_resource_name(app.data.selected.filter.clone(), s, mapper) 88 | }); 89 | 90 | let table = Table::new(rows, [Constraint::Length(22), Constraint::Length(6)]) 91 | .header(table_header_style(vec!["Name", "Status"], app.light_theme)) 92 | .block(block) 93 | .row_highlight_style(style_highlight()) 94 | .highlight_symbol(HIGHLIGHT); 95 | 96 | f.render_stateful_widget(table, area, &mut app.data.namespaces.state); 97 | } else { 98 | loading(f, block, area, app.is_loading, app.light_theme); 99 | } 100 | } 101 | 102 | async fn get_resource(nw: &Network<'_>) { 103 | let api: Api = Api::all(nw.client.clone()); 104 | 105 | let lp = ListParams::default(); 106 | match api.list(&lp).await { 107 | Ok(ns_list) => { 108 | let items = ns_list.into_iter().map(KubeNs::from).collect::>(); 109 | let mut app = nw.app.lock().await; 110 | app.data.namespaces.set_items(items); 111 | } 112 | Err(e) => { 113 | nw.handle_error(anyhow!("Failed to get namespaces. {:?}", e)) 114 | .await; 115 | } 116 | } 117 | } 118 | } 119 | 120 | fn row_cell_mapper(s: &KubeNs) -> Row<'static> { 121 | Row::new(vec![ 122 | Cell::from(s.name.to_owned()), 123 | Cell::from(s.status.to_owned()), 124 | ]) 125 | } 126 | 127 | #[cfg(test)] 128 | mod tests { 129 | use super::*; 130 | use crate::app::test_utils::convert_resource_from_file; 131 | 132 | #[test] 133 | fn test_namespace_from_api() { 134 | let (nss, ns_list): (Vec, Vec<_>) = convert_resource_from_file("ns"); 135 | 136 | assert_eq!(nss.len(), 4); 137 | assert_eq!( 138 | nss[0], 139 | KubeNs { 140 | name: "default".into(), 141 | status: "Active".into(), 142 | k8s_obj: ns_list[0].clone() 143 | } 144 | ); 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /src/app/pvcs.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{ 3 | api::core::v1::PersistentVolumeClaim, apimachinery::pkg::api::resource::Quantity, chrono::Utc, 4 | }; 5 | use ratatui::{ 6 | layout::{Constraint, Rect}, 7 | widgets::{Cell, Row}, 8 | Frame, 9 | }; 10 | 11 | use super::{ 12 | models::{AppResource, KubeResource}, 13 | utils::{self}, 14 | ActiveBlock, App, 15 | }; 16 | use crate::{ 17 | draw_resource_tab, 18 | network::Network, 19 | ui::utils::{ 20 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 21 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 22 | DESCRIBE_YAML_AND_ESC_HINT, 23 | }, 24 | }; 25 | 26 | #[derive(Clone, Debug, PartialEq)] 27 | pub struct KubePVC { 28 | pub name: String, 29 | pub namespace: String, 30 | pub status: String, 31 | pub volume: String, 32 | pub capacity: String, 33 | pub access_modes: String, 34 | pub storage_class: String, 35 | pub age: String, 36 | k8s_obj: PersistentVolumeClaim, 37 
| } 38 | 39 | impl From for KubePVC { 40 | fn from(pvc: PersistentVolumeClaim) -> Self { 41 | let quantity = Quantity::default(); 42 | let capacity = pvc 43 | .status 44 | .clone() 45 | .unwrap_or_default() 46 | .capacity 47 | .unwrap_or_default(); 48 | let capacity = capacity.get("storage").unwrap_or(&quantity); 49 | 50 | KubePVC { 51 | name: pvc.metadata.name.clone().unwrap_or_default(), 52 | namespace: pvc.metadata.namespace.clone().unwrap_or_default(), 53 | age: utils::to_age(pvc.metadata.creation_timestamp.as_ref(), Utc::now()), 54 | status: pvc 55 | .status 56 | .clone() 57 | .unwrap_or_default() 58 | .phase 59 | .unwrap_or_default(), 60 | volume: pvc 61 | .spec 62 | .clone() 63 | .unwrap_or_default() 64 | .volume_name 65 | .unwrap_or_default(), 66 | capacity: capacity.0.clone(), 67 | access_modes: pvc 68 | .spec 69 | .clone() 70 | .unwrap_or_default() 71 | .access_modes 72 | .unwrap_or_default() 73 | .join(","), 74 | storage_class: pvc 75 | .spec 76 | .clone() 77 | .unwrap_or_default() 78 | .storage_class_name 79 | .unwrap_or_default(), 80 | k8s_obj: utils::sanitize_obj(pvc), 81 | } 82 | } 83 | } 84 | 85 | impl KubeResource for KubePVC { 86 | fn get_name(&self) -> &String { 87 | &self.name 88 | } 89 | fn get_k8s_obj(&self) -> &PersistentVolumeClaim { 90 | &self.k8s_obj 91 | } 92 | } 93 | 94 | static PVC_TITLE: &str = "PersistentVolumeClaims"; 95 | 96 | pub struct PvcResource {} 97 | 98 | #[async_trait] 99 | impl AppResource for PvcResource { 100 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 101 | draw_resource_tab!( 102 | PVC_TITLE, 103 | block, 104 | f, 105 | app, 106 | area, 107 | Self::render, 108 | draw_block, 109 | app.data.pvcs 110 | ); 111 | } 112 | 113 | async fn get_resource(nw: &Network<'_>) { 114 | let items: Vec = nw 115 | .get_namespaced_resources(PersistentVolumeClaim::into) 116 | .await; 117 | 118 | let mut app = nw.app.lock().await; 119 | app.data.pvcs.set_items(items); 120 | } 121 | } 122 | 123 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 124 | let title = get_resource_title(app, PVC_TITLE, "", app.data.pvcs.items.len()); 125 | 126 | draw_resource_block( 127 | f, 128 | area, 129 | ResourceTableProps { 130 | title, 131 | inline_help: DESCRIBE_YAML_AND_ESC_HINT.into(), 132 | resource: &mut app.data.pvcs, 133 | table_headers: vec![ 134 | "Namespace", 135 | "Name", 136 | "Status", 137 | "Volume", 138 | "Capacity", 139 | "Access Modes", 140 | "Storage Class", 141 | "Age", 142 | ], 143 | column_widths: vec![ 144 | Constraint::Percentage(10), 145 | Constraint::Percentage(10), 146 | Constraint::Percentage(10), 147 | Constraint::Percentage(20), 148 | Constraint::Percentage(10), 149 | Constraint::Percentage(10), 150 | Constraint::Percentage(10), 151 | Constraint::Percentage(10), 152 | Constraint::Percentage(10), 153 | ], 154 | }, 155 | |c| { 156 | Row::new(vec![ 157 | Cell::from(c.namespace.to_owned()), 158 | Cell::from(c.name.to_owned()), 159 | Cell::from(c.status.to_owned()), 160 | Cell::from(c.volume.to_owned()), 161 | Cell::from(c.capacity.to_owned()), 162 | Cell::from(c.access_modes.to_owned()), 163 | Cell::from(c.storage_class.to_owned()), 164 | Cell::from(c.age.to_owned()), 165 | ]) 166 | .style(style_primary(app.light_theme)) 167 | }, 168 | app.light_theme, 169 | app.is_loading, 170 | app.data.selected.filter.to_owned(), 171 | ); 172 | } 173 | 174 | #[cfg(test)] 175 | mod tests { 176 | use super::*; 177 | use crate::app::test_utils::*; 178 | 179 | #[test] 180 | fn test_persistent_volume_claims_from_api() { 181 | let 
(pvc, pvc_list): (Vec, Vec<_>) = convert_resource_from_file("pvcs"); 182 | 183 | assert_eq!(pvc.len(), 3); 184 | assert_eq!( 185 | pvc[0], 186 | KubePVC { 187 | name: "data-consul-0".into(), 188 | namespace: "jhipster".into(), 189 | age: utils::to_age(Some(&get_time("2023-06-30T17:27:23Z")), Utc::now()), 190 | k8s_obj: pvc_list[0].clone(), 191 | status: "Bound".into(), 192 | volume: "pvc-149f1f3b-c0fd-471d-bc3e-d039369755ef".into(), 193 | capacity: "8Gi".into(), 194 | access_modes: "ReadWriteOnce".into(), 195 | storage_class: "gp2".into(), 196 | } 197 | ); 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /src/app/pvs.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{ 3 | api::core::v1::PersistentVolume, apimachinery::pkg::api::resource::Quantity, chrono::Utc, 4 | }; 5 | use ratatui::{ 6 | layout::{Constraint, Rect}, 7 | widgets::{Cell, Row}, 8 | Frame, 9 | }; 10 | 11 | use super::{ 12 | models::{AppResource, KubeResource}, 13 | utils::{self}, 14 | ActiveBlock, App, 15 | }; 16 | use crate::{ 17 | draw_resource_tab, 18 | network::Network, 19 | ui::utils::{ 20 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 21 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 22 | DESCRIBE_YAML_AND_ESC_HINT, 23 | }, 24 | }; 25 | 26 | #[derive(Clone, Debug, PartialEq)] 27 | pub struct KubePV { 28 | pub name: String, 29 | pub capacity: String, 30 | pub access_modes: String, 31 | pub reclaim_policy: String, 32 | pub status: String, 33 | pub claim: String, 34 | pub storage_class: String, 35 | pub reason: String, 36 | pub age: String, 37 | k8s_obj: PersistentVolume, 38 | } 39 | 40 | impl From for KubePV { 41 | fn from(pvc: PersistentVolume) -> Self { 42 | let quantity = Quantity::default(); 43 | let capacity = pvc 44 | .spec 45 | .clone() 46 | .unwrap_or_default() 47 | .capacity 48 | .unwrap_or_default(); 49 | let capacity = capacity.get("storage").unwrap_or(&quantity); 50 | 51 | let claim = pvc.spec.clone().unwrap_or_default().claim_ref; 52 | 53 | let claim = format!( 54 | "{}/{}", 55 | claim 56 | .clone() 57 | .unwrap_or_default() 58 | .namespace 59 | .unwrap_or_default(), 60 | claim.unwrap_or_default().name.unwrap_or_default() 61 | ); 62 | 63 | KubePV { 64 | name: pvc.metadata.name.clone().unwrap_or_default(), 65 | age: utils::to_age(pvc.metadata.creation_timestamp.as_ref(), Utc::now()), 66 | status: pvc 67 | .status 68 | .clone() 69 | .unwrap_or_default() 70 | .phase 71 | .unwrap_or_default(), 72 | capacity: capacity.0.clone(), 73 | access_modes: pvc 74 | .spec 75 | .clone() 76 | .unwrap_or_default() 77 | .access_modes 78 | .unwrap_or_default() 79 | .join(","), 80 | reclaim_policy: pvc 81 | .spec 82 | .clone() 83 | .unwrap_or_default() 84 | .persistent_volume_reclaim_policy 85 | .unwrap_or_default(), 86 | claim, 87 | storage_class: pvc 88 | .spec 89 | .clone() 90 | .unwrap_or_default() 91 | .storage_class_name 92 | .unwrap_or_default(), 93 | reason: pvc 94 | .status 95 | .clone() 96 | .unwrap_or_default() 97 | .reason 98 | .unwrap_or_default(), 99 | k8s_obj: utils::sanitize_obj(pvc), 100 | } 101 | } 102 | } 103 | 104 | impl KubeResource for KubePV { 105 | fn get_name(&self) -> &String { 106 | &self.name 107 | } 108 | fn get_k8s_obj(&self) -> &PersistentVolume { 109 | &self.k8s_obj 110 | } 111 | } 112 | 113 | static PV_TITLE: &str = "PersistentVolumes"; 114 | 115 | pub struct PvResource {} 116 | 
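// PersistentVolumes are cluster-scoped, so get_resource below fetches them with the cluster-wide get_resources helper rather than the get_namespaced_resources call used for the namespaced PVC listing above.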
117 | #[async_trait] 118 | impl AppResource for PvResource { 119 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 120 | draw_resource_tab!( 121 | PV_TITLE, 122 | block, 123 | f, 124 | app, 125 | area, 126 | Self::render, 127 | draw_block, 128 | app.data.pvs 129 | ); 130 | } 131 | 132 | async fn get_resource(nw: &Network<'_>) { 133 | let items: Vec = nw.get_resources(PersistentVolume::into).await; 134 | 135 | let mut app = nw.app.lock().await; 136 | app.data.pvs.set_items(items); 137 | } 138 | } 139 | 140 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 141 | let title = get_resource_title(app, PV_TITLE, "", app.data.pvs.items.len()); 142 | 143 | draw_resource_block( 144 | f, 145 | area, 146 | ResourceTableProps { 147 | title, 148 | inline_help: DESCRIBE_YAML_AND_ESC_HINT.into(), 149 | resource: &mut app.data.pvs, 150 | table_headers: vec![ 151 | "Name", 152 | "Capacity", 153 | "Access Modes", 154 | "Reclaim Policy", 155 | "Status", 156 | "Claim", 157 | "Storage Class", 158 | "Reason", 159 | "Age", 160 | ], 161 | column_widths: vec![ 162 | Constraint::Percentage(20), 163 | Constraint::Percentage(10), 164 | Constraint::Percentage(10), 165 | Constraint::Percentage(10), 166 | Constraint::Percentage(10), 167 | Constraint::Percentage(10), 168 | Constraint::Percentage(10), 169 | Constraint::Percentage(10), 170 | Constraint::Percentage(10), 171 | ], 172 | }, 173 | |c| { 174 | Row::new(vec![ 175 | Cell::from(c.name.to_owned()), 176 | Cell::from(c.capacity.to_owned()), 177 | Cell::from(c.access_modes.to_owned()), 178 | Cell::from(c.reclaim_policy.to_owned()), 179 | Cell::from(c.status.to_owned()), 180 | Cell::from(c.claim.to_owned()), 181 | Cell::from(c.storage_class.to_owned()), 182 | Cell::from(c.reason.to_owned()), 183 | Cell::from(c.age.to_owned()), 184 | ]) 185 | .style(style_primary(app.light_theme)) 186 | }, 187 | app.light_theme, 188 | app.is_loading, 189 | app.data.selected.filter.to_owned(), 190 | ); 191 | } 192 | 193 | #[cfg(test)] 194 | mod tests { 195 | use super::*; 196 | use crate::app::test_utils::*; 197 | 198 | #[test] 199 | fn test_persistent_volumes_from_api() { 200 | let (pvc, pvc_list): (Vec, Vec<_>) = convert_resource_from_file("pvs"); 201 | 202 | assert_eq!(pvc.len(), 3); 203 | assert_eq!( 204 | pvc[0], 205 | KubePV { 206 | name: "pvc-149f1f3b-c0fd-471d-bc3e-d039369755ef".into(), 207 | age: utils::to_age(Some(&get_time("2023-06-30T17:27:26Z")), Utc::now()), 208 | k8s_obj: pvc_list[0].clone(), 209 | status: "Bound".into(), 210 | capacity: "8Gi".into(), 211 | access_modes: "ReadWriteOnce".into(), 212 | storage_class: "gp2".into(), 213 | reclaim_policy: "Delete".into(), 214 | claim: "jhipster/data-consul-0".into(), 215 | reason: "".into(), 216 | } 217 | ); 218 | } 219 | } 220 | -------------------------------------------------------------------------------- /src/app/replicasets.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{api::apps::v1::ReplicaSet, chrono::Utc}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row}, 6 | Frame, 7 | }; 8 | 9 | use super::{ 10 | models::{AppResource, KubeResource}, 11 | utils::{self}, 12 | ActiveBlock, App, 13 | }; 14 | use crate::{ 15 | draw_resource_tab, 16 | network::Network, 17 | ui::utils::{ 18 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 19 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 20 | 
DESCRIBE_AND_YAML_HINT, 21 | }, 22 | }; 23 | 24 | #[derive(Clone, Debug, PartialEq)] 25 | pub struct KubeReplicaSet { 26 | pub name: String, 27 | pub namespace: String, 28 | pub desired: i32, 29 | pub current: i32, 30 | pub ready: i32, 31 | pub age: String, 32 | k8s_obj: ReplicaSet, 33 | } 34 | 35 | impl From for KubeReplicaSet { 36 | fn from(rps: ReplicaSet) -> Self { 37 | let (current, ready) = match rps.status.as_ref() { 38 | Some(s) => (s.replicas, s.ready_replicas.unwrap_or_default()), 39 | _ => (0, 0), 40 | }; 41 | 42 | KubeReplicaSet { 43 | name: rps.metadata.name.clone().unwrap_or_default(), 44 | namespace: rps.metadata.namespace.clone().unwrap_or_default(), 45 | age: utils::to_age(rps.metadata.creation_timestamp.as_ref(), Utc::now()), 46 | desired: rps 47 | .spec 48 | .as_ref() 49 | .map_or(0, |s| s.replicas.unwrap_or_default()), 50 | current, 51 | ready, 52 | k8s_obj: utils::sanitize_obj(rps), 53 | } 54 | } 55 | } 56 | 57 | impl KubeResource for KubeReplicaSet { 58 | fn get_name(&self) -> &String { 59 | &self.name 60 | } 61 | fn get_k8s_obj(&self) -> &ReplicaSet { 62 | &self.k8s_obj 63 | } 64 | } 65 | 66 | static REPLICA_SETS_TITLE: &str = "ReplicaSets"; 67 | 68 | pub struct ReplicaSetResource {} 69 | 70 | #[async_trait] 71 | impl AppResource for ReplicaSetResource { 72 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 73 | draw_resource_tab!( 74 | REPLICA_SETS_TITLE, 75 | block, 76 | f, 77 | app, 78 | area, 79 | Self::render, 80 | draw_block, 81 | app.data.replica_sets 82 | ); 83 | } 84 | 85 | async fn get_resource(nw: &Network<'_>) { 86 | let items: Vec = nw.get_namespaced_resources(ReplicaSet::into).await; 87 | 88 | let mut app = nw.app.lock().await; 89 | app.data.replica_sets.set_items(items); 90 | } 91 | } 92 | 93 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 94 | let title = get_resource_title( 95 | app, 96 | REPLICA_SETS_TITLE, 97 | "", 98 | app.data.replica_sets.items.len(), 99 | ); 100 | 101 | draw_resource_block( 102 | f, 103 | area, 104 | ResourceTableProps { 105 | title, 106 | inline_help: DESCRIBE_AND_YAML_HINT.into(), 107 | resource: &mut app.data.replica_sets, 108 | table_headers: vec!["Namespace", "Name", "Desired", "Current", "Ready", "Age"], 109 | column_widths: vec![ 110 | Constraint::Percentage(25), 111 | Constraint::Percentage(35), 112 | Constraint::Percentage(10), 113 | Constraint::Percentage(10), 114 | Constraint::Percentage(10), 115 | Constraint::Percentage(10), 116 | ], 117 | }, 118 | |c| { 119 | Row::new(vec![ 120 | Cell::from(c.namespace.to_owned()), 121 | Cell::from(c.name.to_owned()), 122 | Cell::from(c.desired.to_string()), 123 | Cell::from(c.current.to_string()), 124 | Cell::from(c.ready.to_string()), 125 | Cell::from(c.age.to_owned()), 126 | ]) 127 | .style(style_primary(app.light_theme)) 128 | }, 129 | app.light_theme, 130 | app.is_loading, 131 | app.data.selected.filter.to_owned(), 132 | ); 133 | } 134 | 135 | #[cfg(test)] 136 | mod tests { 137 | use super::*; 138 | use crate::app::test_utils::*; 139 | 140 | #[test] 141 | fn test_replica_sets_from_api() { 142 | let (rpls, rpls_list): (Vec, Vec<_>) = 143 | convert_resource_from_file("replicasets"); 144 | 145 | assert_eq!(rpls.len(), 4); 146 | assert_eq!( 147 | rpls[0], 148 | KubeReplicaSet { 149 | name: "metrics-server-86cbb8457f".into(), 150 | namespace: "kube-system".into(), 151 | age: utils::to_age(Some(&get_time("2021-05-10T21:48:19Z")), Utc::now()), 152 | k8s_obj: rpls_list[0].clone(), 153 | desired: 1, 154 | current: 1, 155 | ready: 1, 156 | } 
157 | ); 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /src/app/serviceaccounts.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{api::core::v1::ServiceAccount, chrono::Utc}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row}, 6 | Frame, 7 | }; 8 | 9 | use super::{ 10 | models::{AppResource, KubeResource}, 11 | utils::{self}, 12 | ActiveBlock, App, 13 | }; 14 | use crate::{ 15 | draw_resource_tab, 16 | network::Network, 17 | ui::utils::{ 18 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 19 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 20 | DESCRIBE_YAML_AND_ESC_HINT, 21 | }, 22 | }; 23 | 24 | #[derive(Clone, Debug, PartialEq)] 25 | pub struct KubeSvcAcct { 26 | pub namespace: String, 27 | pub name: String, 28 | pub secrets: i32, 29 | pub age: String, 30 | k8s_obj: ServiceAccount, 31 | } 32 | 33 | // Get length of a vector 34 | impl From for KubeSvcAcct { 35 | fn from(acct: ServiceAccount) -> Self { 36 | KubeSvcAcct { 37 | namespace: acct.metadata.namespace.clone().unwrap_or_default(), 38 | name: acct.metadata.name.clone().unwrap_or_default(), 39 | secrets: acct.secrets.clone().unwrap_or_default().len() as i32, 40 | age: utils::to_age(acct.metadata.creation_timestamp.as_ref(), Utc::now()), 41 | k8s_obj: utils::sanitize_obj(acct), 42 | } 43 | } 44 | } 45 | 46 | impl KubeResource for KubeSvcAcct { 47 | fn get_name(&self) -> &String { 48 | &self.name 49 | } 50 | fn get_k8s_obj(&self) -> &ServiceAccount { 51 | &self.k8s_obj 52 | } 53 | } 54 | 55 | static SVC_ACCT_TITLE: &str = "ServiceAccounts"; 56 | 57 | pub struct SvcAcctResource {} 58 | 59 | #[async_trait] 60 | impl AppResource for SvcAcctResource { 61 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 62 | draw_resource_tab!( 63 | SVC_ACCT_TITLE, 64 | block, 65 | f, 66 | app, 67 | area, 68 | Self::render, 69 | draw_block, 70 | app.data.service_accounts 71 | ); 72 | } 73 | 74 | async fn get_resource(nw: &Network<'_>) { 75 | let items: Vec = nw.get_namespaced_resources(ServiceAccount::into).await; 76 | 77 | let mut app = nw.app.lock().await; 78 | app.data.service_accounts.set_items(items); 79 | } 80 | } 81 | 82 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 83 | let title = get_resource_title( 84 | app, 85 | SVC_ACCT_TITLE, 86 | "", 87 | app.data.service_accounts.items.len(), 88 | ); 89 | 90 | draw_resource_block( 91 | f, 92 | area, 93 | ResourceTableProps { 94 | title, 95 | inline_help: DESCRIBE_YAML_AND_ESC_HINT.into(), 96 | resource: &mut app.data.service_accounts, 97 | table_headers: vec!["Namespace", "Name", "Secrets", "Age"], 98 | column_widths: vec![ 99 | Constraint::Percentage(30), 100 | Constraint::Percentage(30), 101 | Constraint::Percentage(20), 102 | Constraint::Percentage(20), 103 | ], 104 | }, 105 | |c| { 106 | Row::new(vec![ 107 | Cell::from(c.namespace.to_owned()), 108 | Cell::from(c.name.to_owned()), 109 | Cell::from(c.secrets.to_string()), 110 | Cell::from(c.age.to_owned()), 111 | ]) 112 | .style(style_primary(app.light_theme)) 113 | }, 114 | app.light_theme, 115 | app.is_loading, 116 | app.data.selected.filter.to_owned(), 117 | ); 118 | } 119 | 120 | #[cfg(test)] 121 | mod tests { 122 | use k8s_openapi::chrono::Utc; 123 | 124 | use crate::app::{ 125 | serviceaccounts::KubeSvcAcct, 126 | test_utils::{convert_resource_from_file, 
get_time}, 127 | utils, 128 | }; 129 | 130 | #[test] 131 | fn test_service_accounts_from_api() { 132 | let (serviceaccounts, serviceaccounts_list): (Vec, Vec<_>) = 133 | convert_resource_from_file("serviceaccounts"); 134 | 135 | assert_eq!(serviceaccounts.len(), 43); 136 | assert_eq!( 137 | serviceaccounts[0], 138 | KubeSvcAcct { 139 | namespace: "kube-node-lease".to_string(), 140 | name: "default".into(), 141 | secrets: 3, 142 | age: utils::to_age(Some(&get_time("2023-06-30T17:13:19Z")), Utc::now()), 143 | k8s_obj: serviceaccounts_list[0].clone(), 144 | } 145 | ) 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /src/app/statefulsets.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{api::apps::v1::StatefulSet, chrono::Utc}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row}, 6 | Frame, 7 | }; 8 | 9 | use super::{ 10 | models::{AppResource, KubeResource}, 11 | utils::{self}, 12 | ActiveBlock, App, 13 | }; 14 | use crate::{ 15 | draw_resource_tab, 16 | network::Network, 17 | ui::utils::{ 18 | draw_describe_block, draw_resource_block, draw_yaml_block, get_describe_active, 19 | get_resource_title, style_primary, title_with_dual_style, ResourceTableProps, COPY_HINT, 20 | DESCRIBE_AND_YAML_HINT, 21 | }, 22 | }; 23 | 24 | #[derive(Clone, Debug, PartialEq)] 25 | pub struct KubeStatefulSet { 26 | pub name: String, 27 | pub namespace: String, 28 | pub ready: String, 29 | pub service: String, 30 | pub age: String, 31 | k8s_obj: StatefulSet, 32 | } 33 | 34 | impl From for KubeStatefulSet { 35 | fn from(stfs: StatefulSet) -> Self { 36 | let ready = match &stfs.status { 37 | Some(s) => format!("{}/{}", s.ready_replicas.unwrap_or_default(), s.replicas), 38 | _ => "".into(), 39 | }; 40 | 41 | KubeStatefulSet { 42 | name: stfs.metadata.name.clone().unwrap_or_default(), 43 | namespace: stfs.metadata.namespace.clone().unwrap_or_default(), 44 | age: utils::to_age(stfs.metadata.creation_timestamp.as_ref(), Utc::now()), 45 | service: stfs 46 | .spec 47 | .as_ref() 48 | .map_or("n/a".into(), |spec| spec.service_name.to_owned()), 49 | ready, 50 | k8s_obj: utils::sanitize_obj(stfs), 51 | } 52 | } 53 | } 54 | 55 | impl KubeResource for KubeStatefulSet { 56 | fn get_name(&self) -> &String { 57 | &self.name 58 | } 59 | fn get_k8s_obj(&self) -> &StatefulSet { 60 | &self.k8s_obj 61 | } 62 | } 63 | 64 | static STFS_TITLE: &str = "StatefulSets"; 65 | 66 | pub struct StatefulSetResource {} 67 | 68 | #[async_trait] 69 | impl AppResource for StatefulSetResource { 70 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 71 | draw_resource_tab!( 72 | STFS_TITLE, 73 | block, 74 | f, 75 | app, 76 | area, 77 | Self::render, 78 | draw_block, 79 | app.data.stateful_sets 80 | ); 81 | } 82 | 83 | async fn get_resource(nw: &Network<'_>) { 84 | let items: Vec = nw.get_namespaced_resources(StatefulSet::into).await; 85 | 86 | let mut app = nw.app.lock().await; 87 | app.data.stateful_sets.set_items(items); 88 | } 89 | } 90 | 91 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 92 | let title = get_resource_title(app, STFS_TITLE, "", app.data.stateful_sets.items.len()); 93 | 94 | draw_resource_block( 95 | f, 96 | area, 97 | ResourceTableProps { 98 | title, 99 | inline_help: DESCRIBE_AND_YAML_HINT.into(), 100 | resource: &mut app.data.stateful_sets, 101 | table_headers: vec!["Namespace", "Name", "Ready", "Service", "Age"], 102 | 
column_widths: vec![ 103 | Constraint::Percentage(25), 104 | Constraint::Percentage(30), 105 | Constraint::Percentage(10), 106 | Constraint::Percentage(25), 107 | Constraint::Percentage(10), 108 | ], 109 | }, 110 | |c| { 111 | Row::new(vec![ 112 | Cell::from(c.namespace.to_owned()), 113 | Cell::from(c.name.to_owned()), 114 | Cell::from(c.ready.to_owned()), 115 | Cell::from(c.service.to_owned()), 116 | Cell::from(c.age.to_owned()), 117 | ]) 118 | .style(style_primary(app.light_theme)) 119 | }, 120 | app.light_theme, 121 | app.is_loading, 122 | app.data.selected.filter.to_owned(), 123 | ); 124 | } 125 | 126 | #[cfg(test)] 127 | mod tests { 128 | use super::*; 129 | use crate::app::test_utils::*; 130 | 131 | #[test] 132 | fn test_stateful_sets_from_api() { 133 | let (stfs, stfs_list): (Vec, Vec<_>) = convert_resource_from_file("stfs"); 134 | 135 | assert_eq!(stfs.len(), 1); 136 | assert_eq!( 137 | stfs[0], 138 | KubeStatefulSet { 139 | name: "web".into(), 140 | namespace: "default".into(), 141 | age: utils::to_age(Some(&get_time("2021-04-25T14:23:47Z")), Utc::now()), 142 | k8s_obj: stfs_list[0].clone(), 143 | service: "nginx".into(), 144 | ready: "2/2".into(), 145 | } 146 | ); 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /src/app/storageclass.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use k8s_openapi::{api::storage::v1::StorageClass, chrono::Utc}; 3 | use ratatui::{ 4 | layout::{Constraint, Rect}, 5 | widgets::{Cell, Row}, 6 | Frame, 7 | }; 8 | 9 | use super::{ 10 | models::{AppResource, KubeResource}, 11 | utils::{self}, 12 | ActiveBlock, App, 13 | }; 14 | use crate::{ 15 | draw_resource_tab, 16 | network::Network, 17 | ui::utils::{ 18 | draw_describe_block, draw_resource_block, draw_yaml_block, get_cluster_wide_resource_title, 19 | get_describe_active, get_resource_title, style_primary, title_with_dual_style, 20 | ResourceTableProps, COPY_HINT, DESCRIBE_YAML_AND_ESC_HINT, 21 | }, 22 | }; 23 | 24 | #[derive(Clone, Debug, PartialEq)] 25 | pub struct KubeStorageClass { 26 | pub name: String, 27 | pub provisioner: String, 28 | pub reclaim_policy: String, 29 | pub volume_binding_mode: String, 30 | pub allow_volume_expansion: bool, 31 | pub age: String, 32 | k8s_obj: StorageClass, 33 | } 34 | 35 | impl From for KubeStorageClass { 36 | fn from(storage_class: StorageClass) -> Self { 37 | KubeStorageClass { 38 | name: storage_class.metadata.name.clone().unwrap_or_default(), 39 | provisioner: storage_class.provisioner.clone(), 40 | reclaim_policy: storage_class.reclaim_policy.clone().unwrap_or_default(), 41 | volume_binding_mode: storage_class 42 | .volume_binding_mode 43 | .clone() 44 | .unwrap_or_default(), 45 | allow_volume_expansion: storage_class.allow_volume_expansion.unwrap_or_default(), 46 | age: utils::to_age( 47 | storage_class.metadata.creation_timestamp.as_ref(), 48 | Utc::now(), 49 | ), 50 | k8s_obj: utils::sanitize_obj(storage_class), 51 | } 52 | } 53 | } 54 | 55 | impl KubeResource for KubeStorageClass { 56 | fn get_name(&self) -> &String { 57 | &self.name 58 | } 59 | fn get_k8s_obj(&self) -> &StorageClass { 60 | &self.k8s_obj 61 | } 62 | } 63 | 64 | static STORAGE_CLASSES_LABEL: &str = "StorageClasses"; 65 | 66 | pub struct StorageClassResource {} 67 | 68 | #[async_trait] 69 | impl AppResource for StorageClassResource { 70 | fn render(block: ActiveBlock, f: &mut Frame<'_>, app: &mut App, area: Rect) { 71 | draw_resource_tab!( 72 | 
STORAGE_CLASSES_LABEL, 73 | block, 74 | f, 75 | app, 76 | area, 77 | Self::render, 78 | draw_block, 79 | app.data.storage_classes 80 | ); 81 | } 82 | 83 | async fn get_resource(nw: &Network<'_>) { 84 | let items: Vec = nw.get_resources(StorageClass::into).await; 85 | 86 | let mut app = nw.app.lock().await; 87 | app.data.storage_classes.set_items(items); 88 | } 89 | } 90 | 91 | fn draw_block(f: &mut Frame<'_>, app: &mut App, area: Rect) { 92 | let title = get_cluster_wide_resource_title( 93 | STORAGE_CLASSES_LABEL, 94 | app.data.storage_classes.items.len(), 95 | "", 96 | ); 97 | 98 | draw_resource_block( 99 | f, 100 | area, 101 | ResourceTableProps { 102 | title, 103 | inline_help: DESCRIBE_YAML_AND_ESC_HINT.into(), 104 | resource: &mut app.data.storage_classes, 105 | table_headers: vec![ 106 | "Name", 107 | "Provisioner", 108 | "Reclaim Policy", 109 | "Volume Binding Mode", 110 | "Allow Volume Expansion", 111 | "Age", 112 | ], 113 | column_widths: vec![ 114 | Constraint::Percentage(10), 115 | Constraint::Percentage(20), 116 | Constraint::Percentage(10), 117 | Constraint::Percentage(20), 118 | Constraint::Percentage(20), 119 | Constraint::Percentage(10), 120 | ], 121 | }, 122 | |c| { 123 | Row::new(vec![ 124 | Cell::from(c.name.to_owned()), 125 | Cell::from(c.provisioner.to_owned()), 126 | Cell::from(c.reclaim_policy.to_owned()), 127 | Cell::from(c.volume_binding_mode.to_owned()), 128 | Cell::from(c.allow_volume_expansion.to_string()), 129 | Cell::from(c.age.to_owned()), 130 | ]) 131 | .style(style_primary(app.light_theme)) 132 | }, 133 | app.light_theme, 134 | app.is_loading, 135 | app.data.selected.filter.to_owned(), 136 | ); 137 | } 138 | 139 | #[cfg(test)] 140 | mod tests { 141 | use k8s_openapi::chrono::Utc; 142 | 143 | use crate::app::{ 144 | storageclass::KubeStorageClass, 145 | test_utils::{convert_resource_from_file, get_time}, 146 | utils, 147 | }; 148 | 149 | #[tokio::test] 150 | async fn test_storageclass_from_api() { 151 | let (storage_classes, storage_classes_list): (Vec, Vec<_>) = 152 | convert_resource_from_file("storageclass"); 153 | assert_eq!(storage_classes_list.len(), 4); 154 | assert_eq!( 155 | storage_classes[0], 156 | KubeStorageClass { 157 | name: "ebs-performance".into(), 158 | provisioner: "kubernetes.io/aws-ebs".into(), 159 | reclaim_policy: "Delete".into(), 160 | volume_binding_mode: "Immediate".into(), 161 | allow_volume_expansion: false, 162 | age: utils::to_age(Some(&get_time("2021-12-14T11:08:59Z")), Utc::now()), 163 | k8s_obj: storage_classes_list[0].clone(), 164 | } 165 | ); 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /src/banner.rs: -------------------------------------------------------------------------------- 1 | pub const BANNER: &str = r" 2 | ██╗ ██╗ 3 | ██║ ██╔╝ 4 | █████╔╝█████╗ 5 | ██╔═██╗╚════╝ 6 | ██║ ██╗ 7 | ╚═╝ ╚═╝ 8 | "; 9 | -------------------------------------------------------------------------------- /src/cmd/mod.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use anyhow::anyhow; 4 | use duct::cmd; 5 | use log::{error, info}; 6 | use regex::Regex; 7 | use serde_json::Value as JValue; 8 | use tokio::sync::Mutex; 9 | 10 | use crate::app::{self, models::ScrollableTxt, App, Cli}; 11 | 12 | #[derive(Debug, Eq, PartialEq)] 13 | pub enum IoCmdEvent { 14 | GetCliInfo, 15 | GetDescribe { 16 | kind: String, 17 | value: String, 18 | ns: Option, 19 | }, 20 | } 21 | 22 | #[derive(Clone)] 23 | pub struct CmdRunner<'a> { 24 | 
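// Shared handle to the application state; the runner takes this (tokio,
// hence async) mutex only after a kubectl/docker invocation has finished,
// so the UI is not blocked on it while an external command runs.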
pub app: &'a Arc>, 25 | } 26 | 27 | static NOT_FOUND: &str = "Not found"; 28 | 29 | impl<'a> CmdRunner<'a> { 30 | pub fn new(app: &'a Arc>) -> Self { 31 | CmdRunner { app } 32 | } 33 | 34 | pub async fn handle_cmd_event(&mut self, io_event: IoCmdEvent) { 35 | match io_event { 36 | IoCmdEvent::GetCliInfo => { 37 | self.get_cli_info().await; 38 | } 39 | IoCmdEvent::GetDescribe { kind, value, ns } => { 40 | self.get_describe(kind, value, ns).await; 41 | } 42 | }; 43 | 44 | let mut app = self.app.lock().await; 45 | app.is_loading = false; 46 | } 47 | 48 | async fn handle_error(&self, e: anyhow::Error) { 49 | error!("{:?}", e); 50 | let mut app = self.app.lock().await; 51 | app.handle_error(e); 52 | } 53 | 54 | async fn get_cli_info(&self) { 55 | let mut clis: Vec = vec![]; 56 | 57 | let (version_c, version_s) = match cmd!("kubectl", "version", "-o", "json") 58 | .stderr_null() 59 | .read() 60 | { 61 | Ok(out) => { 62 | info!("kubectl version: {}", out); 63 | let v: serde_json::Result = serde_json::from_str(&out); 64 | match v { 65 | Ok(val) => ( 66 | Some( 67 | val["clientVersion"]["gitVersion"] 68 | .to_string() 69 | .replace('"', ""), 70 | ), 71 | Some( 72 | val["serverVersion"]["gitVersion"] 73 | .to_string() 74 | .replace('"', ""), 75 | ), 76 | ), 77 | _ => (None, None), 78 | } 79 | } 80 | _ => (None, None), 81 | }; 82 | 83 | clis.push(build_cli("kubectl client", version_c)); 84 | clis.push(build_cli("kubectl server", version_s)); 85 | 86 | let version = cmd!("docker", "version", "--format", "'{{.Client.Version}}'") 87 | .stderr_null() 88 | .read() 89 | .map_or(None, |out| { 90 | if out.is_empty() { 91 | None 92 | } else { 93 | Some(format!("v{}", out.replace('\'', ""))) 94 | } 95 | }); 96 | 97 | clis.push(build_cli("docker", version)); 98 | 99 | let version = cmd!("docker-compose", "version", "--short") 100 | .stderr_null() 101 | .read() 102 | .map_or(None, |out| { 103 | if out.is_empty() { 104 | cmd!("docker", "compose", "version", "--short") 105 | .stderr_null() 106 | .read() 107 | .map_or(None, |out| { 108 | if out.is_empty() { 109 | None 110 | } else { 111 | Some(format!("v{}", out.replace('\'', ""))) 112 | } 113 | }) 114 | } else { 115 | Some(format!("v{}", out.replace('\'', ""))) 116 | } 117 | }); 118 | 119 | clis.push(build_cli("docker-compose", version)); 120 | 121 | let version = get_info_by_regex("kind", &["version"], r"(v[0-9.]+)"); 122 | 123 | clis.push(build_cli("kind", version)); 124 | 125 | let version = get_info_by_regex("helm", &["version", "-c"], r"(v[0-9.]+)"); 126 | 127 | clis.push(build_cli("helm", version)); 128 | 129 | let version = get_info_by_regex("istioctl", &["version"], r"([0-9.]+)"); 130 | 131 | clis.push(build_cli("istioctl", version.map(|v| format!("v{}", v)))); 132 | 133 | let mut app = self.app.lock().await; 134 | app.data.clis = clis; 135 | } 136 | 137 | // TODO temp solution, should build this from API response 138 | async fn get_describe(&self, kind: String, value: String, ns: Option) { 139 | let mut args = vec!["describe", kind.as_str(), value.as_str()]; 140 | 141 | if let Some(ns) = ns.as_ref() { 142 | args.push("-n"); 143 | args.push(ns.as_str()); 144 | } 145 | 146 | let out = duct::cmd("kubectl", &args).stderr_null().read(); 147 | 148 | match out { 149 | Ok(out) => { 150 | let mut app = self.app.lock().await; 151 | app.data.describe_out = ScrollableTxt::with_string(out); 152 | } 153 | Err(e) => { 154 | self 155 | .handle_error(anyhow!(format!( 156 | "Error running {} describe. 
Make sure you have kubectl installed: {:?}", 157 | kind, e 158 | ))) 159 | .await 160 | } 161 | } 162 | } 163 | } 164 | 165 | // utils 166 | 167 | fn build_cli(name: &str, version: Option) -> app::Cli { 168 | app::Cli { 169 | name: name.to_owned(), 170 | status: version.is_some(), 171 | version: version.unwrap_or_else(|| NOT_FOUND.into()), 172 | } 173 | } 174 | 175 | /// execute a command and get info from it using regex 176 | fn get_info_by_regex(command: &str, args: &[&str], regex: &str) -> Option { 177 | match cmd(command, args).stderr_null().read() { 178 | Ok(out) => match Regex::new(regex) { 179 | Ok(re) => match re.captures(out.as_str()) { 180 | Some(cap) => cap.get(1).map(|text| text.as_str().into()), 181 | _ => None, 182 | }, 183 | _ => None, 184 | }, 185 | _ => None, 186 | } 187 | } 188 | 189 | #[cfg(test)] 190 | mod tests { 191 | #[test] 192 | fn test_get_info_by_regex() { 193 | use super::get_info_by_regex; 194 | 195 | assert_eq!( 196 | get_info_by_regex( 197 | "echo", 198 | &["Client: &version.Version{SemVer:\"v2.17.0\", GitCommit:\"a690bad98af45b015bd3da1a41f6218b1a451dbe\", GitTreeState:\"clean\"} \n Error: could not find tiller"], 199 | r"(v[0-9.]+)" 200 | ), 201 | Some("v2.17.0".into()) 202 | ); 203 | assert_eq!( 204 | get_info_by_regex( 205 | "echo", 206 | &["no running Istio pods in \"istio-system\"\n1.8.2"], 207 | r"([0-9.]+)" 208 | ), 209 | Some("1.8.2".into()) 210 | ); 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /src/event/events.rs: -------------------------------------------------------------------------------- 1 | // adapted from tui-rs/examples/crossterm_demo.rs 2 | use std::{ 3 | sync::mpsc, 4 | thread, 5 | time::{Duration, Instant}, 6 | }; 7 | 8 | use crossterm::event::{self, Event as CEvent, KeyEvent, MouseEvent}; 9 | 10 | #[derive(Debug, Clone, Copy)] 11 | /// Configuration for event handling. 12 | pub struct EventConfig { 13 | /// The tick rate at which the application will sent an tick event. 14 | pub tick_rate: Duration, 15 | } 16 | 17 | impl Default for EventConfig { 18 | fn default() -> EventConfig { 19 | EventConfig { 20 | tick_rate: Duration::from_millis(250), 21 | } 22 | } 23 | } 24 | 25 | /// An occurred event. 26 | pub enum Event { 27 | /// An input event occurred. 28 | Input(I), 29 | MouseInput(J), 30 | /// An tick event occurred. 31 | Tick, 32 | } 33 | 34 | /// A small event handler that wrap crossterm input and tick event. Each event 35 | /// type is handled in its own thread and returned to a common `Receiver` 36 | pub struct Events { 37 | rx: mpsc::Receiver>, 38 | // Need to be kept around to prevent disposing the sender side. 39 | _tx: mpsc::Sender>, 40 | } 41 | 42 | impl Events { 43 | /// Constructs an new instance of `Events` with the default config. 44 | pub fn new(tick_rate: u64) -> Events { 45 | Events::with_config(EventConfig { 46 | tick_rate: Duration::from_millis(tick_rate), 47 | }) 48 | } 49 | 50 | /// Constructs an new instance of `Events` from given config. 51 | pub fn with_config(config: EventConfig) -> Events { 52 | let (tx, rx) = mpsc::channel(); 53 | 54 | let tick_rate = config.tick_rate; 55 | 56 | let event_tx = tx.clone(); 57 | thread::spawn(move || { 58 | let mut last_tick = Instant::now(); 59 | loop { 60 | let timeout = tick_rate 61 | .checked_sub(last_tick.elapsed()) 62 | .unwrap_or_else(|| Duration::from_secs(0)); 63 | // poll for tick rate duration, if no event, sent tick event. 
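// Worked example of the timing above (illustrative numbers only): with the
// default 250ms tick rate, if 200ms have passed since the last tick the
// poll below waits at most 50ms for terminal input; once a full interval
// has elapsed, checked_sub returns None, the timeout clamps to zero and
// the loop falls through to send Event::Tick further down.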
64 | if event::poll(timeout).unwrap() { 65 | match event::read().unwrap() { 66 | CEvent::Key(key_event) => handle_key_event(&event_tx, key_event), 67 | CEvent::Mouse(mouse_event) => { 68 | event_tx.send(Event::MouseInput(mouse_event)).unwrap(); 69 | } 70 | _ => {} 71 | } 72 | } 73 | if last_tick.elapsed() >= tick_rate { 74 | event_tx.send(Event::Tick).unwrap(); 75 | last_tick = Instant::now(); 76 | } 77 | } 78 | }); 79 | 80 | Events { rx, _tx: tx } 81 | } 82 | 83 | /// Attempts to read an event. 84 | /// This function will block the current thread. 85 | pub fn next(&self) -> Result, mpsc::RecvError> { 86 | self.rx.recv() 87 | } 88 | } 89 | 90 | #[cfg(target_os = "windows")] 91 | fn handle_key_event(event_tx: &mpsc::Sender>, key_event: KeyEvent) { 92 | if key_event.kind == event::KeyEventKind::Press { 93 | event_tx.send(Event::Input(key_event)).unwrap(); 94 | } 95 | } 96 | 97 | #[cfg(not(target_os = "windows"))] 98 | fn handle_key_event(event_tx: &mpsc::Sender>, key_event: KeyEvent) { 99 | event_tx.send(Event::Input(key_event)).unwrap(); 100 | } 101 | -------------------------------------------------------------------------------- /src/event/key.rs: -------------------------------------------------------------------------------- 1 | // from https://github.com/Rigellute/spotify-tui 2 | use std::fmt; 3 | 4 | use crossterm::event; 5 | 6 | /// Represents an key. 7 | #[derive(PartialEq, Eq, Clone, Copy, Hash, Debug)] 8 | pub enum Key { 9 | /// Both Enter (or Return) and numpad Enter 10 | Enter, 11 | Tab, 12 | Backspace, 13 | Esc, 14 | /// Left arrow 15 | Left, 16 | /// Right arrow 17 | Right, 18 | /// Up arrow 19 | Up, 20 | /// Down arrow 21 | Down, 22 | /// Insert key 23 | Ins, 24 | /// Delete key 25 | Delete, 26 | /// Home key 27 | Home, 28 | /// End key 29 | End, 30 | /// Page Up key 31 | PageUp, 32 | /// Page Down key 33 | PageDown, 34 | /// F0 key 35 | F0, 36 | /// F1 key 37 | F1, 38 | /// F2 key 39 | F2, 40 | /// F3 key 41 | F3, 42 | /// F4 key 43 | F4, 44 | /// F5 key 45 | F5, 46 | /// F6 key 47 | F6, 48 | /// F7 key 49 | F7, 50 | /// F8 key 51 | F8, 52 | /// F9 key 53 | F9, 54 | /// F10 key 55 | F10, 56 | /// F11 key 57 | F11, 58 | /// F12 key 59 | F12, 60 | Char(char), 61 | Ctrl(char), 62 | Alt(char), 63 | Unknown, 64 | } 65 | 66 | impl Key { 67 | /// Returns the function key corresponding to the given number 68 | /// 69 | /// 1 -> F1, etc... 70 | /// 71 | /// # Panics 72 | /// 73 | /// If `n == 0 || n > 12` 74 | pub fn from_f(n: u8) -> Key { 75 | match n { 76 | 0 => Key::F0, 77 | 1 => Key::F1, 78 | 2 => Key::F2, 79 | 3 => Key::F3, 80 | 4 => Key::F4, 81 | 5 => Key::F5, 82 | 6 => Key::F6, 83 | 7 => Key::F7, 84 | 8 => Key::F8, 85 | 9 => Key::F9, 86 | 10 => Key::F10, 87 | 11 => Key::F11, 88 | 12 => Key::F12, 89 | _ => panic!("unknown function key: F{}", n), 90 | } 91 | } 92 | } 93 | 94 | impl fmt::Display for Key { 95 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 96 | match *self { 97 | Key::Alt(' ') => write!(f, ""), 98 | Key::Ctrl(' ') => write!(f, ""), 99 | Key::Char(' ') => write!(f, ""), 100 | Key::Alt(c) => write!(f, "", c), 101 | Key::Ctrl(c) => write!(f, "", c), 102 | Key::Char(c) => write!(f, "<{}>", c), 103 | Key::Left | Key::Right | Key::Up | Key::Down => write!(f, "<{:?} Arrow Key>", self), 104 | _ => write!(f, "<{:?}>", self), 105 | } 106 | } 107 | } 108 | 109 | impl From for Key { 110 | fn from(key_event: event::KeyEvent) -> Self { 111 | match key_event { 112 | event::KeyEvent { 113 | code: event::KeyCode::Esc, 114 | .. 
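// For these non-character keys the `..` rest pattern ignores the modifier
// and kind fields, so e.g. Shift+Esc still maps to Key::Esc; only the
// Char(..) arms near the end of this match distinguish ALT and CONTROL.
// A minimal usage sketch (hypothetical names, not from this file):
//   match Key::from(crossterm_key_event) {
//     Key::Ctrl('c') => quit(),
//     key => handle_key(key),
//   }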
115 | } => Key::Esc, 116 | event::KeyEvent { 117 | code: event::KeyCode::Backspace, 118 | .. 119 | } => Key::Backspace, 120 | event::KeyEvent { 121 | code: event::KeyCode::Left, 122 | .. 123 | } => Key::Left, 124 | event::KeyEvent { 125 | code: event::KeyCode::Right, 126 | .. 127 | } => Key::Right, 128 | event::KeyEvent { 129 | code: event::KeyCode::Up, 130 | .. 131 | } => Key::Up, 132 | event::KeyEvent { 133 | code: event::KeyCode::Down, 134 | .. 135 | } => Key::Down, 136 | event::KeyEvent { 137 | code: event::KeyCode::Home, 138 | .. 139 | } => Key::Home, 140 | event::KeyEvent { 141 | code: event::KeyCode::End, 142 | .. 143 | } => Key::End, 144 | event::KeyEvent { 145 | code: event::KeyCode::PageUp, 146 | .. 147 | } => Key::PageUp, 148 | event::KeyEvent { 149 | code: event::KeyCode::PageDown, 150 | .. 151 | } => Key::PageDown, 152 | event::KeyEvent { 153 | code: event::KeyCode::Delete, 154 | .. 155 | } => Key::Delete, 156 | event::KeyEvent { 157 | code: event::KeyCode::Insert, 158 | .. 159 | } => Key::Ins, 160 | event::KeyEvent { 161 | code: event::KeyCode::F(n), 162 | .. 163 | } => Key::from_f(n), 164 | event::KeyEvent { 165 | code: event::KeyCode::Enter, 166 | .. 167 | } => Key::Enter, 168 | event::KeyEvent { 169 | code: event::KeyCode::Tab, 170 | .. 171 | } => Key::Tab, 172 | 173 | // First check for char + modifier 174 | event::KeyEvent { 175 | code: event::KeyCode::Char(c), 176 | modifiers: event::KeyModifiers::ALT, 177 | .. 178 | } => Key::Alt(c), 179 | event::KeyEvent { 180 | code: event::KeyCode::Char(c), 181 | modifiers: event::KeyModifiers::CONTROL, 182 | .. 183 | } => Key::Ctrl(c), 184 | 185 | event::KeyEvent { 186 | code: event::KeyCode::Char(c), 187 | .. 188 | } => Key::Char(c), 189 | 190 | _ => Key::Unknown, 191 | } 192 | } 193 | } 194 | 195 | #[cfg(test)] 196 | mod tests { 197 | use super::*; 198 | 199 | #[test] 200 | fn test_key_fmt() { 201 | assert_eq!(format!("{}", Key::Left), ""); 202 | assert_eq!(format!("{}", Key::Alt(' ')), ""); 203 | assert_eq!(format!("{}", Key::Alt('c')), ""); 204 | assert_eq!(format!("{}", Key::Char('c')), ""); 205 | assert_eq!(format!("{}", Key::Enter), ""); 206 | assert_eq!(format!("{}", Key::F10), ""); 207 | } 208 | #[test] 209 | fn test_key_from_event() { 210 | assert_eq!( 211 | Key::from(event::KeyEvent::from(event::KeyCode::Esc)), 212 | Key::Esc 213 | ); 214 | assert_eq!( 215 | Key::from(event::KeyEvent::from(event::KeyCode::F(2))), 216 | Key::F2 217 | ); 218 | assert_eq!( 219 | Key::from(event::KeyEvent::from(event::KeyCode::Char('J'))), 220 | Key::Char('J') 221 | ); 222 | assert_eq!( 223 | Key::from(event::KeyEvent { 224 | code: event::KeyCode::Char('c'), 225 | modifiers: event::KeyModifiers::ALT, 226 | kind: event::KeyEventKind::Press, 227 | state: event::KeyEventState::NONE, 228 | }), 229 | Key::Alt('c') 230 | ); 231 | assert_eq!( 232 | Key::from(event::KeyEvent { 233 | code: event::KeyCode::Char('c'), 234 | modifiers: event::KeyModifiers::CONTROL, 235 | kind: event::KeyEventKind::Press, 236 | state: event::KeyEventState::NONE 237 | }), 238 | Key::Ctrl('c') 239 | ); 240 | } 241 | } 242 | -------------------------------------------------------------------------------- /src/event/mod.rs: -------------------------------------------------------------------------------- 1 | mod events; 2 | mod key; 3 | 4 | pub use self::{ 5 | events::{Event, Events}, 6 | key::Key, 7 | }; 8 | -------------------------------------------------------------------------------- /src/network/stream.rs: 
-------------------------------------------------------------------------------- 1 | use std::{sync::Arc, time::Duration}; 2 | 3 | use anyhow::anyhow; 4 | use futures::AsyncBufReadExt; 5 | use k8s_openapi::api::core::v1::Pod; 6 | use kube::{api::LogParams, Api, Client}; 7 | use log::error; 8 | use tokio::sync::Mutex; 9 | use tokio_stream::StreamExt; 10 | 11 | use super::refresh_kube_config; 12 | use crate::app::{ActiveBlock, App}; 13 | 14 | #[derive(Debug, Eq, PartialEq)] 15 | pub enum IoStreamEvent { 16 | RefreshClient, 17 | GetPodLogs(bool), 18 | } 19 | 20 | #[derive(Clone)] 21 | pub struct NetworkStream<'a> { 22 | pub client: Client, 23 | pub app: &'a Arc>, 24 | } 25 | 26 | impl<'a> NetworkStream<'a> { 27 | pub fn new(client: Client, app: &'a Arc>) -> Self { 28 | NetworkStream { client, app } 29 | } 30 | 31 | pub async fn refresh_client(&mut self) { 32 | let context = { 33 | let app = self.app.lock().await; 34 | app.data.selected.context.clone() 35 | }; 36 | match refresh_kube_config(&context).await { 37 | Ok(client) => { 38 | self.client = client; 39 | } 40 | Err(e) => { 41 | self 42 | .handle_error(anyhow!("Failed to refresh client. {:}", e)) 43 | .await 44 | } 45 | } 46 | } 47 | 48 | pub async fn handle_network_stream_event(&mut self, io_event: IoStreamEvent) { 49 | match io_event { 50 | IoStreamEvent::RefreshClient => { 51 | self.refresh_client().await; 52 | } 53 | IoStreamEvent::GetPodLogs(tail) => { 54 | self.stream_container_logs(tail).await; 55 | } 56 | }; 57 | 58 | let mut app = self.app.lock().await; 59 | app.is_loading = false; 60 | } 61 | 62 | async fn handle_error(&self, e: anyhow::Error) { 63 | error!("{:?}", e); 64 | let mut app = self.app.lock().await; 65 | app.handle_error(e); 66 | } 67 | 68 | pub async fn stream_container_logs(&self, tail: bool) { 69 | let (namespace, pod_name) = { 70 | let app = self.app.lock().await; 71 | if let Some(p) = app.data.pods.get_selected_item_copy() { 72 | (p.namespace, p.name) 73 | } else { 74 | ( 75 | std::env::var("NAMESPACE").unwrap_or_else(|_| "default".into()), 76 | "".into(), 77 | ) 78 | } 79 | }; 80 | let cont_name = { 81 | let app = self.app.lock().await; 82 | if let Some(name) = &app.data.selected.container { 83 | name.to_owned() 84 | } else { 85 | "".into() 86 | } 87 | }; 88 | 89 | if pod_name.is_empty() || cont_name.is_empty() { 90 | return; 91 | } 92 | let api: Api = Api::namespaced(self.client.clone(), &namespace); 93 | let lp = LogParams { 94 | container: Some(cont_name.clone()), 95 | follow: true, 96 | previous: false, 97 | // timestamps: true, 98 | // tail only on first call to avoid duplicates on disconnect 99 | tail_lines: if tail { Some(10) } else { Some(0) }, 100 | ..Default::default() 101 | }; 102 | 103 | { 104 | let mut app = self.app.lock().await; 105 | app.is_streaming = true; 106 | } 107 | 108 | // TODO investigate why this gives wrong log some times 109 | match api.log_stream(&pod_name, &lp).await { 110 | Ok(logs) => { 111 | // set a timeout so we dont wait for next item and block the thread 112 | let logs = logs.lines(); 113 | let logs = logs.timeout(Duration::from_secs(2)); 114 | tokio::pin!(logs); 115 | 116 | #[allow(clippy::mixed_read_write_in_expression)] 117 | while let (true, Ok(Some(Ok(line)))) = ( 118 | { 119 | let app = self.app.lock().await; 120 | app.get_current_route().active_block == ActiveBlock::Logs 121 | && app.data.logs.id == cont_name 122 | }, 123 | logs.try_next().await, 124 | ) { 125 | let line = line.trim().to_string(); 126 | if !line.is_empty() { 127 | let mut app = self.app.lock().await; 
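// The guard taken on the previous line is scoped to this inner block, so
// the shared App mutex is released again before the loop awaits the next
// log line; holding a tokio::sync::Mutex guard across that await would
// stall every other task that needs to lock the app state.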
128 | app.data.logs.add_record(line); 129 | } 130 | } 131 | } 132 | Err(e) => { 133 | self 134 | .handle_error(anyhow!("Failed to stream logs. {:}", e)) 135 | .await; 136 | } 137 | }; 138 | 139 | let mut app = self.app.lock().await; 140 | app.is_streaming = false; 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /src/ui/help.rs: -------------------------------------------------------------------------------- 1 | use ratatui::{ 2 | layout::{Constraint, Rect}, 3 | widgets::{Row, Table}, 4 | Frame, 5 | }; 6 | 7 | use super::{ 8 | utils::{ 9 | layout_block_active_span, style_highlight, style_primary, style_secondary, 10 | title_with_dual_style, vertical_chunks, 11 | }, 12 | HIGHLIGHT, 13 | }; 14 | use crate::app::App; 15 | 16 | pub fn draw_help(f: &mut Frame<'_>, app: &mut App, area: Rect) { 17 | let chunks = vertical_chunks(vec![Constraint::Percentage(100)], area); 18 | 19 | // Create a one-column table to avoid flickering due to non-determinism when 20 | // resolving constraints on widths of table columns. 21 | let format_row = 22 | |r: &Vec| -> Vec { vec![format!("{:50}{:40}{:20}", r[0], r[1], r[2])] }; 23 | 24 | let header = ["Key", "Action", "Context"]; 25 | let header = format_row(&header.iter().map(|s| s.to_string()).collect()); 26 | 27 | let help_docs = app 28 | .help_docs 29 | .items 30 | .iter() 31 | .map(format_row) 32 | .collect::>>(); 33 | let help_docs = &help_docs[0_usize..]; 34 | 35 | let rows = help_docs 36 | .iter() 37 | .map(|item| Row::new(item.clone()).style(style_primary(app.light_theme))); 38 | 39 | let title = title_with_dual_style(" Help ".into(), "| close ".into(), app.light_theme); 40 | 41 | let help_menu = Table::new(rows, [Constraint::Percentage(100)]) 42 | .header( 43 | Row::new(header) 44 | .style(style_secondary(app.light_theme)) 45 | .bottom_margin(0), 46 | ) 47 | .block(layout_block_active_span(title, app.light_theme)) 48 | .row_highlight_style(style_highlight()) 49 | .highlight_symbol(HIGHLIGHT); 50 | f.render_stateful_widget(help_menu, chunks[0], &mut app.help_docs.state); 51 | } 52 | 53 | #[cfg(test)] 54 | mod tests { 55 | use ratatui::{ 56 | backend::TestBackend, 57 | buffer::Buffer, 58 | layout::Position, 59 | style::{Modifier, Style}, 60 | Terminal, 61 | }; 62 | 63 | use super::*; 64 | use crate::ui::utils::{COLOR_CYAN, COLOR_WHITE, COLOR_YELLOW}; 65 | 66 | #[test] 67 | fn test_draw_help() { 68 | let backend = TestBackend::new(100, 7); 69 | let mut terminal = Terminal::new(backend).unwrap(); 70 | 71 | terminal 72 | .draw(|f| { 73 | let size = f.area(); 74 | let mut app = App::default(); 75 | draw_help(f, &mut app, size); 76 | }) 77 | .unwrap(); 78 | 79 | let mut expected = Buffer::with_lines(vec![ 80 | "┌ Help | close ──────────────────────────────────────────────────────────────────────────────┐", 81 | "│ Key Action Conte│", 82 | "│=> | Quit Gener│", 83 | "│ Close child page/Go back Gener│", 84 | "│ Help page Gener│", 85 | "│ Select table row Gener│", 86 | "└──────────────────────────────────────────────────────────────────────────────────────────────────┘", 87 | ]); 88 | // set row styles 89 | // First row heading style 90 | for col in 0..=99 { 91 | match col { 92 | 0 | 21..=99 => { 93 | expected 94 | .cell_mut(Position::new(col, 0)) 95 | .unwrap() 96 | .set_style(Style::default().fg(COLOR_YELLOW)); 97 | } 98 | 1..=6 => { 99 | expected.cell_mut(Position::new(col, 0)).unwrap().set_style( 100 | Style::default() 101 | .fg(COLOR_YELLOW) 102 | .add_modifier(Modifier::BOLD), 103 | ); 104 | } 105 | _ => { 
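// Columns 7..=20 fall through to this arm: they hold the second, bolded
// half of the dual-styled block title (the close hint), which
// title_with_dual_style renders in white, while the border cell and the
// rest of the top line were covered by the yellow arms above.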
106 | expected.cell_mut(Position::new(col, 0)).unwrap().set_style( 107 | Style::default() 108 | .fg(COLOR_WHITE) 109 | .add_modifier(Modifier::BOLD), 110 | ); 111 | } 112 | } 113 | } 114 | 115 | // second row table headings 116 | for col in 0..=99 { 117 | expected 118 | .cell_mut(Position::new(col, 1)) 119 | .unwrap() 120 | .set_style(Style::default().fg(COLOR_YELLOW)); 121 | } 122 | 123 | // first table data row style 124 | for col in 0..=99 { 125 | match col { 126 | 1..=98 => { 127 | expected.cell_mut(Position::new(col, 2)).unwrap().set_style( 128 | Style::default() 129 | .fg(COLOR_CYAN) 130 | .add_modifier(Modifier::REVERSED), 131 | ); 132 | } 133 | _ => { 134 | expected 135 | .cell_mut(Position::new(col, 2)) 136 | .unwrap() 137 | .set_style(Style::default().fg(COLOR_YELLOW)); 138 | } 139 | } 140 | } 141 | 142 | // rows 143 | for row in 3..=5 { 144 | for col in 0..=99 { 145 | match col { 146 | 1..=98 => { 147 | expected 148 | .cell_mut(Position::new(col, row)) 149 | .unwrap() 150 | .set_style(Style::default().fg(COLOR_CYAN)); 151 | } 152 | _ => { 153 | expected 154 | .cell_mut(Position::new(col, row)) 155 | .unwrap() 156 | .set_style(Style::default().fg(COLOR_YELLOW)); 157 | } 158 | } 159 | } 160 | } 161 | 162 | // last row 163 | for col in 0..=99 { 164 | expected 165 | .cell_mut(Position::new(col, 6)) 166 | .unwrap() 167 | .set_style(Style::default().fg(COLOR_YELLOW)); 168 | } 169 | 170 | terminal.backend().assert_buffer(&expected); 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /src/ui/mod.rs: -------------------------------------------------------------------------------- 1 | use rand::Rng; 2 | mod help; 3 | mod overview; 4 | pub mod resource_tabs; 5 | pub mod utils; 6 | 7 | use ratatui::{ 8 | layout::{Alignment, Constraint, Rect}, 9 | style::Modifier, 10 | text::{Line, Span, Text}, 11 | widgets::{Block, Borders, Paragraph, Tabs, Wrap}, 12 | Frame, 13 | }; 14 | 15 | use self::{ 16 | help::draw_help, 17 | overview::draw_overview, 18 | utils::{ 19 | horizontal_chunks_with_margin, style_default, style_failure, style_header, style_header_text, 20 | style_help, style_main_background, style_primary, style_secondary, vertical_chunks, 21 | }, 22 | }; 23 | use crate::app::{ 24 | contexts::ContextResource, metrics::UtilizationResource, models::AppResource, ActiveBlock, App, 25 | RouteId, 26 | }; 27 | 28 | pub static HIGHLIGHT: &str = "=> "; 29 | 30 | pub fn draw(f: &mut Frame<'_>, app: &mut App) { 31 | let block = Block::default().style(style_main_background(app.light_theme)); 32 | f.render_widget(block, f.area()); 33 | 34 | let chunks = if !app.api_error.is_empty() { 35 | let chunks = vertical_chunks( 36 | vec![ 37 | Constraint::Length(1), // title 38 | Constraint::Length(3), // header tabs 39 | Constraint::Length(3), // error 40 | Constraint::Min(0), // main tabs 41 | ], 42 | f.area(), 43 | ); 44 | draw_app_error(f, app, chunks[2]); 45 | chunks 46 | } else { 47 | vertical_chunks( 48 | vec![ 49 | Constraint::Length(1), // title 50 | Constraint::Length(3), // header tabs 51 | Constraint::Min(0), // main tabs 52 | ], 53 | f.area(), 54 | ) 55 | }; 56 | 57 | draw_app_title(f, app, chunks[0]); 58 | // draw header tabs amd text 59 | draw_app_header(f, app, chunks[1]); 60 | 61 | let last_chunk = chunks[chunks.len() - 1]; 62 | match app.get_current_route().id { 63 | RouteId::HelpMenu => { 64 | draw_help(f, app, last_chunk); 65 | } 66 | RouteId::Contexts => { 67 | ContextResource::render(ActiveBlock::Contexts, f, app, last_chunk); 68 | } 69 | 
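// Every route receives the same trailing layout chunk; HelpMenu, Contexts
// and Utilization draw dedicated pages, and any other route falls through
// to the resource overview in the catch-all arm below.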
RouteId::Utilization => { 70 | UtilizationResource::render(ActiveBlock::Utilization, f, app, last_chunk); 71 | } 72 | _ => { 73 | draw_overview(f, app, last_chunk); 74 | } 75 | } 76 | } 77 | 78 | fn draw_app_title(f: &mut Frame<'_>, app: &App, area: Rect) { 79 | let title = Paragraph::new(Span::styled( 80 | app.title, 81 | style_header_text(app.light_theme).add_modifier(Modifier::BOLD), 82 | )) 83 | .style(style_header()) 84 | .block(Block::default()) 85 | .alignment(Alignment::Left); 86 | f.render_widget(title, area); 87 | 88 | let text = format!( 89 | "v{} with ♥ in Rust {} ", 90 | env!("CARGO_PKG_VERSION"), 91 | nw_loading_indicator(app.is_loading) 92 | ); 93 | 94 | let meta = Paragraph::new(Span::styled(text, style_header_text(app.light_theme))) 95 | .style(style_header()) 96 | .block(Block::default()) 97 | .alignment(Alignment::Right); 98 | f.render_widget(meta, area); 99 | } 100 | 101 | // loading animation frames 102 | const FRAMES: &[&str] = &["⠋⠴", "⠦⠙", "⠏⠼", "⠧⠹", "⠯⠽"]; 103 | 104 | fn nw_loading_indicator<'a>(loading: bool) -> &'a str { 105 | if loading { 106 | FRAMES[rand::thread_rng().gen_range(0..FRAMES.len())] 107 | } else { 108 | "" 109 | } 110 | } 111 | 112 | fn draw_app_header(f: &mut Frame<'_>, app: &App, area: Rect) { 113 | let chunks = 114 | horizontal_chunks_with_margin(vec![Constraint::Length(60), Constraint::Min(0)], area, 1); 115 | 116 | let titles: Vec<_> = app 117 | .main_tabs 118 | .items 119 | .iter() 120 | .map(|t| Line::from(Span::styled(&t.title, style_default(app.light_theme)))) 121 | .collect(); 122 | let tabs = Tabs::new(titles) 123 | .block(Block::default().borders(Borders::ALL)) 124 | .highlight_style(style_secondary(app.light_theme)) 125 | .select(app.main_tabs.index); 126 | 127 | f.render_widget(tabs, area); 128 | draw_header_text(f, app, chunks[1]); 129 | } 130 | 131 | fn draw_header_text(f: &mut Frame<'_>, app: &App, area: Rect) { 132 | let text = match app.get_current_route().id { 133 | RouteId::Contexts => vec![Line::from("<↑↓> scroll | select | help ")], 134 | RouteId::Home => vec![Line::from( 135 | "<←→> switch tabs | select block | <↑↓> scroll | select | help ", 136 | )], 137 | RouteId::Utilization => vec![Line::from( 138 | "<↑↓> scroll | cycle through grouping | help ", 139 | )], 140 | RouteId::HelpMenu => vec![], 141 | }; 142 | let paragraph = Paragraph::new(text) 143 | .style(style_help(app.light_theme)) 144 | .block(Block::default()) 145 | .alignment(Alignment::Right); 146 | f.render_widget(paragraph, area); 147 | } 148 | 149 | fn draw_app_error(f: &mut Frame<'_>, app: &App, size: Rect) { 150 | let block = Block::default() 151 | .title(" Error | close ") 152 | .style(style_failure(app.light_theme)) 153 | .borders(Borders::ALL); 154 | 155 | let text = Text::from(app.api_error.clone()); 156 | let text = text.patch_style(style_failure(app.light_theme)); 157 | 158 | let paragraph = Paragraph::new(text) 159 | .style(style_primary(app.light_theme)) 160 | .block(block) 161 | .wrap(Wrap { trim: true }); 162 | f.render_widget(paragraph, size); 163 | } 164 | -------------------------------------------------------------------------------- /test_data/clusterrole_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | creationTimestamp: "2022-03-02T16:50:53Z" 7 | name: admin-user 8 | resourceVersion: "70549225" 9 | uid: e86a4046-d74b-457e-9e93-6269a675284d 10 | managedFields: [] 11 | roleRef: 
12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: cluster-admin 15 | subjects: 16 | - kind: ServiceAccount 17 | name: power-user 18 | namespace: kube-system 19 | - kind: ServiceAccount 20 | name: admin-user 21 | namespace: kube-system 22 | - apiVersion: rbac.authorization.k8s.io/v1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | annotations: 26 | kubectl.kubernetes.io/last-applied-configuration: | 27 | {"apiVersion":"rbac.authorization.k8s.io/v1","kind":"ClusterRoleBinding","metadata":{"annotations":{},"name":"aws-node"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"aws-node"},"subjects":[{"kind":"ServiceAccount","name":"aws-node","namespace":"kube-system"}]} 28 | meta.helm.sh/release-name: aws-cni 29 | meta.helm.sh/release-namespace: kube-system 30 | creationTimestamp: "2022-03-02T16:42:18Z" 31 | labels: 32 | app.kubernetes.io/managed-by: Helm 33 | name: aws-node 34 | resourceVersion: "70549265" 35 | uid: f6dd32c5-f853-4a8b-a98b-0726d4702cbe 36 | managedFields: [] 37 | roleRef: 38 | apiGroup: rbac.authorization.k8s.io 39 | kind: ClusterRole 40 | name: aws-node 41 | subjects: 42 | - kind: ServiceAccount 43 | name: aws-node 44 | namespace: kube-system 45 | kind: List 46 | metadata: 47 | resourceVersion: "" 48 | selfLink: "" 49 | -------------------------------------------------------------------------------- /test_data/cronjobs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: batch/v1 4 | kind: CronJob 5 | metadata: 6 | annotations: 7 | kubectl.kubernetes.io/last-applied-configuration: | 8 | {"apiVersion":"batch/v1","kind":"CronJob","metadata":{"annotations":{},"name":"hello","namespace":"default"},"spec":{"jobTemplate":{"spec":{"template":{"spec":{"containers":[{"command":["/bin/sh","-c","date; echo Hello from the Kubernetes cluster"],"image":"busybox","imagePullPolicy":"IfNotPresent","name":"hello"}],"restartPolicy":"OnFailure"}}}},"schedule":"*/1 * * * *"}} 9 | creationTimestamp: "2021-07-05T09:37:21Z" 10 | managedFields: [] 11 | name: hello 12 | namespace: default 13 | resourceVersion: "769" 14 | uid: 2e0820c5-0271-4ac0-97e1-ca4ec9fb5d8e 15 | spec: 16 | concurrencyPolicy: Allow 17 | failedJobsHistoryLimit: 1 18 | jobTemplate: 19 | metadata: 20 | creationTimestamp: null 21 | spec: 22 | template: 23 | metadata: 24 | creationTimestamp: null 25 | spec: 26 | containers: 27 | - command: 28 | - /bin/sh 29 | - -c 30 | - date; echo Hello from the Kubernetes cluster 31 | image: busybox 32 | imagePullPolicy: IfNotPresent 33 | name: hello 34 | resources: {} 35 | terminationMessagePath: /dev/termination-log 36 | terminationMessagePolicy: File 37 | dnsPolicy: ClusterFirst 38 | restartPolicy: OnFailure 39 | schedulerName: default-scheduler 40 | securityContext: {} 41 | terminationGracePeriodSeconds: 30 42 | schedule: '*/1 * * * *' 43 | successfulJobsHistoryLimit: 3 44 | suspend: false 45 | status: 46 | lastScheduleTime: "2021-07-05T09:39:00Z" 47 | kind: List 48 | metadata: 49 | resourceVersion: "" 50 | selfLink: "" 51 | -------------------------------------------------------------------------------- /test_data/daemonsets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: apps/v1 4 | kind: DaemonSet 5 | metadata: 6 | annotations: 7 | deprecated.daemonset.template.generation: "1" 8 | objectset.rio.cattle.io/applied: 
H4sIAAAAAAAA/8xUTW/jNhD9K8WcKUVaf8QW0MMim0PQrmPY3l4CIxhRo5g1RQrkSI1h6L8XlJ21s/kqtj306OHj8+N7o7eHrTIFZPAFqbJmSQwCsFZ/kPPKGsgA69pftCkIqIixQEbI9mCwIsjAt1LnETukUm1B9GNfowxn2yanyO88UwUCpCNkZc1KVeQZqxoy02gtQGNO2gdOm/9Jkj1x7JSNJTJripW92KDfQAblIB1ejtLRpzIvL5PxaFQMZJKOi8F4QtMkTSfTwaQcE4IIsqQ17KzW5OLtwJ+xGVuQJ02SrQusqD1BJwCNsdxLfFeMKg7PPvGDeBNs/zLkood2CxlctKn45Tdlil+X5Fol6cN7R49P7n4Mf837TkAPWFBJjowkD9nd/nnIfb7HTTjJeyGg6V8/pXwwyqdllJbpOBpigtG0vMwjORlRmabpZU6XIfGTQxm7hrp1J8DXJIO9pwT2UCHLze/f1wDr+sVmdZ0ApqrWyNRfOVvFf7BZr1G+vyW+lT+8vjtTH66hMuQOVh6ROo9q6ziaJCBAVfgQhg6N3JC72GpV1+QinWdtEn+KAyag32bYWM9z6xiySSJOf3ka1c6ylVZDBqurOXRrAWTac77l4up+frtYgYAWdRNGkwQ68R3w5Xq5up8vble3Z5Ce7EfMhyw387PzNImHgzidDON0nPTKHHnbuH759t3Rnnmj9dxqJXeQwU05szx35MmEDvIkG6d4d2UN0yP3rmONudKKFR1SLQrI7mB2vbr//OXrzQzWXXem6snM4XDwb/M4UJwCGQ4HLxLpZz8VSWD/DzJ5jeZ/EspaAFtN7qle7/awpcAfyjhyVlMcGssZYvLh86vQ86FZ63Crb+rrR+XZgwAqS5IMGczsUm6oaDT1D3+f8vipR7VGQz/FfOUUK4n6c1FY42+N3r1K061DVTR1gUxLdsj0sAvO8K4OKSys1so8fOvPQYB79ruvw8dvBltUGnNNkKVdXz2M3PQOy8Y5Mjxrqpzck8wCskSA6WdflffPxgV55ah4+8aCsNhBlnTd3wEAAP//kF8EoBAIAAA 9 | objectset.rio.cattle.io/id: svccontroller 10 | objectset.rio.cattle.io/owner-gvk: /v1, Kind=Service 11 | objectset.rio.cattle.io/owner-name: traefik 12 | objectset.rio.cattle.io/owner-namespace: kube-system 13 | creationTimestamp: "2021-07-05T09:36:45Z" 14 | generation: 1 15 | labels: 16 | objectset.rio.cattle.io/hash: f31475152fbf70655d3c016d368e90118938f6ea 17 | svccontroller.k3s.cattle.io/nodeselector: "false" 18 | managedFields: [] 19 | name: svclb-traefik 20 | namespace: kube-system 21 | ownerReferences: 22 | - apiVersion: v1 23 | controller: true 24 | kind: Service 25 | name: traefik 26 | uid: 9eb35b9f-1f16-4a0a-9f7b-c85ef1117be7 27 | resourceVersion: "631" 28 | uid: ddce45fd-18ca-49fc-97d3-a8a5b7309f0c 29 | spec: 30 | revisionHistoryLimit: 10 31 | selector: 32 | matchLabels: 33 | app: svclb-traefik 34 | template: 35 | metadata: 36 | creationTimestamp: null 37 | labels: 38 | app: svclb-traefik 39 | svccontroller.k3s.cattle.io/svcname: traefik 40 | spec: 41 | containers: 42 | - env: 43 | - name: SRC_PORT 44 | value: "80" 45 | - name: DEST_PROTO 46 | value: TCP 47 | - name: DEST_PORT 48 | value: "80" 49 | - name: DEST_IP 50 | value: 10.43.184.160 51 | image: rancher/klipper-lb:v0.2.0 52 | imagePullPolicy: IfNotPresent 53 | name: lb-port-80 54 | ports: 55 | - containerPort: 80 56 | hostPort: 80 57 | name: lb-port-80 58 | protocol: TCP 59 | resources: {} 60 | securityContext: 61 | capabilities: 62 | add: 63 | - NET_ADMIN 64 | terminationMessagePath: /dev/termination-log 65 | terminationMessagePolicy: File 66 | - env: 67 | - name: SRC_PORT 68 | value: "443" 69 | - name: DEST_PROTO 70 | value: TCP 71 | - name: DEST_PORT 72 | value: "443" 73 | - name: DEST_IP 74 | value: 10.43.184.160 75 | image: rancher/klipper-lb:v0.2.0 76 | imagePullPolicy: IfNotPresent 77 | name: lb-port-443 78 | ports: 79 | - containerPort: 443 80 | hostPort: 443 81 | name: lb-port-443 82 | protocol: TCP 83 | resources: {} 84 | securityContext: 85 | capabilities: 86 | add: 87 | - NET_ADMIN 88 | terminationMessagePath: /dev/termination-log 89 | terminationMessagePolicy: File 90 | dnsPolicy: ClusterFirst 91 | restartPolicy: Always 92 | schedulerName: default-scheduler 93 | securityContext: {} 94 | terminationGracePeriodSeconds: 30 95 | tolerations: 96 | - effect: NoSchedule 97 | key: node-role.kubernetes.io/master 98 | operator: Exists 99 | - effect: NoSchedule 100 | key: node-role.kubernetes.io/control-plane 101 | operator: Exists 102 | - key: CriticalAddonsOnly 
103 | operator: Exists 104 | updateStrategy: 105 | rollingUpdate: 106 | maxSurge: 0 107 | maxUnavailable: 1 108 | type: RollingUpdate 109 | status: 110 | currentNumberScheduled: 1 111 | desiredNumberScheduled: 1 112 | numberAvailable: 1 113 | numberMisscheduled: 0 114 | numberReady: 1 115 | observedGeneration: 1 116 | updatedNumberScheduled: 1 117 | kind: List 118 | metadata: 119 | resourceVersion: "" 120 | selfLink: "" 121 | -------------------------------------------------------------------------------- /test_data/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | creationTimestamp: "2023-05-24T16:14:32Z" 7 | generation: 1 8 | name: ingdefault 9 | namespace: default 10 | managedFields: [] 11 | resourceVersion: "39551" 12 | uid: 5facbac2-4678-4702-a731-4c616c0805b2 13 | spec: 14 | defaultBackend: 15 | service: 16 | name: defaultsvc 17 | port: 18 | name: http 19 | ingressClassName: default 20 | rules: 21 | - host: foo.com 22 | http: 23 | paths: 24 | - backend: 25 | service: 26 | name: svc 27 | port: 28 | number: 8080 29 | path: / 30 | pathType: Prefix 31 | tls: 32 | - hosts: 33 | - foo.com 34 | secretName: secret1 35 | status: 36 | loadBalancer: {} 37 | - apiVersion: networking.k8s.io/v1 38 | kind: Ingress 39 | metadata: 40 | creationTimestamp: "2023-05-24T16:20:48Z" 41 | generation: 1 42 | name: test 43 | namespace: default 44 | resourceVersion: "40022" 45 | managedFields: [] 46 | uid: b56ea7ab-5f82-4112-bd7b-cf5fa1d31891 47 | spec: 48 | defaultBackend: 49 | service: 50 | name: test 51 | port: 52 | number: 5701 53 | ingressClassName: nginx 54 | status: 55 | loadBalancer: 56 | ingress: 57 | - ip: 192.168.49.2 58 | - apiVersion: networking.k8s.io/v1 59 | kind: Ingress 60 | metadata: 61 | annotations: 62 | kubectl.kubernetes.io/last-applied-configuration: | 63 | {"apiVersion":"networking.k8s.io/v1","kind":"Ingress","metadata":{"annotations":{},"name":"test-ingress","namespace":"dev"},"spec":{"ingressClassName":"nginx","rules":[{"host":"demo.apps.mlopshub.com","http":{"paths":[{"backend":{"service":{"name":"hello-service","port":{"number":80}}},"path":"/","pathType":"Prefix"}]}}]}} 64 | creationTimestamp: "2023-05-24T16:22:23Z" 65 | managedFields: [] 66 | generation: 1 67 | name: test-ingress 68 | namespace: dev 69 | resourceVersion: "40095" 70 | uid: 75a4a34a-0859-4e8f-857b-c6769c8f79e0 71 | spec: 72 | ingressClassName: nginx 73 | rules: 74 | - host: demo.apps.mlopshub.com 75 | http: 76 | paths: 77 | - backend: 78 | service: 79 | name: hello-service 80 | port: 81 | number: 80 82 | path: / 83 | pathType: Prefix 84 | status: 85 | loadBalancer: 86 | ingress: 87 | - ip: 192.168.49.2 88 | kind: List 89 | metadata: 90 | resourceVersion: "" 91 | -------------------------------------------------------------------------------- /test_data/node_metrics.yaml: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "NodeMetricsList", 3 | "apiVersion": "metrics.k8s.io/v1beta1", 4 | "metadata": { 5 | "selfLink": "/apis/metrics.k8s.io/v1beta1/nodes" 6 | }, 7 | "items": [ 8 | { 9 | "metadata": { 10 | "name": "k3d-my-kdash-cluster-server-0", 11 | "selfLink": "/apis/metrics.k8s.io/v1beta1/nodes/k3d-my-kdash-cluster-server-0", 12 | "creationTimestamp": "2021-06-08T08:38:53Z" 13 | }, 14 | "timestamp": "2021-06-08T08:38:43Z", 15 | "window": "30s", 16 | "usage": { 17 | "cpu": "162953844n", 18 | "memory": "582980Ki" 19 | } 20 
| }, 21 | { 22 | "metadata": { 23 | "name": "k3d-my-kdash-cluster-server-1", 24 | "selfLink": "/apis/metrics.k8s.io/v1beta1/nodes/k3d-my-kdash-cluster-server-0", 25 | "creationTimestamp": "2021-06-08T08:38:53Z" 26 | }, 27 | "timestamp": "2021-06-08T08:38:43Z", 28 | "window": "30s", 29 | "usage": { 30 | "cpu": "102953844n", 31 | "memory": "282980Ki" 32 | } 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /test_data/nodes.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: Node 5 | metadata: 6 | annotations: 7 | flannel.alpha.coreos.com/backend-data: '{"VtepMAC":"c2:9c:34:d2:b7:16"}' 8 | flannel.alpha.coreos.com/backend-type: vxlan 9 | flannel.alpha.coreos.com/kube-subnet-manager: "true" 10 | flannel.alpha.coreos.com/public-ip: 172.20.0.2 11 | k3s.io/node-args: '["server","--tls-san","0.0.0.0"]' 12 | k3s.io/node-config-hash: D7CBSUVNY5FSJVW3U5ZIAK74FHUOS5TZOOMQE6FTIWUTHA7QHGKQ==== 13 | k3s.io/node-env: '{"K3S_KUBECONFIG_OUTPUT":"/output/kubeconfig.yaml","K3S_TOKEN":"********"}' 14 | node.alpha.kubernetes.io/ttl: "0" 15 | volumes.kubernetes.io/controller-managed-attach-detach: "true" 16 | creationTimestamp: "2021-05-10T21:48:07Z" 17 | finalizers: 18 | - wrangler.cattle.io/node 19 | labels: 20 | beta.kubernetes.io/arch: amd64 21 | beta.kubernetes.io/instance-type: k3s 22 | beta.kubernetes.io/os: linux 23 | k3s.io/hostname: gke-hello-hipster-default-pool-9e6f6ffb-q16l 24 | k3s.io/internal-ip: 172.20.0.2 25 | kubernetes.io/arch: amd64 26 | kubernetes.io/hostname: gke-hello-hipster-default-pool-9e6f6ffb-q16l 27 | kubernetes.io/os: linux 28 | node-role.kubernetes.io/control-plane: "true" 29 | node-role.kubernetes.io/master: "true" 30 | node.kubernetes.io/instance-type: k3s 31 | managedFields: [] 32 | name: gke-hello-hipster-default-pool-9e6f6ffb-q16l 33 | resourceVersion: "8762" 34 | uid: 32e5afcb-a116-456b-992a-b52518d7b0b0 35 | spec: 36 | podCIDR: 10.42.0.0/24 37 | podCIDRs: 38 | - 10.42.0.0/24 39 | providerID: k3s://gke-hello-hipster-default-pool-9e6f6ffb-q16l 40 | taints: 41 | - effect: NoSchedule 42 | key: node.kubernetes.io/disk-pressure 43 | timeAdded: "2021-05-10T22:01:09Z" 44 | status: 45 | addresses: 46 | - address: 172.20.0.2 47 | type: InternalIP 48 | - address: gke-hello-hipster-default-pool-9e6f6ffb-q16l 49 | type: Hostname 50 | allocatable: 51 | cpu: "8" 52 | ephemeral-storage: "69773255011" 53 | hugepages-1Gi: "0" 54 | hugepages-2Mi: "0" 55 | memory: 32734444Ki 56 | pods: "110" 57 | capacity: 58 | cpu: "8" 59 | ephemeral-storage: 71724152Ki 60 | hugepages-1Gi: "0" 61 | hugepages-2Mi: "0" 62 | memory: 32734444Ki 63 | pods: "110" 64 | conditions: 65 | - lastHeartbeatTime: "2021-05-10T21:48:20Z" 66 | lastTransitionTime: "2021-05-10T21:48:20Z" 67 | message: Flannel is running on this node 68 | reason: FlannelIsUp 69 | status: "False" 70 | type: NetworkUnavailable 71 | - lastHeartbeatTime: "2021-05-11T10:36:51Z" 72 | lastTransitionTime: "2021-05-10T21:48:06Z" 73 | message: kubelet has sufficient memory available 74 | reason: KubeletHasSufficientMemory 75 | status: "False" 76 | type: MemoryPressure 77 | - lastHeartbeatTime: "2021-05-11T10:36:51Z" 78 | lastTransitionTime: "2021-05-10T22:01:09Z" 79 | message: kubelet has disk pressure 80 | reason: KubeletHasDiskPressure 81 | status: "True" 82 | type: DiskPressure 83 | - lastHeartbeatTime: "2021-05-11T10:36:51Z" 84 | lastTransitionTime: "2021-05-10T21:48:06Z" 85 | message: kubelet has 
sufficient PID available 86 | reason: KubeletHasSufficientPID 87 | status: "False" 88 | type: PIDPressure 89 | - lastHeartbeatTime: "2021-05-11T10:36:51Z" 90 | lastTransitionTime: "2021-05-10T21:48:17Z" 91 | message: kubelet is posting ready status 92 | reason: KubeletReady 93 | status: "True" 94 | type: Ready 95 | daemonEndpoints: 96 | kubeletEndpoint: 97 | Port: 10250 98 | images: 99 | - names: 100 | - docker.io/rancher/library-traefik@sha256:3ba3ed48c4632f2b02671923950b30b5b7f1b556e559ce15446d1f5d648a037d 101 | - docker.io/rancher/library-traefik:1.7.19 102 | sizeBytes: 24011762 103 | - names: 104 | - docker.io/rancher/local-path-provisioner@sha256:9666b1635fec95d4e2251661e135c90678b8f45fd0f8324c55db99c80e2a958c 105 | - docker.io/rancher/local-path-provisioner:v0.0.19 106 | sizeBytes: 13585626 107 | - names: 108 | - docker.io/rancher/coredns-coredns@sha256:8b675d12eb9faf3121475b12db478ac2cf5129046d92137aa9067dd04f3b4e10 109 | - docker.io/rancher/coredns-coredns:1.8.0 110 | sizeBytes: 12944537 111 | - names: 112 | - docker.io/rancher/metrics-server@sha256:b85628b103169d7db52a32a48b46d8942accb7bde3709c0a4888a23d035f9f1e 113 | - docker.io/rancher/metrics-server:v0.3.6 114 | sizeBytes: 10543877 115 | - names: 116 | - docker.io/rancher/klipper-lb@sha256:2fb97818f5d64096d635bc72501a6cb2c8b88d5d16bc031cf71b5b6460925e4a 117 | - docker.io/rancher/klipper-lb:v0.1.2 118 | sizeBytes: 2708293 119 | nodeInfo: 120 | architecture: amd64 121 | bootID: 6ac9f263-9a7f-4d0d-9c22-0e51e9f3b06c 122 | containerRuntimeVersion: containerd://1.4.4-k3s1 123 | kernelVersion: 5.11.13-100.fc32.x86_64 124 | kubeProxyVersion: v1.20.6+k3s1 125 | kubeletVersion: v1.20.6+k3s1 126 | machineID: "" 127 | operatingSystem: linux 128 | osImage: Unknown 129 | systemUUID: 4c4c4544-0039-4e10-8047-b5c04f544332 130 | kind: List 131 | metadata: 132 | resourceVersion: "" 133 | selfLink: "" 134 | -------------------------------------------------------------------------------- /test_data/ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: Namespace 5 | metadata: 6 | creationTimestamp: "2021-05-10T21:48:02Z" 7 | managedFields: [] 8 | name: default 9 | resourceVersion: "4" 10 | uid: 9cbc82d2-bc3b-4997-bedf-662e86110097 11 | spec: 12 | finalizers: 13 | - kubernetes 14 | status: 15 | phase: Active 16 | - apiVersion: v1 17 | kind: Namespace 18 | metadata: 19 | creationTimestamp: "2021-05-10T21:48:02Z" 20 | managedFields: [] 21 | name: kube-system 22 | resourceVersion: "16" 23 | uid: 6905938f-7c22-453b-8a26-e633808fcff3 24 | spec: 25 | finalizers: 26 | - kubernetes 27 | status: 28 | phase: Active 29 | - apiVersion: v1 30 | kind: Namespace 31 | metadata: 32 | creationTimestamp: "2021-05-10T21:48:02Z" 33 | managedFields: [] 34 | name: kube-public 35 | resourceVersion: "20" 36 | uid: 4b8edf12-46b0-4e80-843b-e0c47f301e4c 37 | spec: 38 | finalizers: 39 | - kubernetes 40 | status: 41 | phase: Active 42 | - apiVersion: v1 43 | kind: Namespace 44 | metadata: 45 | creationTimestamp: "2021-05-10T21:48:02Z" 46 | managedFields: [] 47 | name: kube-node-lease 48 | resourceVersion: "23" 49 | uid: b6a2061b-3172-405d-b408-67c6de68e656 50 | spec: 51 | finalizers: 52 | - kubernetes 53 | status: 54 | phase: Active 55 | kind: List 56 | metadata: 57 | resourceVersion: "" 58 | selfLink: "" 59 | -------------------------------------------------------------------------------- /test_data/pvcs.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | annotations: 7 | pv.kubernetes.io/bind-completed: "yes" 8 | pv.kubernetes.io/bound-by-controller: "yes" 9 | volume.beta.kubernetes.io/storage-provisioner: ebs.csi.aws.com 10 | volume.kubernetes.io/selected-node: ip-10-0-10-105.eu-west-1.compute.internal 11 | volume.kubernetes.io/storage-provisioner: ebs.csi.aws.com 12 | creationTimestamp: "2023-06-30T17:27:23Z" 13 | finalizers: 14 | - kubernetes.io/pvc-protection 15 | labels: 16 | app: consul 17 | app.kubernetes.io/name: consul 18 | managedFields: [] 19 | name: data-consul-0 20 | namespace: jhipster 21 | resourceVersion: "3975" 22 | uid: 149f1f3b-c0fd-471d-bc3e-d039369755ef 23 | spec: 24 | accessModes: 25 | - ReadWriteOnce 26 | resources: 27 | requests: 28 | storage: 8Gi 29 | storageClassName: gp2 30 | volumeMode: Filesystem 31 | volumeName: pvc-149f1f3b-c0fd-471d-bc3e-d039369755ef 32 | status: 33 | accessModes: 34 | - ReadWriteOnce 35 | capacity: 36 | storage: 8Gi 37 | phase: Bound 38 | - apiVersion: v1 39 | kind: PersistentVolumeClaim 40 | metadata: 41 | annotations: 42 | pv.kubernetes.io/bind-completed: "yes" 43 | pv.kubernetes.io/bound-by-controller: "yes" 44 | volume.beta.kubernetes.io/storage-provisioner: ebs.csi.aws.com 45 | volume.kubernetes.io/selected-node: ip-10-0-12-221.eu-west-1.compute.internal 46 | volume.kubernetes.io/storage-provisioner: ebs.csi.aws.com 47 | creationTimestamp: "2023-06-30T17:27:23Z" 48 | finalizers: 49 | - kubernetes.io/pvc-protection 50 | labels: 51 | app: consul 52 | app.kubernetes.io/name: consul 53 | name: data-consul-1 54 | namespace: jhipster 55 | resourceVersion: "3972" 56 | uid: c5a5b78c-732a-4c44-8c2c-4682296da68c 57 | spec: 58 | accessModes: 59 | - ReadWriteOnce 60 | resources: 61 | requests: 62 | storage: 8Gi 63 | storageClassName: gp2 64 | volumeMode: Filesystem 65 | volumeName: pvc-c5a5b78c-732a-4c44-8c2c-4682296da68c 66 | status: 67 | accessModes: 68 | - ReadWriteOnce 69 | capacity: 70 | storage: 8Gi 71 | phase: Bound 72 | - apiVersion: v1 73 | kind: PersistentVolumeClaim 74 | metadata: 75 | annotations: 76 | pv.kubernetes.io/bind-completed: "yes" 77 | pv.kubernetes.io/bound-by-controller: "yes" 78 | volume.beta.kubernetes.io/storage-provisioner: ebs.csi.aws.com 79 | volume.kubernetes.io/selected-node: ip-10-0-11-179.eu-west-1.compute.internal 80 | volume.kubernetes.io/storage-provisioner: ebs.csi.aws.com 81 | creationTimestamp: "2023-06-30T17:27:23Z" 82 | finalizers: 83 | - kubernetes.io/pvc-protection 84 | labels: 85 | app: consul 86 | app.kubernetes.io/name: consul 87 | name: data-consul-2 88 | namespace: jhipster 89 | resourceVersion: "3985" 90 | uid: d2ccb749-9fdc-4503-81e9-f268073e1487 91 | spec: 92 | accessModes: 93 | - ReadWriteOnce 94 | resources: 95 | requests: 96 | storage: 8Gi 97 | storageClassName: gp2 98 | volumeMode: Filesystem 99 | volumeName: pvc-d2ccb749-9fdc-4503-81e9-f268073e1487 100 | status: 101 | accessModes: 102 | - ReadWriteOnce 103 | capacity: 104 | storage: 8Gi 105 | phase: Bound 106 | kind: List 107 | metadata: 108 | resourceVersion: "" 109 | -------------------------------------------------------------------------------- /test_data/pvs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: PersistentVolume 5 | metadata: 6 | annotations: 7 | pv.kubernetes.io/migrated-to: ebs.csi.aws.com 8 | 
pv.kubernetes.io/provisioned-by: kubernetes.io/aws-ebs 9 | volume.kubernetes.io/provisioner-deletion-secret-name: "" 10 | volume.kubernetes.io/provisioner-deletion-secret-namespace: "" 11 | creationTimestamp: "2023-06-30T17:27:26Z" 12 | finalizers: 13 | - kubernetes.io/pv-protection 14 | - external-attacher/ebs-csi-aws-com 15 | managedFields: [] 16 | labels: 17 | topology.kubernetes.io/region: eu-west-1 18 | topology.kubernetes.io/zone: eu-west-1a 19 | name: pvc-149f1f3b-c0fd-471d-bc3e-d039369755ef 20 | resourceVersion: "4003" 21 | uid: cc4fb787-2543-4f7f-bec1-635fc4c1f555 22 | spec: 23 | accessModes: 24 | - ReadWriteOnce 25 | awsElasticBlockStore: 26 | fsType: ext4 27 | volumeID: vol-004bb053d7ab1ea55 28 | capacity: 29 | storage: 8Gi 30 | claimRef: 31 | apiVersion: v1 32 | kind: PersistentVolumeClaim 33 | name: data-consul-0 34 | namespace: jhipster 35 | resourceVersion: "3817" 36 | uid: 149f1f3b-c0fd-471d-bc3e-d039369755ef 37 | nodeAffinity: 38 | required: 39 | nodeSelectorTerms: 40 | - matchExpressions: 41 | - key: topology.kubernetes.io/zone 42 | operator: In 43 | values: 44 | - eu-west-1a 45 | - key: topology.kubernetes.io/region 46 | operator: In 47 | values: 48 | - eu-west-1 49 | persistentVolumeReclaimPolicy: Delete 50 | storageClassName: gp2 51 | volumeMode: Filesystem 52 | status: 53 | phase: Bound 54 | - apiVersion: v1 55 | kind: PersistentVolume 56 | metadata: 57 | annotations: 58 | pv.kubernetes.io/migrated-to: ebs.csi.aws.com 59 | pv.kubernetes.io/provisioned-by: kubernetes.io/aws-ebs 60 | volume.kubernetes.io/provisioner-deletion-secret-name: "" 61 | volume.kubernetes.io/provisioner-deletion-secret-namespace: "" 62 | creationTimestamp: "2023-06-30T17:27:26Z" 63 | finalizers: 64 | - kubernetes.io/pv-protection 65 | - external-attacher/ebs-csi-aws-com 66 | labels: 67 | topology.kubernetes.io/region: eu-west-1 68 | topology.kubernetes.io/zone: eu-west-1c 69 | name: pvc-c5a5b78c-732a-4c44-8c2c-4682296da68c 70 | resourceVersion: "4004" 71 | uid: 04f0b878-a2f6-4dbb-80a0-fc1d6a84e426 72 | spec: 73 | accessModes: 74 | - ReadWriteOnce 75 | awsElasticBlockStore: 76 | fsType: ext4 77 | volumeID: vol-005f365b0f59043d3 78 | capacity: 79 | storage: 8Gi 80 | claimRef: 81 | apiVersion: v1 82 | kind: PersistentVolumeClaim 83 | name: data-consul-1 84 | namespace: jhipster 85 | resourceVersion: "3826" 86 | uid: c5a5b78c-732a-4c44-8c2c-4682296da68c 87 | nodeAffinity: 88 | required: 89 | nodeSelectorTerms: 90 | - matchExpressions: 91 | - key: topology.kubernetes.io/zone 92 | operator: In 93 | values: 94 | - eu-west-1c 95 | - key: topology.kubernetes.io/region 96 | operator: In 97 | values: 98 | - eu-west-1 99 | persistentVolumeReclaimPolicy: Delete 100 | storageClassName: gp2 101 | volumeMode: Filesystem 102 | status: 103 | phase: Bound 104 | - apiVersion: v1 105 | kind: PersistentVolume 106 | metadata: 107 | annotations: 108 | pv.kubernetes.io/migrated-to: ebs.csi.aws.com 109 | pv.kubernetes.io/provisioned-by: kubernetes.io/aws-ebs 110 | volume.kubernetes.io/provisioner-deletion-secret-name: "" 111 | volume.kubernetes.io/provisioner-deletion-secret-namespace: "" 112 | creationTimestamp: "2023-06-30T17:27:26Z" 113 | finalizers: 114 | - kubernetes.io/pv-protection 115 | - external-attacher/ebs-csi-aws-com 116 | labels: 117 | topology.kubernetes.io/region: eu-west-1 118 | topology.kubernetes.io/zone: eu-west-1b 119 | name: pvc-d2ccb749-9fdc-4503-81e9-f268073e1487 120 | resourceVersion: "4012" 121 | uid: 8aff308a-4652-41ca-996c-be5bffb43e0e 122 | spec: 123 | accessModes: 124 | - ReadWriteOnce 125 | 
awsElasticBlockStore: 126 | fsType: ext4 127 | volumeID: vol-06f16eaaced9a2f4f 128 | capacity: 129 | storage: 8Gi 130 | claimRef: 131 | apiVersion: v1 132 | kind: PersistentVolumeClaim 133 | name: data-consul-2 134 | namespace: jhipster 135 | resourceVersion: "3833" 136 | uid: d2ccb749-9fdc-4503-81e9-f268073e1487 137 | nodeAffinity: 138 | required: 139 | nodeSelectorTerms: 140 | - matchExpressions: 141 | - key: topology.kubernetes.io/zone 142 | operator: In 143 | values: 144 | - eu-west-1b 145 | - key: topology.kubernetes.io/region 146 | operator: In 147 | values: 148 | - eu-west-1 149 | persistentVolumeReclaimPolicy: Delete 150 | storageClassName: gp2 151 | volumeMode: Filesystem 152 | status: 153 | phase: Bound 154 | kind: List 155 | metadata: 156 | resourceVersion: "" 157 | -------------------------------------------------------------------------------- /test_data/replication_controllers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: ReplicationController 5 | metadata: 6 | annotations: 7 | kubectl.kubernetes.io/last-applied-configuration: | 8 | {"apiVersion":"v1","kind":"ReplicationController","metadata":{"annotations":{},"name":"nginx","namespace":"default"},"spec":{"replicas":3,"selector":{"app":"nginx"},"template":{"metadata":{"labels":{"app":"nginx"},"name":"nginx"},"spec":{"containers":[{"image":"nginx","name":"nginx","ports":[{"containerPort":80}]}]}}}} 9 | creationTimestamp: "2021-07-27T14:37:49Z" 10 | generation: 1 11 | labels: 12 | app: nginx 13 | managedFields: [] 14 | name: nginx 15 | namespace: default 16 | resourceVersion: "511472" 17 | uid: f18e7ad9-411e-4ede-badd-35f333cd66c9 18 | spec: 19 | replicas: 3 20 | selector: 21 | app: nginx 22 | template: 23 | metadata: 24 | creationTimestamp: null 25 | labels: 26 | app: nginx 27 | name: nginx 28 | spec: 29 | containers: 30 | - image: nginx 31 | imagePullPolicy: Always 32 | name: nginx 33 | ports: 34 | - containerPort: 80 35 | protocol: TCP 36 | resources: {} 37 | terminationMessagePath: /dev/termination-log 38 | terminationMessagePolicy: File 39 | dnsPolicy: ClusterFirst 40 | restartPolicy: Always 41 | schedulerName: default-scheduler 42 | securityContext: {} 43 | terminationGracePeriodSeconds: 30 44 | status: 45 | availableReplicas: 3 46 | fullyLabeledReplicas: 3 47 | observedGeneration: 1 48 | readyReplicas: 3 49 | replicas: 3 50 | - apiVersion: v1 51 | kind: ReplicationController 52 | metadata: 53 | annotations: 54 | kubectl.kubernetes.io/last-applied-configuration: | 55 | {"apiVersion":"v1","kind":"ReplicationController","metadata":{"annotations":{},"name":"nginx-new","namespace":"default"},"spec":{"replicas":3,"selector":{"app":"nginx"},"template":{"metadata":{"labels":{"app":"nginx"},"name":"nginx"},"spec":{"containers":[{"image":"nginx","name":"nginx","ports":[{"containerPort":80}]},{"image":"nginx","name":"nginx2","ports":[{"containerPort":80}]}]}}}} 56 | creationTimestamp: "2021-07-27T14:45:24Z" 57 | generation: 1 58 | managedFields: [] 59 | labels: 60 | app: nginx 61 | name: nginx-new 62 | namespace: default 63 | resourceVersion: "512079" 64 | uid: 4f2df1cd-bb5b-4128-9913-2856f7244d6b 65 | spec: 66 | replicas: 3 67 | selector: 68 | app: nginx 69 | template: 70 | metadata: 71 | creationTimestamp: null 72 | labels: 73 | app: nginx 74 | name: nginx 75 | spec: 76 | containers: 77 | - image: nginx 78 | imagePullPolicy: Always 79 | name: nginx 80 | ports: 81 | - containerPort: 80 82 | protocol: TCP 83 | resources: {} 84 | 
terminationMessagePath: /dev/termination-log 85 | terminationMessagePolicy: File 86 | - image: nginx 87 | imagePullPolicy: Always 88 | name: nginx2 89 | ports: 90 | - containerPort: 80 91 | protocol: TCP 92 | resources: {} 93 | terminationMessagePath: /dev/termination-log 94 | terminationMessagePolicy: File 95 | dnsPolicy: ClusterFirst 96 | restartPolicy: Always 97 | schedulerName: default-scheduler 98 | securityContext: {} 99 | terminationGracePeriodSeconds: 30 100 | status: 101 | fullyLabeledReplicas: 3 102 | observedGeneration: 1 103 | replicas: 3 104 | kind: List 105 | metadata: 106 | resourceVersion: "" 107 | selfLink: "" 108 | -------------------------------------------------------------------------------- /test_data/role_bindings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | annotations: 7 | operator-sdk/primary-resource: monitoring/kiali 8 | operator-sdk/primary-resource-type: Kiali.kiali.io 9 | creationTimestamp: "2022-06-27T16:33:07Z" 10 | labels: 11 | app: kiali 12 | app.kubernetes.io/instance: kiali 13 | app.kubernetes.io/name: kiali 14 | app.kubernetes.io/part-of: kiali 15 | app.kubernetes.io/version: v1.41.0 16 | version: v1.41.0 17 | name: kiali 18 | namespace: default 19 | resourceVersion: "102143364" 20 | uid: f0de3ac8-1680-4794-88c3-927a42cccce0 21 | managedFields: [] 22 | roleRef: 23 | apiGroup: rbac.authorization.k8s.io 24 | kind: Role 25 | name: kiali-viewer 26 | subjects: 27 | - kind: ServiceAccount 28 | name: kiali-service-account 29 | namespace: monitoring 30 | kind: List 31 | metadata: 32 | resourceVersion: "" 33 | selfLink: "" 34 | -------------------------------------------------------------------------------- /test_data/roles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: Role 5 | metadata: 6 | annotations: 7 | operator-sdk/primary-resource: monitoring/kiali 8 | operator-sdk/primary-resource-type: Kiali.kiali.io 9 | creationTimestamp: "2022-06-27T16:33:06Z" 10 | labels: 11 | app: kiali 12 | app.kubernetes.io/instance: kiali 13 | app.kubernetes.io/name: kiali 14 | app.kubernetes.io/part-of: kiali 15 | app.kubernetes.io/version: v1.41.0 16 | version: v1.41.0 17 | name: kiali-viewer 18 | namespace: default 19 | resourceVersion: "102143312" 20 | uid: 78ce4200-d457-44e7-b6a2-9602ff6b44b7 21 | managedFields: [] 22 | rules: 23 | - apiGroups: 24 | - "" 25 | resources: 26 | - configmaps 27 | - endpoints 28 | - pods/log 29 | verbs: 30 | - get 31 | - list 32 | - watch 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - namespaces 37 | - pods 38 | - replicationcontrollers 39 | - services 40 | verbs: 41 | - get 42 | - list 43 | - watch 44 | - apiGroups: 45 | - "" 46 | resources: 47 | - pods/portforward 48 | verbs: 49 | - create 50 | - post 51 | - apiGroups: 52 | - extensions 53 | - apps 54 | resources: 55 | - daemonsets 56 | - deployments 57 | - replicasets 58 | - statefulsets 59 | verbs: 60 | - get 61 | - list 62 | - watch 63 | - apiGroups: 64 | - batch 65 | resources: 66 | - cronjobs 67 | - jobs 68 | verbs: 69 | - get 70 | - list 71 | - watch 72 | - apiGroups: 73 | - networking.istio.io 74 | - security.istio.io 75 | resources: 76 | - '*' 77 | verbs: 78 | - get 79 | - list 80 | - watch 81 | - apiGroups: 82 | - authentication.k8s.io 83 | resources: 84 | - tokenreviews 85 | verbs: 86 | - create 87 
| kind: List 88 | metadata: 89 | resourceVersion: "" 90 | selfLink: "" 91 | -------------------------------------------------------------------------------- /test_data/secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | data: 5 | ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyTWpVME56YzNOVGt3SGhjTk1qRXdOekExTURrek5UVTVXaGNOTXpFd056QXpNRGt6TlRVNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyTWpVME56YzNOVGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSSTEzY3NmMGM1ZEViVS8wY25aaXBJcnR0c21uNVVKRlV3ZEx5OE9OdzAKRlVvSzU3UGVWSTZnbXFOdG5veWNwamE5bi9TdUpBK2xXcXFQTm9nYmlRTzdvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVFKQWdycEdZczdsTUt0OVBXcmpoCnlSdWhhS3d3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUlTN2gyYlc0c2VlRUx1cGw2SmhYV2dpY0pLMTVKYmwKQUFkanM1bWZIY2NxQWlFQW1hVkxRdDJWNTBDOFpMT3NSNUxmM0ZsRkg3cXBGdDNSTXRvMHBlR0ZxQjQ9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K 6 | namespace: a3ViZS1wdWJsaWM= 7 | token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNkltcDBVMjlPZVRFNFYwRnJkQzFGVURVNU4wNVJhVWRCUVZaa2RIZFpUMWszZFc1cmJHVkxXRGhqTUUwaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUpyZFdKbExYQjFZbXhwWXlJc0ltdDFZbVZ5Ym1WMFpYTXVhVzh2YzJWeWRtbGpaV0ZqWTI5MWJuUXZjMlZqY21WMExtNWhiV1VpT2lKa1pXWmhkV3gwTFhSdmEyVnVMWEo0WkRoMklpd2lhM1ZpWlhKdVpYUmxjeTVwYnk5elpYSjJhV05sWVdOamIzVnVkQzl6WlhKMmFXTmxMV0ZqWTI5MWJuUXVibUZ0WlNJNkltUmxabUYxYkhRaUxDSnJkV0psY201bGRHVnpMbWx2TDNObGNuWnBZMlZoWTJOdmRXNTBMM05sY25acFkyVXRZV05qYjNWdWRDNTFhV1FpT2lKa05HSmpZalZrT0MxallqVTFMVFJpWXpFdFlUZG1aQzB4TnpObE9USXdPVGMyTURJaUxDSnpkV0lpT2lKemVYTjBaVzA2YzJWeWRtbGpaV0ZqWTI5MWJuUTZhM1ZpWlMxd2RXSnNhV002WkdWbVlYVnNkQ0o5LkhtcTNCaGRhcFVJZHMyTUtwVE50bmxfeXE1alRZcG9kZUotTUpENklwZHNXTFVuaHZrdEx1RHkteWZEZ01vXzU1WE9PU3A0TWFUcWJ0MDd1bmhfeUdyR2FjbWQ4bzdQek1UaVBEa09ObkdqTjNYSlVCMmpnNXd3OVhRMEM1Q193Y096Z09PNG5yUE1pc1lEc0RHY19ETnpaZjVGd0JNNnoteDkzT0xxMlVSVmZ2LXZ2NGNlQzA1ZC0xVFNETEV5VDUxTHF2Sjl1ME03cWluWWJ6SnNpemRXOFVNNm1jNTZNYTUyZ1NFTEM1RGxqWlZ1Z1hMOUhvajduRDZaQVVIZGpyeGRycWswbVZLTmVaUUtFbWJMSlhzR0dnM2MtZnY2RU80NjJBdmxRdkUwZ1hhLVRyd0lVdmVzQXhHNGZUNkQxYzBPMTduMFJOcDc2bWVBZkNHT3Uwdw== 8 | kind: Secret 9 | metadata: 10 | annotations: 11 | kubernetes.io/service-account.name: default 12 | kubernetes.io/service-account.uid: d4bcb5d8-cb55-4bc1-a7fd-173e92097602 13 | creationTimestamp: "2021-07-05T09:36:17Z" 14 | managedFields: [] 15 | name: default-token-rxd8v 16 | namespace: kube-public 17 | resourceVersion: "412" 18 | uid: 1715a9d3-ebab-4a44-9890-d559cb0376ea 19 | type: kubernetes.io/service-account-token 20 | - apiVersion: v1 21 | data: 22 | ca.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyTWpVME56YzNOVGt3SGhjTk1qRXdOekExTURrek5UVTVXaGNOTXpFd056QXpNRGt6TlRVNQpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyTWpVME56YzNOVGt3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFSSTEzY3NmMGM1ZEViVS8wY25aaXBJcnR0c21uNVVKRlV3ZEx5OE9OdzAKRlVvSzU3UGVWSTZnbXFOdG5veWNwamE5bi9TdUpBK2xXcXFQTm9nYmlRTzdvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVFKQWdycEdZczdsTUt0OVBXcmpoCnlSdWhhS3d3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQUlTN2gyYlc0c2VlRUx1cGw2SmhYV2dpY0pLMTVKYmwKQUFkanM1bWZIY2NxQWlFQW1hVkxRdDJWNTBDOFpMT3NSNUxmM0ZsRkg3cXBGdDNSTXRvMHBlR0ZxQjQ9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K 23 | namespace: ZGVmYXVsdA== 24 | token: ZXlKaGJHY2lPaUpTVXpJMU5pSXNJbXRwWkNJNkltcDBVMjlPZVRFNFYwRnJkQzFGVURVNU4wNVJhVWRCUVZaa2RIZFpUMWszZFc1cmJHVkxXRGhqTUUwaWZRLmV5SnBjM01pT2lKcmRXSmxjbTVsZEdWekwzTmxjblpwWTJWaFkyTnZkVzUwSWl3aWEzVmlaWEp1WlhSbGN5NXBieTl6WlhKMmFXTmxZV05qYjNWdWRDOXVZVzFsYzNCaFkyVWlPaUprWldaaGRXeDBJaXdpYTNWaVpYSnVaWFJsY3k1cGJ5OXpaWEoyYVdObFlXTmpiM1Z1ZEM5elpXTnlaWFF1Ym1GdFpTSTZJbVJsWm1GMWJIUXRkRzlyWlc0dGNuSjRaRzBpTENKcmRXSmxjbTVsZEdWekxtbHZMM05sY25acFkyVmhZMk52ZFc1MEwzTmxjblpwWTJVdFlXTmpiM1Z1ZEM1dVlXMWxJam9pWkdWbVlYVnNkQ0lzSW10MVltVnlibVYwWlhNdWFXOHZjMlZ5ZG1salpXRmpZMjkxYm5RdmMyVnlkbWxqWlMxaFkyTnZkVzUwTG5WcFpDSTZJakZtWkRGaU9UYzFMVGxtWlRFdE5EZGlPQzFpTnpFM0xUSXdabVk1T0RJNU9UQmxNeUlzSW5OMVlpSTZJbk41YzNSbGJUcHpaWEoyYVdObFlXTmpiM1Z1ZERwa1pXWmhkV3gwT21SbFptRjFiSFFpZlEuNlRjY3AtRWFveVZjUDZ5M1ZhSkxHU0RwWXRZZFhCZHdoTzI2RzdGeGRJa3NSclJQQWk2Q2dRdzUyRk84LW12SlAzTDNHUnBiczM0eXpNQlltb1llVndqWjNVRkw1MUk4ZXhMMzMyZzlQYkVzODVGYWZxOFdraE55bG5zWW5aazBuSjgxV2otNTNfQWtSbDBCdDBmNFE0dFU5RUpVT2wydVJqWldZeVFtQjkxTV84dnpDTlNLTmpVTXdqUmFiUFZYSnpnOHNZOEpSMHh1WTdkWmxjNWg3Z05QN0hKRlgwQXlxS3VGVHFzRzhDcmIzdGl4QzBiWGh5WGFfZE0wNFNqWHpfT0NmTEMtdlpCT3pRNUUxbFBCem0zbmh1WklRcnJfZVphSkpZZ3c3Q2llWWUycXEyUW13WFR2ZS0wX24zTGdVTkRVY0tNcC1CVVFibTZ6eFhKcUJB 25 | kind: Secret 26 | metadata: 27 | annotations: 28 | kubernetes.io/service-account.name: default 29 | kubernetes.io/service-account.uid: 1fd1b975-9fe1-47b8-b717-20ff982990e3 30 | creationTimestamp: "2021-07-05T09:36:17Z" 31 | managedFields: [] 32 | name: default-token-rrxdm 33 | namespace: default 34 | resourceVersion: "411" 35 | uid: 692191f3-5c00-49d4-b2df-7dcf23d5cdb5 36 | type: kubernetes.io/service-account-token 37 | kind: List 38 | metadata: 39 | resourceVersion: "" 40 | selfLink: "" 41 | -------------------------------------------------------------------------------- /test_data/stfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: apps/v1 4 | kind: StatefulSet 5 | metadata: 6 | annotations: 7 | kubectl.kubernetes.io/last-applied-configuration: | 8 | {"apiVersion":"apps/v1","kind":"StatefulSet","metadata":{"annotations":{},"name":"web","namespace":"default"},"spec":{"replicas":2,"selector":{"matchLabels":{"app":"nginx"}},"serviceName":"nginx","template":{"metadata":{"labels":{"app":"nginx"}},"spec":{"containers":[{"image":"k8s.gcr.io/nginx-slim:0.8","name":"nginx","ports":[{"containerPort":80,"name":"web"}],"volumeMounts":[{"mountPath":"/usr/share/nginx/html","name":"www"}]}]}},"volumeClaimTemplates":[{"metadata":{"name":"www"},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}}}]}} 9 | creationTimestamp: "2021-04-25T14:23:47Z" 10 | generation: 1 11 | managedFields: [] 12 | name: web 13 | namespace: 
default 14 | resourceVersion: "600405" 15 | uid: ae5c6192-c82a-444d-8897-62cb65e2940a 16 | spec: 17 | podManagementPolicy: OrderedReady 18 | replicas: 2 19 | revisionHistoryLimit: 10 20 | selector: 21 | matchLabels: 22 | app: nginx 23 | serviceName: nginx 24 | template: 25 | metadata: 26 | creationTimestamp: null 27 | labels: 28 | app: nginx 29 | spec: 30 | containers: 31 | - image: k8s.gcr.io/nginx-slim:0.8 32 | imagePullPolicy: IfNotPresent 33 | name: nginx 34 | ports: 35 | - containerPort: 80 36 | name: web 37 | protocol: TCP 38 | resources: {} 39 | terminationMessagePath: /dev/termination-log 40 | terminationMessagePolicy: File 41 | volumeMounts: 42 | - mountPath: /usr/share/nginx/html 43 | name: www 44 | dnsPolicy: ClusterFirst 45 | restartPolicy: Always 46 | schedulerName: default-scheduler 47 | securityContext: {} 48 | terminationGracePeriodSeconds: 30 49 | updateStrategy: 50 | rollingUpdate: 51 | partition: 0 52 | type: RollingUpdate 53 | volumeClaimTemplates: 54 | - apiVersion: v1 55 | kind: PersistentVolumeClaim 56 | metadata: 57 | creationTimestamp: null 58 | name: www 59 | spec: 60 | accessModes: 61 | - ReadWriteOnce 62 | resources: 63 | requests: 64 | storage: 1Gi 65 | volumeMode: Filesystem 66 | status: 67 | phase: Pending 68 | status: 69 | collisionCount: 0 70 | currentReplicas: 2 71 | currentRevision: web-b46f789c4 72 | observedGeneration: 1 73 | readyReplicas: 2 74 | replicas: 2 75 | updateRevision: web-b46f789c4 76 | updatedReplicas: 2 77 | kind: List 78 | metadata: 79 | resourceVersion: "" 80 | selfLink: "" 81 | -------------------------------------------------------------------------------- /test_data/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: storage.k8s.io/v1 4 | kind: StorageClass 5 | metadata: 6 | annotations: 7 | meta.helm.sh/release-name: prometheus 8 | meta.helm.sh/release-namespace: monitoring 9 | creationTimestamp: "2021-12-14T11:08:59Z" 10 | labels: 11 | app.kubernetes.io/managed-by: Helm 12 | helm.toolkit.fluxcd.io/name: prometheus 13 | helm.toolkit.fluxcd.io/namespace: flux-system 14 | name: ebs-performance 15 | resourceVersion: "98487651" 16 | uid: 4c55b509-35f6-4539-91f3-5efc04502287 17 | managedFields: [] 18 | parameters: 19 | iopsPerGB: "30" 20 | type: io1 21 | provisioner: kubernetes.io/aws-ebs 22 | reclaimPolicy: Delete 23 | volumeBindingMode: Immediate 24 | - allowVolumeExpansion: true 25 | apiVersion: storage.k8s.io/v1 26 | kind: StorageClass 27 | metadata: 28 | annotations: 29 | meta.helm.sh/release-name: prometheus 30 | meta.helm.sh/release-namespace: monitoring 31 | creationTimestamp: "2021-12-14T11:08:59Z" 32 | labels: 33 | app.kubernetes.io/managed-by: Helm 34 | helm.toolkit.fluxcd.io/name: prometheus 35 | helm.toolkit.fluxcd.io/namespace: flux-system 36 | name: ebs-standard 37 | resourceVersion: "98487650" 38 | uid: 38ba70fb-25a8-4d9f-a1d3-2407de9e9128 39 | managedFields: [] 40 | parameters: 41 | type: gp2 42 | provisioner: kubernetes.io/aws-ebs 43 | reclaimPolicy: Delete 44 | volumeBindingMode: Immediate 45 | - apiVersion: storage.k8s.io/v1 46 | kind: StorageClass 47 | metadata: 48 | annotations: 49 | meta.helm.sh/release-name: prometheus 50 | meta.helm.sh/release-namespace: monitoring 51 | creationTimestamp: "2021-12-14T11:08:59Z" 52 | labels: 53 | app.kubernetes.io/managed-by: Helm 54 | helm.toolkit.fluxcd.io/name: prometheus 55 | helm.toolkit.fluxcd.io/namespace: flux-system 56 | name: efs-sc 57 | resourceVersion: "98487652" 58 | uid: 
d60d2b3f-6e91-4fa1-add0-9383c0a8c6ea 59 | managedFields: [] 60 | provisioner: efs.csi.aws.com 61 | reclaimPolicy: Delete 62 | volumeBindingMode: Immediate 63 | - apiVersion: storage.k8s.io/v1 64 | kind: StorageClass 65 | metadata: 66 | annotations: 67 | kubectl.kubernetes.io/last-applied-configuration: | 68 | {"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"},"name":"gp2"},"parameters":{"fsType":"ext4","type":"gp2"},"provisioner":"kubernetes.io/aws-ebs","volumeBindingMode":"WaitForFirstConsumer"} 69 | storageclass.kubernetes.io/is-default-class: "true" 70 | creationTimestamp: "2021-12-14T11:04:25Z" 71 | name: gp2 72 | resourceVersion: "183" 73 | uid: 330bc0b5-b40c-4327-82ab-ca6f53b553cc 74 | managedFields: [] 75 | parameters: 76 | fsType: ext4 77 | type: gp2 78 | provisioner: kubernetes.io/aws-ebs 79 | reclaimPolicy: Delete 80 | volumeBindingMode: WaitForFirstConsumer 81 | kind: List 82 | metadata: 83 | resourceVersion: "" 84 | selfLink: "" 85 | -------------------------------------------------------------------------------- /test_data/svcs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: v1 4 | kind: Service 5 | metadata: 6 | creationTimestamp: "2021-05-10T21:48:03Z" 7 | labels: 8 | component: apiserver 9 | provider: kubernetes 10 | managedFields: [] 11 | name: kubernetes 12 | namespace: default 13 | resourceVersion: "189" 14 | uid: e5b251bc-ac4f-48d2-8725-51c98ecd0e7c 15 | spec: 16 | clusterIP: 10.43.0.1 17 | clusterIPs: 18 | - 10.43.0.1 19 | ports: 20 | - name: https 21 | port: 443 22 | protocol: TCP 23 | targetPort: 6443 24 | sessionAffinity: None 25 | type: ClusterIP 26 | status: 27 | loadBalancer: {} 28 | - apiVersion: v1 29 | kind: Service 30 | metadata: 31 | annotations: 32 | objectset.rio.cattle.io/applied: '{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"objectset.rio.cattle.io/id":"","objectset.rio.cattle.io/owner-gvk":"k3s.cattle.io/v1, 33 | Kind=Addon","objectset.rio.cattle.io/owner-name":"coredns","objectset.rio.cattle.io/owner-namespace":"kube-system","prometheus.io/port":"9153","prometheus.io/scrape":"true"},"labels":{"k8s-app":"kube-dns","kubernetes.io/cluster-service":"true","kubernetes.io/name":"CoreDNS","objectset.rio.cattle.io/hash":"bce283298811743a0386ab510f2f67ef74240c57"},"name":"kube-dns","namespace":"kube-system"},"spec":{"clusterIP":"10.43.0.10","ports":[{"name":"dns","port":53,"protocol":"UDP"},{"name":"dns-tcp","port":53,"protocol":"TCP"},{"name":"metrics","port":9153,"protocol":"TCP"}],"selector":{"k8s-app":"kube-dns"}}}' 34 | objectset.rio.cattle.io/id: "" 35 | objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon 36 | objectset.rio.cattle.io/owner-name: coredns 37 | objectset.rio.cattle.io/owner-namespace: kube-system 38 | prometheus.io/port: "9153" 39 | prometheus.io/scrape: "true" 40 | creationTimestamp: "2021-05-10T21:48:06Z" 41 | labels: 42 | k8s-app: kube-dns 43 | kubernetes.io/cluster-service: "true" 44 | kubernetes.io/name: CoreDNS 45 | objectset.rio.cattle.io/hash: bce283298811743a0386ab510f2f67ef74240c57 46 | managedFields: [] 47 | name: kube-dns 48 | namespace: kube-system 49 | resourceVersion: "217" 50 | uid: 33f3f7d1-53d7-423e-b648-1c3f8ce40548 51 | spec: 52 | clusterIP: 10.43.0.10 53 | clusterIPs: 54 | - 10.43.0.10 55 | ports: 56 | - name: dns 57 | port: 53 58 | protocol: UDP 59 | targetPort: 53 60 | - name: dns-tcp 61 | port: 53 62 | protocol: TCP 
63 | targetPort: 53 64 | - name: metrics 65 | port: 9153 66 | protocol: TCP 67 | targetPort: 9153 68 | selector: 69 | k8s-app: kube-dns 70 | sessionAffinity: None 71 | type: ClusterIP 72 | status: 73 | loadBalancer: {} 74 | - apiVersion: v1 75 | kind: Service 76 | metadata: 77 | annotations: 78 | objectset.rio.cattle.io/applied: '{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{"objectset.rio.cattle.io/id":"","objectset.rio.cattle.io/owner-gvk":"k3s.cattle.io/v1, 79 | Kind=Addon","objectset.rio.cattle.io/owner-name":"metrics-server-service","objectset.rio.cattle.io/owner-namespace":"kube-system"},"labels":{"kubernetes.io/cluster-service":"true","kubernetes.io/name":"Metrics-server","objectset.rio.cattle.io/hash":"a5d3bc601c871e123fa32b27f549b6ea770bcf4a"},"name":"metrics-server","namespace":"kube-system"},"spec":{"ports":[{"port":443,"protocol":"TCP","targetPort":443}],"selector":{"k8s-app":"metrics-server"}}}' 80 | objectset.rio.cattle.io/id: "" 81 | objectset.rio.cattle.io/owner-gvk: k3s.cattle.io/v1, Kind=Addon 82 | objectset.rio.cattle.io/owner-name: metrics-server-service 83 | objectset.rio.cattle.io/owner-namespace: kube-system 84 | creationTimestamp: "2021-05-10T21:48:06Z" 85 | labels: 86 | kubernetes.io/cluster-service: "true" 87 | kubernetes.io/name: Metrics-server 88 | objectset.rio.cattle.io/hash: a5d3bc601c871e123fa32b27f549b6ea770bcf4a 89 | managedFields: [] 90 | name: metrics-server 91 | namespace: kube-system 92 | resourceVersion: "240" 93 | uid: 35de3e90-1a1a-4994-b557-2e41926732d9 94 | spec: 95 | clusterIP: 10.43.93.186 96 | clusterIPs: 97 | - 10.43.93.186 98 | ports: 99 | - port: 443 100 | protocol: TCP 101 | targetPort: 443 102 | selector: 103 | k8s-app: metrics-server 104 | sessionAffinity: None 105 | type: ClusterIP 106 | status: 107 | loadBalancer: {} 108 | - apiVersion: v1 109 | kind: Service 110 | metadata: 111 | annotations: 112 | meta.helm.sh/release-name: traefik 113 | meta.helm.sh/release-namespace: kube-system 114 | creationTimestamp: "2021-05-10T21:48:35Z" 115 | labels: 116 | app: traefik 117 | app.kubernetes.io/managed-by: Helm 118 | chart: traefik-1.81.0 119 | heritage: Helm 120 | release: traefik 121 | managedFields: [] 122 | name: traefik-prometheus 123 | namespace: kube-system 124 | resourceVersion: "499" 125 | uid: cc5b10e4-9cd8-414a-ba87-c1fcb4336bd0 126 | spec: 127 | clusterIP: 10.43.9.106 128 | clusterIPs: 129 | - 10.43.9.106 130 | ports: 131 | - name: metrics 132 | port: 9100 133 | protocol: TCP 134 | targetPort: metrics 135 | selector: 136 | app: traefik 137 | release: traefik 138 | sessionAffinity: None 139 | type: ClusterIP 140 | status: 141 | loadBalancer: {} 142 | - apiVersion: v1 143 | kind: Service 144 | metadata: 145 | annotations: 146 | meta.helm.sh/release-name: traefik 147 | meta.helm.sh/release-namespace: kube-system 148 | creationTimestamp: "2021-05-10T21:48:35Z" 149 | labels: 150 | app: traefik 151 | app.kubernetes.io/managed-by: Helm 152 | chart: traefik-1.81.0 153 | heritage: Helm 154 | release: traefik 155 | managedFields: [] 156 | name: traefik 157 | namespace: kube-system 158 | resourceVersion: "557" 159 | uid: d108e7b6-a032-48a1-9782-ca0674c54aeb 160 | spec: 161 | clusterIP: 10.43.235.227 162 | clusterIPs: 163 | - 10.43.235.227 164 | externalTrafficPolicy: Cluster 165 | ports: 166 | - name: http 167 | nodePort: 30723 168 | port: 80 169 | protocol: TCP 170 | targetPort: http 171 | - name: https 172 | nodePort: 31954 173 | port: 443 174 | protocol: TCP 175 | targetPort: https 176 | selector: 177 | app: traefik 178 | 
release: traefik 179 | sessionAffinity: None 180 | type: LoadBalancer 181 | status: 182 | loadBalancer: 183 | ingress: 184 | - ip: 172.20.0.2 185 | kind: List 186 | metadata: 187 | resourceVersion: "" 188 | selfLink: "" 189 | -------------------------------------------------------------------------------- /test_data/test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: sample-network-policy-4 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | app: webapp 10 | app3: webapp3 11 | policyTypes: 12 | - Egress 13 | - Ingress 14 | ingress: 15 | - from: 16 | - podSelector: 17 | matchLabels: 18 | app: webapp 19 | ports: 20 | - port: 443 21 | - port: 80 22 | - from: 23 | - podSelector: {} 24 | - from: 25 | - ipBlock: 26 | cidr: 172.16.0.0/16 27 | ports: 28 | - port: 443 29 | - port: 80 30 | - from: 31 | - ipBlock: 32 | cidr: 0.0.0.0/0 33 | ports: 34 | - port: 443 35 | - port: 80 36 | egress: 37 | - to: 38 | - namespaceSelector: {} 39 | podSelector: 40 | matchLabels: 41 | k8s-app: kube-dns 42 | ports: 43 | - port: 53 44 | protocol: UDP 45 | - to: 46 | - podSelector: {} 47 | - to: 48 | - podSelector: 49 | matchLabels: 50 | app: webapp 51 | ports: 52 | - port: 443 53 | - port: 80 --------------------------------------------------------------------------------
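Note: the fixtures above are plain Kubernetes List/resource documents, so they can be smoke-tested independently of the UI code. A minimal, hypothetical sketch (assuming a serde_yaml dev-dependency; the module name, test name, and assertions are illustrative and not taken from this repository) that parses test_data/svcs.yaml and checks its top-level shape:

// Hypothetical fixture smoke test; assumes serde_yaml is available as a dev-dependency.
#[cfg(test)]
mod fixture_smoke_tests {
    use std::fs;

    #[test]
    fn svcs_fixture_parses_as_a_list() {
        // Read the raw fixture and parse it as untyped YAML.
        let raw = fs::read_to_string("test_data/svcs.yaml").expect("fixture should exist");
        let doc: serde_yaml::Value =
            serde_yaml::from_str(&raw).expect("fixture should be valid YAML");
        // The file is a v1 List wrapping five Service objects.
        assert_eq!(doc["kind"].as_str(), Some("List"));
        assert_eq!(doc["items"].as_sequence().map(|items| items.len()), Some(5));
    }
}

A check like this only verifies that a fixture is well-formed YAML with the expected top-level shape; the actual tests under src/ presumably deserialize these files into typed Kubernetes resources before exercising the table mappers.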