├── .dockerignore ├── .flake8 ├── .gitattributes ├── .gitignore ├── .pre-commit-config.yaml ├── .travis.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.rst ├── app ├── .eslintignore ├── .eslintrc.yml ├── README.rst ├── package-lock.json ├── package.json ├── src │ ├── app.js │ ├── bars.js │ ├── cluster.js │ ├── config.js │ ├── filters.js │ ├── node.js │ ├── pod.js │ ├── selectbox.js │ ├── themes.js │ ├── tooltip.js │ ├── utils.js │ └── vendor │ │ ├── addWheelListener.js │ │ └── json_delta.js └── webpack.config.js ├── deploy ├── deployment.yaml ├── kustomization.yaml ├── rbac.yaml ├── redis-deployment.yaml ├── redis-service.yaml └── service.yaml ├── docs ├── .gitignore ├── Makefile ├── access-control.rst ├── conf.py ├── getting-started.rst ├── index.rst ├── multiple-clusters.rst ├── ui-options.rst └── user-guide.rst ├── examples └── unassigned-pod.yaml ├── kube-ops-view-logo.png ├── kube-ops-view-logo.svg ├── kube_ops_view ├── __init__.py ├── __main__.py ├── backoff.py ├── cluster_discovery.py ├── kubernetes.py ├── main.py ├── mock.py ├── oauth.py ├── static │ ├── favicon.ico │ └── sharetechmono.woff2 ├── stores.py ├── templates │ ├── index.html │ └── screen-tokens.html ├── update.py └── utils.py ├── poetry.lock ├── pyproject.toml ├── screenshot.png └── tests └── test_mock.py /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !pyproject.toml 3 | !poetry.lock 4 | !kube_ops_view 5 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length=240 3 | ignore=E722,W503,E402,E203 4 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text eol=lf 2 | 3 | *.png binary 4 | *.jpg binary 5 | *.ico binary 6 | *.woff2 binary 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.swp 3 | *.pyc 4 | .idea/ 5 | **/node_modules/ 6 | kube_ops_view/static/build/ 7 | *-secret 8 | npm-debug.log* 9 | .tox/ 10 | *.egg* 11 | dist/ 12 | scm-source.json 13 | .cache/ 14 | .coverage 15 | .pytest_cache/ 16 | .mypy_cache 17 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | minimum_pre_commit_version: 1.21.0 3 | repos: 4 | # meta 5 | 6 | - repo: meta 7 | hooks: 8 | - id: check-hooks-apply 9 | - id: check-useless-excludes 10 | 11 | - repo: https://codeberg.org/hjacobs/kube-manifest-lint 12 | rev: 0.2.0 13 | hooks: 14 | - id: kube-manifest-lint 15 | exclude: kustomization.yaml 16 | 17 | # formatters 18 | 19 | - repo: https://github.com/asottile/reorder_python_imports 20 | rev: v2.2.0 21 | hooks: 22 | - id: reorder-python-imports 23 | 24 | - repo: https://github.com/ambv/black 25 | rev: 19.10b0 26 | hooks: 27 | - id: black 28 | 29 | - repo: https://github.com/asottile/pyupgrade 30 | rev: v2.3.0 31 | hooks: 32 | - id: pyupgrade 33 | stages: [push] 34 | 35 | # linters 36 | 37 | - repo: https://github.com/PyCQA/bandit 38 | rev: 1.6.2 39 | hooks: 40 | - id: bandit 41 | args: ["-x", "tests"] 42 | stages: [push] 43 | 44 | - repo: https://github.com/PyCQA/pydocstyle 45 | rev: 5.0.2 46 | hooks: 47 | - id: 
pydocstyle 48 | args: ["--ignore=D10,D21,D202"] 49 | 50 | - repo: local 51 | hooks: 52 | 53 | - id: safety 54 | name: safety 55 | entry: safety 56 | language: system 57 | pass_filenames: false 58 | args: ["check", "--bare"] 59 | stages: [push] 60 | 61 | - id: poetry 62 | name: poetry 63 | description: Validates the structure of the pyproject.toml file 64 | entry: poetry check 65 | language: system 66 | pass_filenames: false 67 | files: ^pyproject.toml$ 68 | stages: [push] 69 | 70 | - repo: https://github.com/adrienverge/yamllint 71 | rev: v1.23.0 72 | hooks: 73 | - id: yamllint 74 | args: ["--strict", "-d", "{rules: {line-length: {max: 180}}}"] 75 | 76 | - repo: https://github.com/pre-commit/mirrors-mypy 77 | rev: v0.770 78 | hooks: 79 | - id: mypy 80 | 81 | - repo: https://github.com/pryorda/dockerfilelint-precommit-hooks 82 | rev: v0.1.0 83 | hooks: 84 | - id: dockerfilelint 85 | stages: [commit] # required 86 | 87 | # miscellaneous 88 | 89 | - repo: https://github.com/pre-commit/pre-commit-hooks 90 | rev: v2.5.0 91 | hooks: 92 | - id: check-added-large-files 93 | - id: check-docstring-first 94 | - id: debug-statements 95 | - id: end-of-file-fixer 96 | - id: flake8 97 | additional_dependencies: ["flake8-bugbear"] 98 | - id: trailing-whitespace 99 | - id: check-ast 100 | - id: check-builtin-literals 101 | - id: detect-private-key 102 | - id: mixed-line-ending 103 | - id: name-tests-test 104 | args: ["--django"] 105 | 106 | - repo: https://github.com/pre-commit/pygrep-hooks 107 | rev: v1.5.1 108 | hooks: 109 | # - id: rst-backticks 110 | - id: python-use-type-annotations 111 | - id: python-no-log-warn 112 | - id: python-no-eval 113 | - id: python-check-mock-methods 114 | - id: python-check-blanket-noqa 115 | 116 | # commit-msg 117 | # http://jorisroovers.com/gitlint/#using-gitlint-through-pre-commit 118 | 119 | - repo: https://github.com/jorisroovers/gitlint 120 | rev: v0.13.1 121 | hooks: 122 | - id: gitlint 123 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: bionic 2 | sudo: yes 3 | language: python 4 | python: 5 | - "3.7" 6 | services: 7 | - docker 8 | install: 9 | - pip install poetry 10 | - nvm install 7.4 11 | - npm install -g eslint 12 | script: 13 | - make test docker 14 | after_success: 15 | - coveralls 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8-slim 2 | 3 | WORKDIR / 4 | 5 | RUN apt-get update && apt-get install --yes --no-install-recommends gcc && rm -rf /var/lib/apt/lists/* 6 | 7 | RUN pip3 install poetry 8 | 9 | COPY poetry.lock / 10 | COPY pyproject.toml / 11 | 12 | RUN poetry config virtualenvs.create false && \ 13 | poetry install --no-interaction --no-dev --no-ansi 14 | 15 | FROM python:3.8-slim 16 | 17 | WORKDIR / 18 | 19 | # copy pre-built packages to this image 20 | COPY --from=0 /usr/local/lib/python3.8/site-packages /usr/local/lib/python3.8/site-packages 21 | 22 | # now copy the actual code we will execute (poetry install above was just for dependencies) 23 | COPY kube_ops_view /kube_ops_view 24 | 25 | ARG VERSION=dev 26 | 27 | RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" /kube_ops_view/__init__.py 28 | 29 | ENTRYPOINT ["python3", "-m", "kube_ops_view"] 30 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 
62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. 
A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 
186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. 
A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean test appjs docker push mock 2 | 3 | IMAGE ?= hjacobs/kube-ops-view 4 | VERSION ?= $(shell git describe --tags --always --dirty) 5 | TAG ?= $(VERSION) 6 | TTYFLAGS = $(shell test -t 0 && echo "-it") 7 | 8 | default: docker 9 | 10 | .PHONY: install 11 | install: 12 | poetry install 13 | 14 | clean: 15 | rm -fr kube_ops_view/static/build 16 | 17 | .PHONY: lint 18 | lint: install 19 | poetry run pre-commit run --all-files 20 | 21 | test: lint install 22 | poetry run coverage run --source=kube_ops_view -m py.test -v 23 | poetry run coverage report 24 | 25 | version: 26 | sed -i "s/kube-ops-view:.*/kube-ops-view:$(VERSION)/" deploy/*.yaml 27 | 28 | appjs: 29 | docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app -e NPM_CONFIG_CACHE=/tmp node:14.0-slim npm install 30 | docker run $(TTYFLAGS) -u $$(id -u) -v $$(pwd):/workdir -w /workdir/app -e NPM_CONFIG_CACHE=/tmp node:14.0-slim npm run build 31 | 32 | docker: appjs 33 | docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" . 34 | @echo 'Docker image $(IMAGE):$(TAG) can now be used.' 35 | 36 | docker-arm: appjs 37 | docker run --rm --privileged multiarch/qemu-user-static --reset -p yes 38 | docker buildx create --name arm-node --append --use --platform "linux/arm" 39 | docker buildx build --build-arg "VERSION=$(VERSION)" --platform "linux/arm" -t $(IMAGE):$(TAG) --load . 40 | @echo 'Docker image $(IMAGE):$(TAG) can now be used.' 
41 | 42 | push: docker 43 | docker push "$(IMAGE):$(TAG)" 44 | docker tag "$(IMAGE):$(TAG)" "$(IMAGE):latest" 45 | docker push "$(IMAGE):latest" 46 | 47 | mock: 48 | docker run $(TTYFLAGS) -p 8080:8080 "$(IMAGE):$(TAG)" --mock \ 49 | --node-link-url-template "https://kube-web-view.example.org/clusters/{cluster}/nodes/{name}" \ 50 | --pod-link-url-template "https://kube-web-view.example.org/clusters/{cluster}/namespaces/{namespace}/pods/{name}" 51 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Moved to https://codeberg.org/hjacobs/kube-ops-view 2 | -------------------------------------------------------------------------------- /app/.eslintignore: -------------------------------------------------------------------------------- 1 | src/vendor/*.js 2 | -------------------------------------------------------------------------------- /app/.eslintrc.yml: -------------------------------------------------------------------------------- 1 | parserOptions: 2 | sourceType: module 3 | env: 4 | browser: true 5 | node: true 6 | es6: true 7 | extends: 'eslint:recommended' 8 | rules: 9 | indent: 10 | - error 11 | - 4 12 | linebreak-style: 13 | - error 14 | - unix 15 | quotes: 16 | - error 17 | - single 18 | prefer-const: 19 | - error 20 | no-redeclare: 21 | - error 22 | no-unused-vars: 23 | - warn 24 | - argsIgnorePattern: "^_" 25 | semi: 26 | - error 27 | - never 28 | -------------------------------------------------------------------------------- /app/README.rst: -------------------------------------------------------------------------------- 1 | This directory contains the ECMAScript frontend code of Kubernetes Operational View and is only needed at build time. 2 | 3 | The JavaScript application bundle is generated by webpack into ``kube_ops_view/static/build/app*.js`` by running: 4 | 5 | .. code-block:: bash 6 | 7 | $ npm install 8 | $ npm run build 9 | 10 | For frontend development, watch the source code and continuously rebuild the webpack bundle with: 11 | 12 | .. code-block:: bash 13 | 14 | $ npm start 15 | 16 | Windows uses slightly different scripts. This will build the JavaScript app on Windows: 17 | 18 | .. code-block:: powershell 19 | 20 | $ npm install 21 | $ npm run buildwin 22 | 23 | This will start the watch-and-rebuild script on Windows: 24 | 25 | ..
code-block:: powershell 26 | 27 | $ npm run startwin 28 | -------------------------------------------------------------------------------- /app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "kube-ops-view", 3 | "version": "1.0.0", 4 | "description": "=========================== Kubernetes Operational View ===========================", 5 | "main": "src/app.js", 6 | "config": { 7 | "buildDir": "../kube_ops_view/static/build" 8 | }, 9 | "scripts": { 10 | "prestart": "npm install", 11 | "start": "NODE_ENV=development webpack --watch", 12 | "startwin": "SET NODE_ENV=development & webpack --watch", 13 | "webpack": "webpack -p --config ./webpack.config.js", 14 | "build": "NODE_ENV=production npm run webpack", 15 | "buildwin": "SET NODE_ENV=production & npm run webpack", 16 | "prewebpack": "npm run clean", 17 | "lint": "eslint ./src/**/*.js", 18 | "clean": "rimraf $npm_package_config_buildDir && mkdir $npm_package_config_buildDir" 19 | }, 20 | "repository": { 21 | "type": "git", 22 | "url": "git+https://github.com/hjacobs/kube-ops-view.git" 23 | }, 24 | "author": "", 25 | "license": "ISC", 26 | "bugs": { 27 | "url": "https://github.com/hjacobs/kube-ops-view/issues" 28 | }, 29 | "homepage": "https://github.com/hjacobs/kube-ops-view#readme", 30 | "dependencies": { 31 | "pixi.js": "^4.8.5", 32 | "@pixi/filter-crt": "^2.7.0", 33 | "babel-runtime": "^6.26.0", 34 | "babel-polyfill": "^6.26.0" 35 | }, 36 | "devDependencies": { 37 | "babel-core": "^6.26.2", 38 | "babel-loader": "^7.1.4", 39 | "babel-plugin-transform-runtime": "^6.23.0", 40 | "babel-preset-env": "^1.7.0", 41 | "babel-preset-es2015": "^6.24.1", 42 | "brfs": "^1.4.3", 43 | "eslint": "^4.19.1", 44 | "eslint-loader": "^2.0.0", 45 | "rimraf": "^2.6.2", 46 | "transform-loader": "^0.2.4", 47 | "webpack": "^4.43.0", 48 | "webpack-cli": "^3.1.2" 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /app/src/app.js: -------------------------------------------------------------------------------- 1 | import Tooltip from './tooltip.js' 2 | import Cluster from './cluster.js' 3 | import {Pod, ALL_PODS, ALL_SORTS, ALL_STATUS_FILTERS} from './pod.js' 4 | import SelectBox from './selectbox' 5 | import {Theme, ALL_THEMES} from './themes.js' 6 | import {DESATURATION_FILTER} from './filters.js' 7 | import {JSON_delta} from './vendor/json_delta.js' 8 | import Config from './config.js' 9 | 10 | const PIXI = require('pixi.js') 11 | 12 | const addWheelListener = require('./vendor/addWheelListener') 13 | 14 | 15 | export default class App { 16 | 17 | constructor(config) { 18 | const params = this.parseLocationHash() 19 | 20 | this.config = Config.fromParams(params) 21 | this.config.nodeLinkUrlTemplate = config['node_link_url_template'] 22 | this.config.podLinkUrlTemplate = config['pod_link_url_template'] 23 | this.config.route_prefix = config['route_prefix'] 24 | 25 | this.filterString = (params.get('q') && decodeURIComponent(params.get('q'))) || '' 26 | this.selectedClusters = new Set((params.get('clusters') || '').split(',').filter(x => x)) 27 | this.seenPods = new Set() 28 | 29 | // check localStorage, use the first function as a default option 30 | const indexSorterFn = ALL_SORTS.findIndex(obj => obj.text === (localStorage.getItem('sorterFn') || ALL_SORTS[0].text)) 31 | this.sorterFn = ALL_SORTS[indexSorterFn].value 32 | 33 | // filterFn 34 | const indexStatusFilterFn = ALL_STATUS_FILTERS.findIndex(obj => obj.text === 
(localStorage.getItem('statusFilterFn') || ALL_STATUS_FILTERS[0].text)) 35 | this.statusFilterFn = ALL_STATUS_FILTERS[indexStatusFilterFn].value 36 | 37 | this.theme = Theme.get(localStorage.getItem('theme')) 38 | this.eventSource = null 39 | this.connectTime = null 40 | this.keepAliveTimer = null 41 | this.clusters = new Map() 42 | this.clusterStatuses = new Map() 43 | this.viewContainerTargetPosition = new PIXI.Point() 44 | this.bootstrapping = true 45 | 46 | this.startDrawingPodsAt = 24 47 | this.defaultPodsPerRow = 6 48 | this.defaultWidthOfNodePx = 105 49 | this.defaultHeightOfNodePx = 115 50 | this.sizeOfPodPx = 13 51 | this.heightOfTopHandlePx = 15 52 | } 53 | 54 | parseLocationHash() { 55 | // hash startswith # 56 | const hash = document.location.hash.substring(1) 57 | const params = new Map() 58 | for (const pair of hash.split(';')) { 59 | const keyValue = pair.split('=', 2) 60 | if (keyValue.length == 2) { 61 | params.set(keyValue[0], keyValue[1]) 62 | } 63 | } 64 | return params 65 | } 66 | 67 | changeLocationHash(key, value) { 68 | const params = this.parseLocationHash() 69 | params.set(key, value) 70 | const pairs = [] 71 | for (const [key, value] of params) { 72 | if (value) { 73 | pairs.push(key + '=' + encodeURIComponent(value)) 74 | } 75 | } 76 | 77 | document.location.hash = '#' + pairs.sort().join(';') 78 | } 79 | 80 | nameMatches(pod, searchString) { 81 | const name = pod.name 82 | return name && name.includes(searchString) 83 | } 84 | 85 | labelMatches(pod, name, value) { 86 | const labels = pod.labels 87 | return labels && labels[name] === value 88 | } 89 | 90 | namespaceMatches(pod, value) { 91 | return pod.namespace === value 92 | } 93 | 94 | createMatchesFunctionForQuery(query) { 95 | if (query.startsWith('namespace=')) { 96 | // filter by namespace 97 | const value = query.split('namespace=', 2)[1] 98 | return pod => this.namespaceMatches(pod, value) 99 | } else if (query.includes('=')) { 100 | // filter by label 101 | const [label, value] = query.split('=', 2) 102 | return pod => this.labelMatches(pod, label, value) 103 | } 104 | else { 105 | // filter by name 106 | return pod => this.nameMatches(pod, query) 107 | } 108 | } 109 | 110 | 111 | filter() { 112 | const searchString = this.filterString 113 | if (this.searchText) { 114 | // this.searchText might be undefined (dashboard mode) 115 | this.searchText.text = searchString 116 | } 117 | this.changeLocationHash('q', searchString) 118 | const elementDisplayFilter = DESATURATION_FILTER 119 | const filterableElements = [] 120 | const matchesQuery = this.createMatchesFunctionForQuery(searchString) 121 | for (const cluster of this.viewContainer.children) { 122 | for (const node of cluster.children) { 123 | if (node.pod) { // node is actually unassigned pod 124 | filterableElements.push(node) 125 | } 126 | for (const pod of node.children) { 127 | if (pod.pod) { 128 | filterableElements.push(pod) 129 | } 130 | } 131 | } 132 | } 133 | 134 | filterableElements.forEach(value => { 135 | if (!matchesQuery(value.pod)) { 136 | value.filters = [elementDisplayFilter] 137 | } else { 138 | // TODO: pod might have other filters set.. 
139 | value.filters = [] 140 | } 141 | }) 142 | 143 | filterableElements.forEach(value => { 144 | if (!this.statusFilterFn(value.pod)) { 145 | value.filters = [elementDisplayFilter] 146 | } 147 | }) 148 | } 149 | 150 | initialize() { 151 | App.current = this 152 | 153 | // create the renderer 154 | const noWebGL = this.config.renderer === 'canvas' 155 | const renderer = PIXI.autoDetectRenderer(256, 256, {resolution: 2}, noWebGL) 156 | renderer.view.style.display = 'block' 157 | renderer.autoResize = true 158 | renderer.resize(window.innerWidth, window.innerHeight) 159 | 160 | window.onresize = function () { 161 | renderer.resize(window.innerWidth, window.innerHeight) 162 | } 163 | 164 | //Add the canvas to the HTML document 165 | document.body.appendChild(renderer.view) 166 | this.renderer = renderer 167 | 168 | //Create a container object called the `stage` 169 | this.stage = new PIXI.Container() 170 | 171 | this.registerEventListeners() 172 | setInterval(this.pruneUnavailableClusters.bind(this), 5 * 1000) 173 | 174 | if (this.config.reloadIntervalSeconds) { 175 | setTimeout(function () { 176 | location.reload(false) 177 | }, this.config.reloadIntervalSeconds * 1000) 178 | } 179 | } 180 | 181 | registerEventListeners() { 182 | function downHandler(event) { 183 | const panAmount = 20 184 | if (event.key == 'ArrowLeft') { 185 | this.viewContainerTargetPosition.x += panAmount 186 | } 187 | else if (event.key == 'ArrowRight') { 188 | this.viewContainerTargetPosition.x -= panAmount 189 | } 190 | if (event.key == 'ArrowUp') { 191 | this.viewContainerTargetPosition.y += panAmount 192 | } 193 | else if (event.key == 'ArrowDown') { 194 | this.viewContainerTargetPosition.y -= panAmount 195 | } 196 | if (event.key == 'PageUp') { 197 | this.viewContainerTargetPosition.y += window.innerHeight 198 | } 199 | else if (event.key == 'PageDown') { 200 | this.viewContainerTargetPosition.y -= window.innerHeight 201 | } 202 | else if (event.key == 'Home') { 203 | this.viewContainerTargetPosition.x = 20 204 | this.viewContainerTargetPosition.y = this.config.dashboardMode ? 
20 : 40 205 | } 206 | else if (event.key && event.key.length == 1 && !event.ctrlKey && !event.metaKey) { 207 | this.filterString += event.key 208 | this.filter() 209 | event.preventDefault() 210 | } 211 | else if (event.key == 'Backspace') { 212 | this.filterString = this.filterString.slice(0, Math.max(0, this.filterString.length - 1)) 213 | this.filter() 214 | event.preventDefault() 215 | } 216 | } 217 | 218 | var isDragging = false, 219 | prevX, prevY 220 | 221 | function mouseDownHandler(event) { 222 | if (event.button == 0 || event.button == 1) { 223 | prevX = event.clientX 224 | prevY = event.clientY 225 | isDragging = true 226 | this.renderer.view.style.cursor = 'move' 227 | } 228 | } 229 | 230 | function mouseMoveHandler(event) { 231 | if (!isDragging) { 232 | return 233 | } 234 | var dx = event.clientX - prevX 235 | var dy = event.clientY - prevY 236 | 237 | this.viewContainer.x += dx 238 | this.viewContainer.y += dy 239 | // stop any current move animation 240 | this.viewContainerTargetPosition.x = this.viewContainer.x 241 | this.viewContainerTargetPosition.y = this.viewContainer.y 242 | prevX = event.clientX 243 | prevY = event.clientY 244 | } 245 | 246 | function mouseUpHandler(_event) { 247 | isDragging = false 248 | this.renderer.view.style.cursor = 'default' 249 | } 250 | 251 | function touchStartHandler(event) { 252 | if (event.touches.length == 1) { 253 | const touch = event.touches[0] 254 | prevX = touch.clientX 255 | prevY = touch.clientY 256 | isDragging = true 257 | } 258 | } 259 | 260 | function touchMoveHandler(event) { 261 | if (!isDragging) { 262 | return 263 | } 264 | if (event.touches.length == 1) { 265 | const touch = event.touches[0] 266 | var dx = touch.clientX - prevX 267 | var dy = touch.clientY - prevY 268 | 269 | this.viewContainer.x += dx 270 | this.viewContainer.y += dy 271 | // stop any current move animation 272 | this.viewContainerTargetPosition.x = this.viewContainer.x 273 | this.viewContainerTargetPosition.y = this.viewContainer.y 274 | prevX = touch.clientX 275 | prevY = touch.clientY 276 | } 277 | } 278 | 279 | function touchEndHandler(_event) { 280 | isDragging = false 281 | } 282 | 283 | addEventListener('keydown', downHandler.bind(this), false) 284 | addEventListener('mousedown', mouseDownHandler.bind(this), false) 285 | addEventListener('mousemove', mouseMoveHandler.bind(this), false) 286 | addEventListener('mouseup', mouseUpHandler.bind(this), false) 287 | addEventListener('touchstart', touchStartHandler.bind(this), false) 288 | addEventListener('touchmove', touchMoveHandler.bind(this), false) 289 | addEventListener('touchend', touchEndHandler.bind(this), false) 290 | 291 | const that = this 292 | const interactionObj = new PIXI.interaction.InteractionData() 293 | 294 | function getLocalCoordinates(x, y) { 295 | return interactionObj.getLocalPosition(that.viewContainer, undefined, {x: x, y: y}) 296 | } 297 | 298 | const minScale = 1 / 32 299 | const maxScale = 32 300 | 301 | function zoom(x, y, isZoomIn) { 302 | const direction = isZoomIn ? 
1 : -1 303 | const factor = (1 + direction * 0.1) 304 | const newScale = Math.min(Math.max(that.viewContainer.scale.x * factor, minScale), maxScale) 305 | that.viewContainer.scale.set(newScale) 306 | 307 | // zoom around one point on ViewContainer 308 | const beforeTransform = getLocalCoordinates(x, y) 309 | that.viewContainer.updateTransform() 310 | const afterTransform = getLocalCoordinates(x, y) 311 | 312 | that.viewContainer.x += (afterTransform.x - beforeTransform.x) * newScale 313 | that.viewContainer.y += (afterTransform.y - beforeTransform.y) * newScale 314 | 315 | // stop any current move animation 316 | that.viewContainerTargetPosition.x = that.viewContainer.x 317 | that.viewContainerTargetPosition.y = that.viewContainer.y 318 | } 319 | 320 | addWheelListener(this.renderer.view, function (e) { 321 | zoom(e.clientX, e.clientY, e.deltaY < 0) 322 | }) 323 | } 324 | 325 | drawMenuBar() { 326 | const menuBar = new PIXI.Graphics() 327 | menuBar.beginFill(this.theme.secondaryColor, 1) 328 | menuBar.drawRect(0, 0, this.renderer.width, 28) 329 | menuBar.lineStyle(2, this.theme.secondaryColor, 1) 330 | menuBar.moveTo(0, 28) 331 | menuBar.lineTo(this.renderer.width, 28) 332 | menuBar.lineStyle(1, this.theme.primaryColor, 1) 333 | menuBar.drawRect(20, 3, 200, 22) 334 | this.stage.addChild(menuBar) 335 | 336 | const searchPrompt = new PIXI.Text('>', { 337 | fontFamily: 'ShareTechMono', 338 | fontSize: 14, 339 | fill: this.theme.primaryColor 340 | }) 341 | searchPrompt.x = 26 342 | searchPrompt.y = 8 343 | PIXI.ticker.shared.add(function (_) { 344 | var v = Math.sin((PIXI.ticker.shared.lastTime % 2000) / 2000. * Math.PI) 345 | searchPrompt.alpha = v 346 | }) 347 | this.stage.addChild(searchPrompt) 348 | 349 | const searchText = new PIXI.Text('', {fontFamily: 'ShareTechMono', fontSize: 14, fill: this.theme.primaryColor}) 350 | searchText.x = 40 351 | searchText.y = 8 352 | this.stage.addChild(searchText) 353 | 354 | const app = this 355 | const selectBox = new SelectBox(ALL_SORTS, this.sorterFn, function (text, value) { 356 | app.changeSorting(text, value) 357 | }) 358 | selectBox.x = 265 359 | selectBox.y = 3 360 | menuBar.addChild(selectBox.draw()) 361 | 362 | const themeOptions = Object.keys(ALL_THEMES).sort().map(name => { 363 | return {text: name.toUpperCase(), value: name} 364 | }) 365 | const themeSelector = new SelectBox(themeOptions, this.theme.name, function (text, value) { 366 | app.switchTheme(text, value) 367 | }) 368 | themeSelector.x = 420 369 | themeSelector.y = 3 370 | menuBar.addChild(themeSelector.draw()) 371 | 372 | 373 | const statusFilterBox = new SelectBox(ALL_STATUS_FILTERS, this.statusFilterFn, function (text, value) { 374 | app.changeStatusFilter(text, value) 375 | }) 376 | statusFilterBox.x = 585 377 | statusFilterBox.y = 3 378 | menuBar.addChild(statusFilterBox.draw()) 379 | 380 | this.searchText = searchText 381 | } 382 | 383 | draw() { 384 | this.stage.removeChildren() 385 | this.theme.apply(this.stage) 386 | 387 | const viewContainer = new PIXI.Container() 388 | viewContainer.scale.set(this.config.initialScale) 389 | viewContainer.x = 20 390 | viewContainer.y = this.config.dashboardMode ? 
20 : 40 391 | this.viewContainerTargetPosition.x = viewContainer.x 392 | this.viewContainerTargetPosition.y = viewContainer.y 393 | this.stage.addChild(viewContainer) 394 | 395 | if (!this.config.dashboardMode) { 396 | this.drawMenuBar() 397 | } 398 | 399 | const tooltip = this.tooltip || new Tooltip() 400 | tooltip.draw() 401 | this.stage.addChild(tooltip) 402 | 403 | this.viewContainer = viewContainer 404 | this.tooltip = tooltip 405 | } 406 | 407 | animatePodCreation(originalPod, globalPosition) { 408 | const pod = new Pod(originalPod.pod, null, this.tooltip) 409 | pod.draw() 410 | pod.blendMode = PIXI.BLEND_MODES.ADD 411 | pod.interactive = false 412 | const targetPosition = globalPosition 413 | const angle = Math.random() * Math.PI * 2 414 | const cos = Math.cos(angle) 415 | const sin = Math.sin(angle) 416 | const distance = Math.max(200, Math.random() * Math.min(this.renderer.width, this.renderer.height)) 417 | // blur filter looks cool, but has huge performance penalty 418 | // const blur = new PIXI.filters.BlurFilter(20, 2) 419 | // pod.filters = [blur] 420 | pod.pivot.x = pod.width / 2 421 | pod.pivot.y = pod.height / 2 422 | pod.alpha = 0 423 | pod._progress = 0 424 | originalPod.visible = false 425 | const that = this 426 | const tick = function (t) { 427 | // progress goes from 0 to 1 428 | const progress = Math.min(1, pod._progress + (0.01 * t)) 429 | const scale = 1 + ((1 - progress) * 140) 430 | pod._progress = progress 431 | pod.x = targetPosition.x + (distance * cos * (1 - progress)) 432 | pod.y = targetPosition.y + (distance * sin * (1 - progress)) 433 | pod.alpha = progress 434 | pod.rotation = progress * progress * Math.PI * 2 435 | // blur.blur = (1 - alpha) * 20 436 | pod.scale.set(scale) 437 | if (progress >= 1) { 438 | PIXI.ticker.shared.remove(tick) 439 | that.stage.removeChild(pod) 440 | pod.destroy() 441 | originalPod.visible = true 442 | } 443 | } 444 | PIXI.ticker.shared.add(tick) 445 | this.stage.addChild(pod) 446 | } 447 | 448 | animatePodDeletion(originalPod, globalPosition) { 449 | const pod = new Pod(originalPod.pod, null, this.tooltip) 450 | pod.draw() 451 | pod.blendMode = PIXI.BLEND_MODES.ADD 452 | const globalCenter = new PIXI.Point(globalPosition.x + pod.width / 2, globalPosition.y + pod.height / 2) 453 | const blur = new PIXI.filters.BlurFilter(4) 454 | pod.filters = [blur] 455 | pod.position = globalPosition.clone() 456 | pod.alpha = 1 457 | pod._progress = 1 458 | originalPod.destroy() 459 | const that = this 460 | const tick = function (t) { 461 | // progress goes from 1 to 0 462 | const progress = Math.max(0, pod._progress - (0.02 * t)) 463 | const scale = 1 + ((1 - progress) * 8) 464 | pod._progress = progress 465 | pod.alpha = progress 466 | pod.scale.set(scale) 467 | pod.position.set(globalCenter.x - pod.width / 2, globalCenter.y - pod.height / 2) 468 | 469 | if (progress <= 0) { 470 | PIXI.ticker.shared.remove(tick) 471 | that.stage.removeChild(pod) 472 | pod.destroy() 473 | } 474 | } 475 | PIXI.ticker.shared.add(tick) 476 | this.stage.addChild(pod) 477 | } 478 | 479 | update() { 480 | // make sure we create a copy (this.clusters might get modified) 481 | const clusters = Array.from(this.clusters.entries()).sort().map(idCluster => idCluster[1]) 482 | const that = this 483 | let changes = 0 484 | const podKeys = new Set() 485 | for (const cluster of clusters) { 486 | for (const node of Object.values(cluster.nodes)) { 487 | for (const pod of Object.values(node.pods)) { 488 | podKeys.add(cluster.id + '/' + pod.namespace + '/' + pod.name) 489 | } 
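// Pods are identified by the composite key '<cluster id>/<namespace>/<name>',
// both here and in the ALL_PODS registry (see pod.js). The two set
// differences computed below drive the animations, roughly:
//
//   in ALL_PODS but missing from podKeys -> pod was deleted -> animatePodDeletion
//   in podKeys but not yet in seenPods   -> pod was created -> animatePodCreation
//
// Only the first few changes per update are animated (changes < 10) to keep
// rendering cheap.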
490 | } 491 | for (const pod of Object.values(cluster.unassigned_pods)) { 492 | podKeys.add(cluster.id + '/' + pod.namespace + '/' + pod.name) 493 | } 494 | } 495 | for (const key of Object.keys(ALL_PODS)) { 496 | const pod = ALL_PODS[key] 497 | if (!podKeys.has(key)) { 498 | // pod was deleted 499 | delete ALL_PODS[key] 500 | this.seenPods.delete(key) 501 | if (changes < 10) { 502 | // NOTE: we need to do this BEFORE removeChildren() 503 | // to get correct global coordinates 504 | const globalPos = pod.toGlobal({x: 0, y: 0}) 505 | window.setTimeout(function () { 506 | that.animatePodDeletion(pod, globalPos) 507 | }, 100 * changes) 508 | } else { 509 | pod.destroy() 510 | } 511 | changes++ 512 | } 513 | } 514 | const clusterComponentById = {} 515 | for (const component of this.viewContainer.children) { 516 | clusterComponentById[component.cluster.id] = component 517 | } 518 | let y = 0 519 | const clusterIds = new Set() 520 | for (const cluster of clusters) { 521 | if (!this.selectedClusters.size || this.selectedClusters.has(cluster.id)) { 522 | clusterIds.add(cluster.id) 523 | const status = this.clusterStatuses.get(cluster.id) 524 | let clusterBox = clusterComponentById[cluster.id] 525 | if (!clusterBox) { 526 | clusterBox = new Cluster(cluster, status, this.tooltip, this.config) 527 | this.viewContainer.addChild(clusterBox) 528 | } else { 529 | clusterBox.cluster = cluster 530 | clusterBox.status = status 531 | } 532 | clusterBox.draw() 533 | clusterBox.x = 0 534 | clusterBox.y = y 535 | y += clusterBox.height + 10 536 | } 537 | } 538 | for (const component of this.viewContainer.children) { 539 | if (!clusterIds.has(component.cluster.id)) { 540 | this.viewContainer.removeChild(component) 541 | } 542 | } 543 | this.filter() 544 | 545 | for (const key of Object.keys(ALL_PODS)) { 546 | const pod = ALL_PODS[key] 547 | if (!this.seenPods.has(key)) { 548 | // pod was created 549 | this.seenPods.add(key) 550 | if (!this.bootstrapping && changes < 10) { 551 | const globalPos = pod.toGlobal({x: 0, y: 0}) 552 | window.setTimeout(function () { 553 | that.animatePodCreation(pod, globalPos) 554 | }, 100 * changes) 555 | } 556 | changes++ 557 | } 558 | } 559 | } 560 | 561 | tick(time) { 562 | const deltaX = this.viewContainerTargetPosition.x - this.viewContainer.x 563 | const deltaY = this.viewContainerTargetPosition.y - this.viewContainer.y 564 | if (Math.abs(deltaX) < 20 && Math.abs(deltaY) < 20) { 565 | this.viewContainer.position.x = this.viewContainerTargetPosition.x 566 | this.viewContainer.position.y = this.viewContainerTargetPosition.y 567 | } else { 568 | if (Math.abs(deltaX) > time) { 569 | this.viewContainer.x += time * Math.sign(deltaX) * Math.max(10, Math.abs(deltaX) / 10) 570 | } 571 | if (Math.abs(deltaY) > time) { 572 | this.viewContainer.y += time * Math.sign(deltaY) * Math.max(10, Math.abs(deltaY) / 10) 573 | } 574 | } 575 | this.renderer.render(this.stage) 576 | } 577 | 578 | changeSorting(text, newSortFunction) { 579 | this.sorterFn = newSortFunction 580 | localStorage.setItem('sorterFn', text) 581 | this.update() 582 | } 583 | 584 | changeStatusFilter(text, newStatusFilterFunction) { 585 | this.statusFilterFn = newStatusFilterFunction 586 | localStorage.setItem('statusFilterFn', text) 587 | this.update() 588 | } 589 | 590 | switchTheme(text, newTheme) { 591 | this.theme = Theme.get(newTheme) 592 | this.draw() 593 | this.update() 594 | localStorage.setItem('theme', newTheme) 595 | } 596 | 597 | toggleCluster(clusterId) { 598 | if (this.selectedClusters.has(clusterId)) { 599 | 
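// cluster is already selected -> deselect it; an empty selection means
// "show all clusters" (see update() above)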
this.selectedClusters.delete(clusterId) 600 | } else { 601 | this.selectedClusters.add(clusterId) 602 | } 603 | this.changeLocationHash('clusters', Array.from(this.selectedClusters).join(',')) 604 | // make sure we are updating our EventSource filter 605 | this.connect() 606 | this.update() 607 | } 608 | 609 | keepAlive() { 610 | if (this.keepAliveTimer != null) { 611 | clearTimeout(this.keepAliveTimer) 612 | } 613 | this.keepAliveTimer = setTimeout(this.connect.bind(this), this.config.keepAliveSeconds * 1000) 614 | if (this.connectTime != null) { 615 | const now = Date.now() 616 | if (now - this.connectTime > this.config.maxConnectionLifetimeSeconds * 1000) { 617 | // maximum connection lifetime exceeded => reconnect 618 | this.connect() 619 | } 620 | } 621 | } 622 | 623 | pruneUnavailableClusters() { 624 | let updateNeeded = false 625 | const nowSeconds = Date.now() / 1000 626 | for (const [clusterId, statusObj] of this.clusterStatuses.entries()) { 627 | const lastQueryTime = statusObj.last_query_time || 0 628 | if (lastQueryTime < nowSeconds - this.config.maxDataAgeSeconds) { 629 | this.clusters.delete(clusterId) 630 | updateNeeded = true 631 | } else if (lastQueryTime < nowSeconds - 20) { 632 | updateNeeded = true 633 | } 634 | } 635 | if (updateNeeded) { 636 | this.update() 637 | } 638 | } 639 | 640 | disconnect() { 641 | if (this.eventSource != null) { 642 | this.eventSource.close() 643 | this.eventSource = null 644 | this.connectTime = null 645 | } 646 | } 647 | 648 | refreshLastQueryTime(clusterId) { 649 | let statusObj = this.clusterStatuses.get(clusterId) 650 | if (!statusObj) { 651 | statusObj = {} 652 | } 653 | statusObj.last_query_time = Date.now() / 1000 654 | this.clusterStatuses.set(clusterId, statusObj) 655 | } 656 | 657 | connect() { 658 | // first close the old connection 659 | this.disconnect() 660 | const that = this 661 | // NOTE: path must be relative to work with kubectl proxy out of the box 662 | let url = this.config.route_prefix + (this.config.route_prefix === '/' ? 'events' : '/events') 663 | const clusterIds = Array.from(this.selectedClusters).join(',') 664 | if (clusterIds) { 665 | url += '?cluster_ids=' + clusterIds 666 | } 667 | const eventSource = this.eventSource = new EventSource(url, {credentials: 'include'}) 668 | this.keepAlive() 669 | eventSource.onerror = function (_event) { 670 | that._errors++ 671 | if (that._errors <= 1) { 672 | // immediately reconnect on first error 673 | that.connect() 674 | } else { 675 | // rely on keep-alive timer to reconnect 676 | that.disconnect() 677 | } 678 | } 679 | eventSource.addEventListener('clusterupdate', function (event) { 680 | that._errors = 0 681 | that.keepAlive() 682 | const cluster = JSON.parse(event.data) 683 | const status = that.clusterStatuses.get(cluster.id) 684 | const nowSeconds = Date.now() / 1000 685 | if (status && status.last_query_time < nowSeconds - that.config.maxDataAgeSeconds) { 686 | // outdated data => ignore 687 | } else { 688 | that.clusters.set(cluster.id, cluster) 689 | that.update() 690 | } 691 | }) 692 | eventSource.addEventListener('clusterdelta', function (event) { 693 | that._errors = 0 694 | that.keepAlive() 695 | const data = JSON.parse(event.data) 696 | // we received some delta => we know that the cluster query succeeded! 697 | that.refreshLastQueryTime(data.cluster_id) 698 | let cluster = that.clusters.get(data.cluster_id) 699 | if (cluster && data.delta) { 700 | // deep copy cluster object (patch function mutates inplace!) 
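// JSON_delta.patch (vendor/json_delta.js) applies a list of [path, value]
// stanzas to the copied object. A minimal, hypothetical example of the data
// involved:
//
//   const before = {nodes: {'node-1': {pods: {}}}, unassigned_pods: {}}
//   const delta  = [[['nodes', 'node-1', 'pods', 'web/nginx'],
//                    {name: 'nginx', namespace: 'web', phase: 'Running'}]]
//   const after  = JSON_delta.patch(JSON.parse(JSON.stringify(before)), delta)
//   // `after` contains the new pod while `before` stays untouched, which is
//   // why the stored cluster is deep-copied via a JSON round-trip right below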
701 | cluster = JSON.parse(JSON.stringify(cluster)) 702 | cluster = JSON_delta.patch(cluster, data.delta) 703 | that.clusters.set(cluster.id, cluster) 704 | that.update() 705 | } 706 | }) 707 | eventSource.addEventListener('clusterstatus', function (event) { 708 | that._errors = 0 709 | that.keepAlive() 710 | const data = JSON.parse(event.data) 711 | that.clusterStatuses.set(data.cluster_id, data.status) 712 | }) 713 | eventSource.addEventListener('bootstrapend', function (_event) { 714 | that._errors = 0 715 | that.keepAlive() 716 | that.bootstrapping = false 717 | }) 718 | this.connectTime = Date.now() 719 | } 720 | 721 | run() { 722 | this.initialize() 723 | this.draw() 724 | this.connect() 725 | 726 | PIXI.ticker.shared.add(this.tick, this) 727 | } 728 | } 729 | 730 | module.exports = App 731 | -------------------------------------------------------------------------------- /app/src/bars.js: -------------------------------------------------------------------------------- 1 | import {FACTORS, getBarColor} from './utils' 2 | import App from './app' 3 | 4 | const PIXI = require('pixi.js') 5 | 6 | export default class Bars extends PIXI.Graphics { 7 | constructor(entity, resources, tooltip) { 8 | super() 9 | this.entity = entity 10 | this.resources = resources 11 | this.tooltip = tooltip 12 | } 13 | 14 | draw() { 15 | const bars = this 16 | 17 | const barHeightPx = bars.entity.heightOfNodePx - (App.current.heightOfTopHandlePx + 5 + 3) 18 | const heightOfNodeWoPaddingPx = bars.entity.heightOfNodePx - 5 19 | 20 | bars.beginFill(App.current.theme.primaryColor, 0.1) 21 | bars.drawRect(5, heightOfNodeWoPaddingPx - barHeightPx, 15, barHeightPx) 22 | bars.endFill() 23 | 24 | // CPU 25 | const cpuHeight = barHeightPx / bars.resources.cpu.capacity 26 | bars.interactive = true 27 | bars.lineStyle(0, 0xaaffaa, 1) 28 | bars.beginFill(getBarColor(bars.resources.cpu.requested, bars.resources.cpu.capacity - bars.resources.cpu.reserved), 1) 29 | bars.drawRect(5, heightOfNodeWoPaddingPx - (bars.resources.cpu.requested + bars.resources.cpu.reserved) * cpuHeight, 2.5, (bars.resources.cpu.requested + bars.resources.cpu.reserved) * cpuHeight) 30 | bars.beginFill(getBarColor(bars.resources.cpu.used, bars.resources.cpu.capacity), 1) 31 | bars.drawRect(7.5, heightOfNodeWoPaddingPx - bars.resources.cpu.used * cpuHeight, 2.5, bars.resources.cpu.used * cpuHeight) 32 | bars.endFill() 33 | bars.lineStyle(1, App.current.theme.primaryColor, 1) 34 | bars.drawRect(5, heightOfNodeWoPaddingPx - bars.resources.cpu.reserved * cpuHeight, 5, bars.resources.cpu.reserved * cpuHeight) 35 | 36 | // Memory 37 | const scale = bars.resources.memory.capacity / barHeightPx 38 | bars.lineStyle(0, 0xaaffaa, 1) 39 | bars.beginFill(getBarColor(bars.resources.memory.requested, bars.resources.memory.capacity - bars.resources.memory.reserved), 1) 40 | bars.drawRect(14, heightOfNodeWoPaddingPx - (bars.resources.memory.requested + bars.resources.memory.reserved) / scale, 2.5, (bars.resources.memory.requested + bars.resources.memory.reserved) / scale) 41 | bars.beginFill(getBarColor(bars.resources.memory.used, bars.resources.memory.capacity), 1) 42 | bars.drawRect(16.5, heightOfNodeWoPaddingPx - bars.resources.memory.used / scale, 2.5, bars.resources.memory.used / scale) 43 | bars.endFill() 44 | bars.lineStyle(1, App.current.theme.primaryColor, 1) 45 | bars.drawRect(14, heightOfNodeWoPaddingPx - bars.resources.memory.reserved / scale, 5, bars.resources.memory.reserved / scale) 46 | 47 | bars.lineStyle(1, App.current.theme.primaryColor, 1) 48 | for 
(var i = 0; i < bars.resources.cpu.capacity; i++) { 49 | bars.drawRect(5, heightOfNodeWoPaddingPx - (i + 1) * cpuHeight, 5, cpuHeight) 50 | } 51 | 52 | bars.drawRect(14, heightOfNodeWoPaddingPx - bars.resources.memory.capacity / scale, 5, bars.resources.memory.capacity / scale) 53 | 54 | bars.on('mouseover', function () { 55 | let s = 'CPU: \n' 56 | const {capacity: cpuCap, reserved: cpuRes, requested: cpuReq, used: cpuUsed} = bars.resources.cpu 57 | s += '\t\t Capacity : ' + cpuCap + '\n' 58 | s += '\t\t Reserved : ' + cpuRes.toFixed(2) + '\n' 59 | s += '\t\t Requested : ' + cpuReq.toFixed(2) + '\n' 60 | s += '\t\t Used : ' + cpuUsed.toFixed(2) + '\n' 61 | s += '\nMemory: \n' 62 | 63 | const {capacity: memCap, reserved: memRes, requested: memReq, used: memUsed} = bars.resources.memory 64 | s += '\t\t Capacity : ' + (memCap / FACTORS.Gi).toFixed(2) + ' GiB\n' 65 | s += '\t\t Reserved : ' + (memRes / FACTORS.Gi).toFixed(2) + ' GiB\n' 66 | s += '\t\t Requested : ' + (memReq / FACTORS.Gi).toFixed(2) + ' GiB\n' 67 | s += '\t\t Used : ' + (memUsed / FACTORS.Gi).toFixed(2) + ' GiB\n' 68 | 69 | s += '\nPods: \n' 70 | const {capacity: podsCap, used: podsUsed} = bars.resources.pods 71 | s += '\t\t Capacity : ' + podsCap + '\n' 72 | s += '\t\t Used : ' + podsUsed + '\n' 73 | 74 | bars.tooltip.setText(s) 75 | bars.tooltip.position = bars.toGlobal(new PIXI.Point(22, 16)) 76 | bars.tooltip.visible = true 77 | }) 78 | bars.on('mouseout', function () { 79 | bars.tooltip.visible = false 80 | }) 81 | 82 | return bars 83 | } 84 | 85 | } 86 | -------------------------------------------------------------------------------- /app/src/cluster.js: -------------------------------------------------------------------------------- 1 | import { Node, isMaster } from './node.js' 2 | import { Pod } from './pod.js' 3 | import App from './app.js' 4 | const PIXI = require('pixi.js') 5 | 6 | export default class Cluster extends PIXI.Graphics { 7 | constructor (cluster, status, tooltip, config) { 8 | super() 9 | this.cluster = cluster 10 | this.status = status 11 | this.tooltip = tooltip 12 | this.config = config 13 | } 14 | 15 | destroy() { 16 | if (this.tick) { 17 | PIXI.ticker.shared.remove(this.tick, this) 18 | } 19 | super.destroy() 20 | } 21 | 22 | pulsate(_time) { 23 | const v = Math.sin((PIXI.ticker.shared.lastTime % 1000) / 1000. 
* Math.PI) 24 | this.alpha = 0.4 + (v * 0.6) 25 | } 26 | 27 | draw () { 28 | this.removeChildren() 29 | this.clear() 30 | const left = 10 31 | const top = 20 32 | const padding = 5 33 | let masterX = left 34 | let masterY = top 35 | let masterWidth = 0 36 | let masterHeight = 0 37 | let workerX = left 38 | let workerY = top 39 | let workerWidth = 0 40 | let workerHeight = 0 41 | const workerNodes = [] 42 | 43 | let maxPodsInWorkers = 0 44 | let maxPodsInMasters = 0 45 | // get the largest number of pods (workers and masters) 46 | for (const n of Object.values(this.cluster.nodes)) { 47 | const podsInNode = Object.values(n.pods).length 48 | 49 | if (isMaster(n.labels)) { 50 | if (podsInNode >= maxPodsInMasters) { 51 | maxPodsInMasters = podsInNode 52 | } 53 | } else { 54 | if (podsInNode >= maxPodsInWorkers) { 55 | maxPodsInWorkers = podsInNode 56 | } 57 | } 58 | } 59 | 60 | // with maxPodsInWorkers we can calculate the size of all nodes in the cluster 61 | this.podsPerRowWorker = Math.max( 62 | App.current.defaultPodsPerRow, 63 | Math.ceil(Math.sqrt(maxPodsInWorkers)) 64 | ) 65 | this.podsPerRowMaster = Math.max( 66 | App.current.defaultPodsPerRow, 67 | Math.ceil(Math.sqrt(maxPodsInMasters)) 68 | ) 69 | 70 | this.widthOfWorkerNodePx = Math.max( 71 | App.current.defaultWidthOfNodePx, 72 | Math.floor(this.podsPerRowWorker * App.current.sizeOfPodPx + App.current.startDrawingPodsAt + 2) 73 | ) 74 | this.widthOfMasterNodePx = Math.max( 75 | App.current.defaultWidthOfNodePx, 76 | Math.floor(this.podsPerRowMaster * App.current.sizeOfPodPx + App.current.startDrawingPodsAt + 2) 77 | ) 78 | 79 | this.heightOfWorkerNodePx = Math.max( 80 | App.current.defaultHeightOfNodePx, 81 | Math.floor(this.podsPerRowWorker * App.current.sizeOfPodPx + App.current.heightOfTopHandlePx + (App.current.sizeOfPodPx * 2) + 2) 82 | ) 83 | this.heightOfMasterNodePx = Math.max( 84 | App.current.defaultHeightOfNodePx, 85 | Math.floor(this.podsPerRowMaster * App.current.sizeOfPodPx + App.current.heightOfTopHandlePx + (App.current.sizeOfPodPx * 2) + 2) 86 | ) 87 | 88 | const maxWidth = (window.innerWidth * (1/this.config.initialScale)) - (this.heightOfWorkerNodePx * 1.2) 89 | 90 | for (const nodeName of Object.keys(this.cluster.nodes).sort()) { 91 | const node = this.cluster.nodes[nodeName] 92 | let nodeBox = null 93 | 94 | if (isMaster(node.labels)) { 95 | nodeBox = new Node(node, this, this.tooltip, this.podsPerRowMaster, this.widthOfMasterNodePx, this.heightOfMasterNodePx) 96 | nodeBox.draw() 97 | 98 | if (masterX > maxWidth) { 99 | masterWidth = masterX 100 | masterX = left 101 | masterY += this.heightOfMasterNodePx + padding 102 | masterHeight += this.heightOfMasterNodePx + padding 103 | } 104 | if (masterHeight == 0) { 105 | masterHeight = this.heightOfMasterNodePx + padding 106 | } 107 | nodeBox.x = masterX 108 | nodeBox.y = masterY 109 | masterX += this.widthOfMasterNodePx + padding 110 | } else { 111 | nodeBox = new Node(node, this, this.tooltip, this.podsPerRowWorker, this.widthOfWorkerNodePx, this.heightOfWorkerNodePx) 112 | nodeBox.draw() 113 | 114 | if (workerX > maxWidth) { 115 | workerWidth = workerX 116 | workerX = left 117 | workerY += this.heightOfWorkerNodePx + padding 118 | workerHeight += this.heightOfWorkerNodePx + padding 119 | } 120 | workerNodes.push(nodeBox) 121 | if (workerHeight == 0) { 122 | workerHeight = this.heightOfWorkerNodePx + padding 123 | } 124 | nodeBox.x = workerX 125 | nodeBox.y = workerY 126 | workerX += this.widthOfWorkerNodePx + padding 127 | } 128 | this.addChild(nodeBox) 129 | } 130 
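// workers are laid out below the masters: shift every worker node box down
// by the total height of the master rows computed above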
| for (const nodeBox of workerNodes) { 131 | nodeBox.y += masterHeight 132 | } 133 | 134 | /* 135 | Place unassigned pods to the right of the master nodes, or 136 | to the right of the worker nodes if there were no masters. 137 | */ 138 | var unassignedX = masterX === left ? workerX : masterX 139 | 140 | for (const pod of Object.values(this.cluster.unassigned_pods)) { 141 | var podBox = Pod.getOrCreate(pod, this, this.tooltip) 142 | podBox.x = unassignedX 143 | podBox.y = masterY 144 | podBox.draw() 145 | this.addChild(podBox) 146 | unassignedX += 20 147 | } 148 | 149 | this.lineStyle(2, App.current.theme.primaryColor, 1) 150 | const width = Math.max(masterX, masterWidth, workerX, workerWidth, unassignedX) 151 | this.drawRect(0, 0, width, top + masterHeight + workerHeight) 152 | 153 | const topHandle = this.topHandle = new PIXI.Graphics() 154 | topHandle.beginFill(App.current.theme.primaryColor, 1) 155 | topHandle.drawRect(0, 0, width, App.current.heightOfTopHandlePx) 156 | topHandle.endFill() 157 | topHandle.interactive = true 158 | topHandle.buttonMode = true 159 | const that = this 160 | topHandle.on('click', function(_event) { 161 | App.current.toggleCluster(that.cluster.id) 162 | }) 163 | const text = new PIXI.Text(''.concat(this.cluster.api_server_url, ' (', this.cluster.id, ')'), {fontFamily: 'ShareTechMono', fontSize: 10, fill: 0x000000}) 164 | text.x = 2 165 | text.y = 2 166 | topHandle.addChild(text) 167 | this.addChild(topHandle) 168 | 169 | let newTick = null 170 | const nowSeconds = Date.now() / 1000 171 | if (this.status && this.status.last_query_time < nowSeconds - 20) { 172 | newTick = this.pulsate 173 | } 174 | 175 | if (newTick && newTick != this.tick) { 176 | this.tick = newTick 177 | // important: only register new listener if it does not exist yet! 
178 | // (otherwise we leak listeners) 179 | PIXI.ticker.shared.add(this.tick, this) 180 | } else if (!newTick && this.tick) { 181 | PIXI.ticker.shared.remove(this.tick, this) 182 | this.tick = null 183 | this.alpha = 1 184 | this.tint = 0xffffff 185 | } 186 | } 187 | 188 | } 189 | -------------------------------------------------------------------------------- /app/src/config.js: -------------------------------------------------------------------------------- 1 | const TRUTHY_VALUES = new Set(['1', 'true']) 2 | 3 | export default class Config { 4 | 5 | constructor() { 6 | this.dashboardMode = false 7 | this.reloadIntervalSeconds = 0 8 | this.initialScale = 1.0 9 | this.renderer = 'auto' 10 | // make sure we got activity at least every 20 seconds 11 | this.keepAliveSeconds = 20 12 | // always reconnect after 5 minutes 13 | this.maxConnectionLifetimeSeconds = 300 14 | // consider cluster data older than 1 minute outdated 15 | this.maxDataAgeSeconds = 60 16 | 17 | this.nodeLinkUrlTemplate = null 18 | this.podLinkUrlTemplate = null 19 | } 20 | 21 | static fromParams(params) { 22 | const config = new Config() 23 | config.dashboardMode = TRUTHY_VALUES.has(params.get('dashboard')) 24 | config.reloadIntervalSeconds = parseInt(params.get('reload')) || 0 25 | config.initialScale = parseFloat(params.get('scale')) || 1.0 26 | config.renderer = params.get('renderer') || 'auto' 27 | return config 28 | } 29 | 30 | 31 | } 32 | -------------------------------------------------------------------------------- /app/src/filters.js: -------------------------------------------------------------------------------- 1 | const PIXI = require('pixi.js') 2 | const BRIGHTNESS_FILTER = new PIXI.filters.ColorMatrixFilter() 3 | BRIGHTNESS_FILTER.brightness(1.3) 4 | 5 | const DESATURATION_FILTER = new PIXI.filters.ColorMatrixFilter() 6 | DESATURATION_FILTER.desaturate() 7 | 8 | export { BRIGHTNESS_FILTER, DESATURATION_FILTER } 9 | -------------------------------------------------------------------------------- /app/src/node.js: -------------------------------------------------------------------------------- 1 | import {Pod} from './pod.js' 2 | import Bars from './bars.js' 3 | import {parseResource} from './utils.js' 4 | import App from './app' 5 | const PIXI = require('pixi.js') 6 | 7 | 8 | export const isMaster = (labels) => { 9 | for (var key in labels) { 10 | if (key == 'node-role.kubernetes.io/master' || 11 | key == 'kubernetes.io/role' && labels[key] == 'master' || 12 | key == 'master' && labels[key] == 'true' ) { 13 | return true 14 | } 15 | } 16 | } 17 | 18 | export class Node extends PIXI.Graphics { 19 | constructor(node, cluster, tooltip, podsPerRow, widthOfNodePx, heightOfNodePx) { 20 | super() 21 | this.node = node 22 | this.cluster = cluster 23 | this.tooltip = tooltip 24 | this.podsPerRow = podsPerRow 25 | this.widthOfNodePx = widthOfNodePx 26 | this.heightOfNodePx = heightOfNodePx 27 | } 28 | 29 | getResourceUsage() { 30 | const resources = {} 31 | for (const key of Object.keys(this.node.status.capacity)) { 32 | resources[key] = { 33 | 'capacity': parseResource(this.node.status.capacity[key]), 34 | 'reserved': 0, 35 | 'requested': 0, 36 | 'used': 0 37 | } 38 | const allocatable = this.node.status.allocatable[key] 39 | if (allocatable) { 40 | resources[key]['reserved'] = resources[key]['capacity'] - parseResource(allocatable) 41 | } 42 | } 43 | if (this.node.usage) { 44 | for (const key of Object.keys(this.node.usage)) { 45 | resources[key]['used'] = parseResource(this.node.usage[key]) 46 | } 47 | } 48 | let 
numberOfPods = 0 49 | for (const pod of Object.values(this.node.pods)) { 50 | numberOfPods++ 51 | // do not account for completed jobs 52 | if (pod.phase != 'Succeeded') { 53 | for (const container of pod.containers) { 54 | if (container.resources && container.resources.requests) { 55 | for (const key of Object.keys(container.resources.requests)) { 56 | resources[key].requested += parseResource(container.resources.requests[key]) 57 | } 58 | } 59 | } 60 | } 61 | } 62 | resources['pods'].requested = numberOfPods 63 | resources['pods'].used = numberOfPods 64 | return resources 65 | } 66 | 67 | draw() { 68 | const nodeBox = this 69 | const topHandle = new PIXI.Graphics() 70 | topHandle.beginFill(App.current.theme.primaryColor, 1) 71 | topHandle.drawRect(0, 0, this.widthOfNodePx, App.current.heightOfTopHandlePx) 72 | topHandle.endFill() 73 | 74 | // there is about 2.83 letters per pod 75 | const roomForText = Math.floor(2.83 * this.podsPerRow) 76 | const ellipsizedNodeName = this.node.name.length > roomForText ? this.node.name.substring(0, roomForText).concat('…') : this.node.name 77 | const text = new PIXI.Text(ellipsizedNodeName, {fontFamily: 'ShareTechMono', fontSize: 10, fill: 0x000000}) 78 | text.x = 2 79 | text.y = 2 80 | topHandle.addChild(text) 81 | nodeBox.addChild(topHandle) 82 | nodeBox.lineStyle(2, App.current.theme.primaryColor, 1) 83 | nodeBox.beginFill(App.current.theme.secondaryColor, 1) 84 | nodeBox.drawRect(0, 0, this.widthOfNodePx, this.heightOfNodePx) 85 | nodeBox.endFill() 86 | nodeBox.lineStyle(2, 0xaaaaaa, 1) 87 | topHandle.interactive = true 88 | topHandle.on('mouseover', function () { 89 | let s = nodeBox.node.name 90 | s += '\nLabels:' 91 | for (const key of Object.keys(nodeBox.node.labels).sort()) { 92 | s += '\n ' + key + ': ' + nodeBox.node.labels[key] 93 | } 94 | nodeBox.tooltip.setText(s) 95 | nodeBox.tooltip.position = nodeBox.toGlobal(new PIXI.Point(0, App.current.heightOfTopHandlePx)) 96 | nodeBox.tooltip.visible = true 97 | }) 98 | topHandle.on('mouseout', function () { 99 | nodeBox.tooltip.visible = false 100 | }) 101 | if (App.current.config.nodeLinkUrlTemplate !== null) { 102 | topHandle.buttonMode = true 103 | topHandle.on('click', function() { 104 | location.href = App.current.config.nodeLinkUrlTemplate.replace('{cluster}', nodeBox.cluster.cluster.id).replace('{name}', nodeBox.node.name) 105 | }) 106 | } 107 | const resources = this.getResourceUsage() 108 | const bars = new Bars(nodeBox, resources, nodeBox.tooltip) 109 | bars.x = 0 110 | bars.y = 1 111 | nodeBox.addChild(bars.draw()) 112 | 113 | nodeBox.addPods(App.current.sorterFn) 114 | return nodeBox 115 | } 116 | 117 | addPods(sorterFn) { 118 | const nodeBox = this 119 | const px = App.current.startDrawingPodsAt 120 | const py = App.current.heightOfTopHandlePx + 5 121 | let podsCounter = 0 122 | let podsKubeSystemCounter = 0 123 | const pods = Object.values(this.node.pods).sort(sorterFn) 124 | for (const pod of pods) { 125 | if (pod.namespace != 'kube-system') { 126 | const podBox = Pod.getOrCreate(pod, this.cluster, this.tooltip) 127 | podBox.movePodTo( 128 | new PIXI.Point( 129 | // we have a room for this.cluster.podsPerRow pods 130 | px + (App.current.sizeOfPodPx * (podsCounter % this.podsPerRow)), 131 | // we just count when to get to another row 132 | py + (App.current.sizeOfPodPx * Math.floor(podsCounter / this.podsPerRow)) 133 | ) 134 | ) 135 | nodeBox.addChild(podBox.draw()) 136 | podsCounter++ 137 | } else { 138 | // kube-system pods 139 | const podBox = Pod.getOrCreate(pod, this.cluster, 
this.tooltip) 140 | podBox.movePodTo( 141 | new PIXI.Point( 142 | // we have a room for this.cluster.podsPerRow pods 143 | px + (App.current.sizeOfPodPx * (podsKubeSystemCounter % this.podsPerRow)), 144 | // like above (for not kube-system pods), but we count from the bottom 145 | this.heightOfNodePx - App.current.sizeOfPodPx - 2 - (App.current.sizeOfPodPx * Math.floor(podsKubeSystemCounter / this.podsPerRow)) 146 | ) 147 | ) 148 | nodeBox.addChild(podBox.draw()) 149 | podsKubeSystemCounter++ 150 | } 151 | } 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /app/src/pod.js: -------------------------------------------------------------------------------- 1 | const PIXI = require('pixi.js') 2 | import App from './app.js' 3 | import {FACTORS, getBarColor, podResource} from './utils.js' 4 | import {BRIGHTNESS_FILTER} from './filters.js' 5 | 6 | const ALL_PODS = {} 7 | 8 | const showNotReady = (pod) => { 9 | if (pod.phase == 'Succeeded') { 10 | return false 11 | } 12 | for (let index = 0; index < pod.containers.length; index++) { 13 | if (!pod.containers[index].ready) { 14 | return true 15 | } 16 | } 17 | return false 18 | } 19 | 20 | const ALL_STATUS_FILTERS = [ 21 | { 22 | text: 'SHOW: All', value: function () { return true } 23 | }, 24 | { 25 | text: 'SHOW: NotReady', value: showNotReady 26 | }, 27 | ] 28 | 29 | const sortByName = (a, b) => { 30 | // https://github.com/hjacobs/kube-ops-view/issues/103 31 | // *.name might be undefined 32 | return (a.name || '').localeCompare(b.name || '') 33 | } 34 | 35 | const sortByAge = (a, b) => { 36 | const dateA = new Date(a.startTime) 37 | const dateB = new Date(b.startTime) 38 | if (dateA.getTime() < dateB.getTime()) { 39 | return -1 40 | } else if (dateA.getTime() === dateB.getTime()) 41 | return 0 42 | else 43 | return 1 44 | } 45 | 46 | const sortByMemory = (a, b) => { 47 | const aMem = podResource('memory')(a.containers, 'usage') 48 | const bMem = podResource('memory')(b.containers, 'usage') 49 | return bMem - aMem 50 | } 51 | 52 | const sortByCPU = (a, b) => { 53 | const aCpu = podResource('cpu')(a.containers, 'usage') 54 | const bCpu = podResource('cpu')(b.containers, 'usage') 55 | return bCpu - aCpu 56 | } 57 | 58 | const sortByStatus = (a, b) => { 59 | return (a.phase).localeCompare(b.phase) 60 | } 61 | 62 | 63 | const ALL_SORTS = [ 64 | { 65 | text: 'SORT: NAME', value: sortByName 66 | }, 67 | { 68 | text: 'SORT: AGE', value: sortByAge 69 | }, 70 | { 71 | text: 'SORT: MEMORY', value: sortByMemory 72 | }, 73 | { 74 | text: 'SORT: CPU', value: sortByCPU 75 | }, 76 | { 77 | text: 'SORT: STATUS', value: sortByStatus 78 | } 79 | ] 80 | 81 | export {ALL_PODS, ALL_SORTS, ALL_STATUS_FILTERS} 82 | 83 | export class Pod extends PIXI.Graphics { 84 | 85 | constructor(pod, cluster, tooltip) { 86 | super() 87 | this.pod = pod 88 | this.cluster = cluster 89 | this.tooltip = tooltip 90 | this.tick = null 91 | this._progress = 1 92 | this._targetPosition = null 93 | 94 | if (cluster) { 95 | ALL_PODS[cluster.cluster.id + '/' + pod.namespace + '/' + pod.name] = this 96 | } 97 | } 98 | 99 | destroy() { 100 | if (this.tick) { 101 | PIXI.ticker.shared.remove(this.tick, this) 102 | } 103 | PIXI.ticker.shared.remove(this.animateMove, this) 104 | super.destroy() 105 | } 106 | 107 | animateMove(time) { 108 | const deltaX = this._targetPosition.x - this.position.x 109 | const deltaY = this._targetPosition.y - this.position.y 110 | if (Math.abs(deltaX) < 2 && Math.abs(deltaY) < 2) { 111 | this.position = 
this._targetPosition 112 | PIXI.ticker.shared.remove(this.animateMove, this) 113 | } else { 114 | if (Math.abs(deltaX) > time) { 115 | this.position.x += time * Math.sign(deltaX) 116 | } 117 | if (Math.abs(deltaY) > time) { 118 | this.position.y += time * Math.sign(deltaY) 119 | } 120 | } 121 | } 122 | 123 | movePodTo(targetPosition) { 124 | if (!this._targetPosition) { 125 | // just set coords 126 | this.position = this._targetPosition = targetPosition 127 | } else if (!this._targetPosition.equals(targetPosition)) { 128 | // animate moving to new position 129 | this._targetPosition = targetPosition 130 | PIXI.ticker.shared.add(this.animateMove, this) 131 | } 132 | } 133 | 134 | getResourceUsage() { 135 | 136 | const podCpu = podResource('cpu') 137 | const podMem = podResource('memory') 138 | 139 | const cpuLimits = podCpu(this.pod.containers, 'limits') 140 | const cpuUsage = podCpu(this.pod.containers, 'usage') 141 | const cpuRequests = podCpu(this.pod.containers, 'requests') 142 | 143 | const memLimits = podMem(this.pod.containers, 'limits') 144 | const memUsage = podMem(this.pod.containers, 'usage') 145 | const memRequests = podMem(this.pod.containers, 'requests') 146 | 147 | return { 148 | memory: { 149 | limit: memLimits, 150 | requested: memRequests, 151 | used: memUsage 152 | }, 153 | cpu: { 154 | limit: cpuLimits, 155 | requested: cpuRequests, 156 | used: cpuUsage 157 | } 158 | } 159 | } 160 | 161 | static getOrCreate(pod, cluster, tooltip) { 162 | const existingPod = ALL_PODS[cluster.cluster.id + '/' + pod.namespace + '/' + pod.name] 163 | if (existingPod) { 164 | existingPod.pod = pod 165 | existingPod.clear() 166 | return existingPod 167 | } else { 168 | return new Pod(pod, cluster, tooltip) 169 | } 170 | } 171 | 172 | pulsate(_time) { 173 | const v = Math.sin((PIXI.ticker.shared.lastTime % 1000) / 1000. * Math.PI) 174 | this.alpha = v * this._progress 175 | } 176 | 177 | crashing(_time) { 178 | const v = Math.sin((PIXI.ticker.shared.lastTime % 1000) / 1000. * Math.PI) 179 | this.tint = PIXI.utils.rgb2hex([1, v, v]) 180 | } 181 | 182 | terminating(_time) { 183 | const v = Math.sin(((1000 + PIXI.ticker.shared.lastTime) % 1000) / 1000. 
* Math.PI) 184 | this.cross.alpha = v 185 | } 186 | 187 | draw() { 188 | 189 | let ready = 0 190 | let running = 0 191 | let restarts = 0 192 | for (const container of this.pod.containers) { 193 | if (container.ready) { 194 | ready++ 195 | } 196 | if (container.state && container.state.running) { 197 | running++ 198 | } 199 | restarts += container.restartCount || 0 200 | } 201 | const allReady = ready >= this.pod.containers.length 202 | const allRunning = running >= this.pod.containers.length 203 | const resources = this.getResourceUsage() 204 | 205 | let newTick = null 206 | 207 | const podBox = this 208 | podBox.interactive = true 209 | podBox.on('mouseover', function () { 210 | podBox.filters = podBox.filters.filter(x => x != BRIGHTNESS_FILTER).concat([BRIGHTNESS_FILTER]) 211 | let s = this.pod.name 212 | s += '\nNamespace : ' + this.pod.namespace 213 | s += '\nStatus : ' + this.pod.phase + ' (' + ready + '/' + this.pod.containers.length + ' ready)' 214 | s += '\nStart Time: ' + this.pod.startTime 215 | s += '\nLabels :' 216 | for (var key of Object.keys(this.pod.labels).sort()) { 217 | if (key !== 'pod-template-hash') { 218 | s += '\n ' + key + ': ' + this.pod.labels[key] 219 | } 220 | } 221 | s += '\nContainers:' 222 | for (const container of this.pod.containers) { 223 | s += '\n ' + container.name + ': ' 224 | if (container.state) { 225 | const key = Object.keys(container.state)[0] 226 | s += key 227 | if (container.state[key].reason) { 228 | // "CrashLoopBackOff" 229 | s += ': ' + container.state[key].reason 230 | } 231 | } 232 | if (container.restartCount) { 233 | s += ' (' + container.restartCount + ' restarts)' 234 | } 235 | } 236 | s += '\nCPU:' 237 | s += '\n Requested: ' + (resources.cpu.requested / FACTORS.m).toFixed(0) + ' m' 238 | s += '\n Limit: ' + (resources.cpu.limit / FACTORS.m).toFixed(0) + ' m' 239 | s += '\n Used: ' + (resources.cpu.used / FACTORS.m).toFixed(0) + ' m' 240 | s += '\nMemory:' 241 | s += '\n Requested: ' + (resources.memory.requested / FACTORS.Mi).toFixed(0) + ' MiB' 242 | s += '\n Limit: ' + (resources.memory.limit / FACTORS.Mi).toFixed(0) + ' MiB' 243 | s += '\n Used: ' + (resources.memory.used / FACTORS.Mi).toFixed(0) + ' MiB' 244 | 245 | this.tooltip.setText(s) 246 | this.tooltip.position = this.toGlobal(new PIXI.Point(10, 10)) 247 | this.tooltip.visible = true 248 | }) 249 | podBox.on('mouseout', function () { 250 | podBox.filters = podBox.filters.filter(x => x != BRIGHTNESS_FILTER) 251 | this.tooltip.visible = false 252 | }) 253 | if (App.current.config.podLinkUrlTemplate !== null) { 254 | podBox.buttonMode = true 255 | podBox.on('click', function() { 256 | location.href = App.current.config.podLinkUrlTemplate.replace('{cluster}', this.cluster.cluster.id).replace('{namespace}', this.pod.namespace).replace('{name}', this.pod.name) 257 | }) 258 | } 259 | podBox.lineStyle(1, App.current.theme.primaryColor, 1) 260 | const w = 10 / this.pod.containers.length 261 | for (let i = 0; i < this.pod.containers.length; i++) { 262 | podBox.drawRect(i * w, 0, w, 10) 263 | } 264 | let color 265 | if (this.pod.phase == 'Succeeded') { 266 | // completed Job 267 | color = 0xaaaaff 268 | } else if (this.pod.phase == 'Running' && allReady) { 269 | color = 0xaaffaa 270 | } else if (this.pod.phase == 'Running' && allRunning && !allReady) { 271 | // all containers running, but some not ready (readinessProbe) 272 | newTick = this.pulsate 273 | color = 0xaaffaa 274 | } else if (this.pod.phase == 'Pending') { 275 | newTick = this.pulsate 276 | color = 0xffffaa 277 | } else 
{ 278 | // CrashLoopBackOff, ImagePullBackOff or other unknown state 279 | newTick = this.crashing 280 | color = 0xffaaaa 281 | } 282 | podBox.lineStyle(2, color, 1) 283 | podBox.beginFill(color, 0.2) 284 | podBox.drawRect(0, 0, 10, 10) 285 | if (this.pod.deleted) { 286 | if (!this.cross) { 287 | const cross = new PIXI.Graphics() 288 | cross.lineStyle(3, 0xff0000, 1) 289 | cross.moveTo(0, 0) 290 | cross.lineTo(10, 10) 291 | cross.moveTo(10, 0) 292 | cross.lineTo(0, 10) 293 | cross.pivot.x = 5 294 | cross.pivot.y = 5 295 | cross.x = 5 296 | cross.y = 5 297 | cross.blendMode = PIXI.BLEND_MODES.ADD 298 | this.addChild(cross) 299 | this.cross = cross 300 | } 301 | newTick = this.terminating 302 | } 303 | 304 | if (restarts) { 305 | this.lineStyle(2, 0xff9999, 1) 306 | for (let i = 0; i < Math.min(restarts, 4); i++) { 307 | this.moveTo(10, i * 3 - 1) 308 | this.lineTo(10, i * 3 + 1) 309 | } 310 | } 311 | 312 | if (newTick && newTick != this.tick) { 313 | this.tick = newTick 314 | // important: only register new listener if it does not exist yet! 315 | // (otherwise we leak listeners) 316 | PIXI.ticker.shared.add(this.tick, this) 317 | } else if (!newTick && this.tick) { 318 | PIXI.ticker.shared.remove(this.tick, this) 319 | this.tick = null 320 | this.alpha = this._progress 321 | this.tint = 0xffffff 322 | } 323 | 324 | // CPU 325 | const scaleCpu = Math.max(resources.cpu.requested, resources.cpu.limit, resources.cpu.used) / 8 326 | const scaledCpuReq = resources.cpu.requested !== 0 && scaleCpu !== 0 ? resources.cpu.requested / scaleCpu : 0 327 | const scaledCpuUsed = resources.cpu.used !== 0 && scaleCpu !== 0 ? resources.cpu.used / scaleCpu : 0 328 | podBox.lineStyle() 329 | podBox.beginFill(getBarColor(resources.cpu.requested, resources.cpu.limit), 1) 330 | podBox.drawRect(1, 9 - scaledCpuReq, 1, scaledCpuReq) 331 | podBox.beginFill(getBarColor(resources.cpu.used, resources.cpu.limit), 1) 332 | podBox.drawRect(2, 9 - scaledCpuUsed, 1, scaledCpuUsed) 333 | podBox.endFill() 334 | 335 | // Memory 336 | const scale = Math.max(resources.memory.requested, resources.memory.limit, resources.memory.used) / 8 337 | const scaledMemReq = resources.memory.requested !== 0 && scale !== 0 ? resources.memory.requested / scale : 0 338 | const scaledMemUsed = resources.memory.used !== 0 && scale !== 0 ? 
resources.memory.used / scale : 0 339 | podBox.lineStyle() 340 | podBox.beginFill(getBarColor(resources.memory.requested, resources.memory.limit), 1) 341 | podBox.drawRect(3, 9 - scaledMemReq, 1, scaledMemReq) 342 | podBox.beginFill(getBarColor(resources.memory.used, resources.memory.limit), 1) 343 | podBox.drawRect(4, 9 - scaledMemUsed, 1, scaledMemUsed) 344 | podBox.endFill() 345 | 346 | return this 347 | } 348 | } 349 | -------------------------------------------------------------------------------- /app/src/selectbox.js: -------------------------------------------------------------------------------- 1 | import App from './app' 2 | 3 | const PIXI = require('pixi.js') 4 | 5 | export default class SelectBox extends PIXI.Graphics { 6 | constructor(items, value, onchange) { 7 | super() 8 | this.items = items 9 | this.value = value 10 | this.count = 0 11 | for (const item of items) { 12 | if (item.value == value) { 13 | break 14 | } 15 | this.count++ 16 | } 17 | if (this.count >= items.length) { 18 | this.count = 0 19 | } 20 | this.text = new PIXI.Text(this.items[this.count].text, { 21 | fontFamily: 'ShareTechMono', 22 | fontSize: 14, 23 | fill: App.current.theme.primaryColor, 24 | align: 'center' 25 | }) 26 | this.text.x = 10 27 | this.text.y = 5 28 | this.addChild(this.text) 29 | this.onchange = onchange 30 | } 31 | 32 | onForwardOver() { 33 | this.forwardArrow.alpha = 0.5 34 | } 35 | 36 | onForwardOut() { 37 | this.forwardArrow.alpha = 1 38 | } 39 | 40 | onForwardPressed() { 41 | const selectBox = this 42 | selectBox.count++ 43 | if (selectBox.count >= this.items.length) { 44 | selectBox.count = 0 45 | } 46 | selectBox.text.text = selectBox.items[selectBox.count].text 47 | this.value = this.items[this.count].value 48 | this.onchange(this.items[this.count].text, this.value) 49 | } 50 | 51 | onBackOver() { 52 | this.backArrow.alpha = 0.5 53 | } 54 | 55 | onBackOut() { 56 | this.backArrow.alpha = 1 57 | } 58 | 59 | onBackPressed() { 60 | const selectBox = this 61 | selectBox.count-- 62 | if (selectBox.count < 0) { 63 | selectBox.count = selectBox.items.length - 1 64 | } 65 | selectBox.text.text = selectBox.items[selectBox.count].text 66 | this.value = this.items[this.count].value 67 | this.onchange(this.items[this.count].text, this.value) 68 | } 69 | 70 | draw() { 71 | const selectBox = this 72 | 73 | const backArrow = this.backArrow = new PIXI.Graphics() 74 | const forwardArrow = this.forwardArrow = new PIXI.Graphics() 75 | backArrow.interactive = true 76 | backArrow.buttonMode = true 77 | forwardArrow.interactive = true 78 | forwardArrow.buttonMode = true 79 | 80 | // FIXME: hardcoded value for average char width.. 
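// e.g. for the sort selector the longest label is 'SORT: MEMORY' (12 chars),
// so the text box ends up 10 + 8 * 12 = 106 px wide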
81 | const textBoxWidth = 10 + 8 * Math.max.apply(Math, this.items.map(item => item.text.length)) 82 | const arrowBoxWidth = 18 83 | 84 | // draw a triangle 85 | backArrow.beginFill(App.current.theme.secondaryColor, 1) 86 | backArrow.drawRect(-18, 0, arrowBoxWidth, 22) 87 | backArrow.lineStyle(1, App.current.theme.primaryColor, 1) 88 | backArrow.beginFill(App.current.theme.secondaryColor, 1) 89 | backArrow.moveTo(-4, 5) 90 | backArrow.lineTo(-15, 11) 91 | backArrow.lineTo(-4, 17) 92 | backArrow.lineTo(-4, 5) 93 | backArrow.endFill() 94 | selectBox.addChild(backArrow) 95 | 96 | selectBox.lineStyle(1, App.current.theme.primaryColor, 1) 97 | selectBox.drawRect(4, 0, textBoxWidth, 22) 98 | 99 | forwardArrow.beginFill(App.current.theme.secondaryColor, 1) 100 | forwardArrow.drawRect(textBoxWidth + 8, 0, arrowBoxWidth, 22) 101 | forwardArrow.lineStyle(1, App.current.theme.primaryColor, 1) 102 | forwardArrow.beginFill(App.current.theme.secondaryColor, 1) 103 | forwardArrow.moveTo(textBoxWidth + 11, 5) 104 | forwardArrow.lineTo(textBoxWidth + 22, 11) 105 | forwardArrow.lineTo(textBoxWidth + 11, 17) 106 | forwardArrow.lineTo(textBoxWidth + 11, 5) 107 | forwardArrow.endFill() 108 | selectBox.addChild(forwardArrow) 109 | 110 | backArrow.on('mouseover', selectBox.onBackOver.bind(this)) 111 | backArrow.on('mouseout', selectBox.onBackOut.bind(this)) 112 | backArrow.on('mousedown', selectBox.onBackPressed.bind(this)) 113 | backArrow.on('touchstart', selectBox.onBackPressed.bind(this)) 114 | forwardArrow.on('mouseover', selectBox.onForwardOver.bind(this)) 115 | forwardArrow.on('mouseout', selectBox.onForwardOut.bind(this)) 116 | forwardArrow.on('mousedown', selectBox.onForwardPressed.bind(this)) 117 | forwardArrow.on('touchstart', selectBox.onForwardPressed.bind(this)) 118 | 119 | return selectBox 120 | } 121 | 122 | } 123 | -------------------------------------------------------------------------------- /app/src/themes.js: -------------------------------------------------------------------------------- 1 | const PIXI = require('pixi.js') 2 | import {CRTFilter} from '@pixi/filter-crt' 3 | 4 | export const ALL_THEMES = {} 5 | 6 | export class Theme { 7 | constructor() { 8 | } 9 | 10 | static get(name) { 11 | return ALL_THEMES[name] || ALL_THEMES['default'] 12 | } 13 | } 14 | 15 | class DefaultTheme { 16 | constructor() { 17 | this.name = 'default' 18 | this.primaryColor = 0xaaaaff 19 | this.secondaryColor = 0x222233 20 | } 21 | apply(stage) { 22 | stage.filters = [] 23 | } 24 | register() { 25 | ALL_THEMES[this.name] = this 26 | } 27 | } 28 | new DefaultTheme().register() 29 | 30 | class GreenTheme extends DefaultTheme { 31 | constructor() { 32 | super() 33 | this.name = 'green' 34 | this.primaryColor = 0xaaffaa 35 | this.secondaryColor = 0x223322 36 | } 37 | } 38 | new GreenTheme().register() 39 | 40 | class GreyTheme extends DefaultTheme { 41 | constructor() { 42 | super() 43 | this.name = 'grey' 44 | this.primaryColor = 0xeeeeee 45 | this.secondaryColor = 0x333333 46 | } 47 | } 48 | new GreyTheme().register() 49 | 50 | class BlackAndWhiteTheme extends DefaultTheme { 51 | constructor() { 52 | super() 53 | this.name = 'blackandwhite' 54 | this.primaryColor = 0xffffff 55 | this.secondaryColor = 0x000000 56 | } 57 | apply(stage) { 58 | const filter = new PIXI.filters.ColorMatrixFilter() 59 | filter.blackAndWhite() 60 | stage.filters = [filter] 61 | } 62 | } 63 | new BlackAndWhiteTheme().register() 64 | 65 | class SepiaTheme extends DefaultTheme { 66 | constructor() { 67 | super() 68 | this.name = 'sepia' 69 
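// sepia keeps the default primary/secondary colours; only the stage filter
// (applied below) differs from DefaultTheme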
| } 70 | apply(stage) { 71 | const filter = new PIXI.filters.ColorMatrixFilter() 72 | filter.sepia() 73 | stage.filters = [filter] 74 | } 75 | } 76 | new SepiaTheme().register() 77 | 78 | class PolaroidTheme extends DefaultTheme { 79 | constructor() { 80 | super() 81 | this.name = 'polaroid' 82 | } 83 | apply(stage) { 84 | const filter = new PIXI.filters.ColorMatrixFilter() 85 | filter.polaroid() 86 | stage.filters = [filter] 87 | } 88 | } 89 | new PolaroidTheme().register() 90 | 91 | class HighContrastTheme extends DefaultTheme { 92 | constructor() { 93 | super() 94 | this.name = 'highcontrast' 95 | this.primaryColor = 0xffffff 96 | this.secondaryColor = 0x000000 97 | } 98 | apply(stage) { 99 | const filter = new PIXI.filters.ColorMatrixFilter() 100 | filter.saturate(3) 101 | stage.filters = [filter] 102 | } 103 | } 104 | new HighContrastTheme().register() 105 | 106 | class CRTTheme extends DefaultTheme { 107 | constructor() { 108 | super() 109 | this.name = 'crt' 110 | this.primaryColor = 0xaaaaff 111 | this.secondaryColor = 0x222233 112 | } 113 | apply(stage) { 114 | const filter = new CRTFilter({time: 0.5}) 115 | 116 | stage.filters = [filter] 117 | this.filter = filter 118 | PIXI.ticker.shared.add(this.animate, this) 119 | } 120 | 121 | animate(_delta) { 122 | this.filter.seed = Math.random() 123 | this.filter.time += 0.5 124 | } 125 | } 126 | new CRTTheme().register() 127 | -------------------------------------------------------------------------------- /app/src/tooltip.js: -------------------------------------------------------------------------------- 1 | import App from './app.js' 2 | 3 | const PIXI = require('pixi.js') 4 | 5 | export default class Tooltip extends PIXI.Graphics { 6 | constructor () { 7 | super() 8 | this.text = new PIXI.Text('', {fontFamily: 'ShareTechMono', fontSize: 12, fill: 0xffffff}) 9 | this.text.x = 4 10 | this.text.y = 4 11 | this.addChild(this.text) 12 | this.visible = false 13 | } 14 | 15 | setText(text) { 16 | this.text.text = text 17 | this.draw() 18 | } 19 | 20 | draw () { 21 | this.clear() 22 | this.lineStyle(2, App.current.theme.secondaryColor, 0.8) 23 | this.beginFill(App.current.theme.secondaryColor, 0.8) 24 | this.drawRect(0, 0, this.text.width + 8, this.text.height + 8) 25 | this.endFill() 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /app/src/utils.js: -------------------------------------------------------------------------------- 1 | const PIXI = require('pixi.js') 2 | 3 | const FACTORS = { 4 | 'n': 1 / 1000000000, 5 | 'u': 1 / 1000000, 6 | 'm': 1 / 1000, 7 | '': 1, 8 | 'k': 1000, 9 | 'M': Math.pow(1000, 2), 10 | 'G': Math.pow(1000, 3), 11 | 'T': Math.pow(1000, 4), 12 | 'P': Math.pow(1000, 5), 13 | 'E': Math.pow(1000, 6), 14 | 'Ki': 1024, 15 | 'Mi': Math.pow(1024, 2), 16 | 'Gi': Math.pow(1024, 3), 17 | 'Ti': Math.pow(1024, 4), 18 | 'Pi': Math.pow(1024, 5), 19 | 'Ei': Math.pow(1024, 6) 20 | } 21 | 22 | function hsvToRgb(h, s, v) { 23 | let r, g, b 24 | const i = Math.floor(h * 6) 25 | const f = h * 6 - i 26 | const p = v * (1 - s) 27 | const q = v * (1 - f * s) 28 | const t = v * (1 - (1 - f) * s) 29 | switch (i % 6) { 30 | case 0: 31 | r = v 32 | g = t 33 | b = p 34 | break 35 | case 1: 36 | r = q 37 | g = v 38 | b = p 39 | break 40 | case 2: 41 | r = p 42 | g = v 43 | b = t 44 | break 45 | case 3: 46 | r = p 47 | g = q 48 | b = v 49 | break 50 | case 4: 51 | r = t 52 | g = p 53 | b = v 54 | break 55 | case 5: 56 | r = v 57 | g = p 58 | b = q 59 | break 60 | } 61 | return 
PIXI.utils.rgb2hex([r, g, b]) 62 | } 63 | 64 | function getBarColor(usage, capacity) { 65 | return hsvToRgb(Math.max(0, Math.min(1, 0.4 - (0.4 * (usage / capacity)))), 0.6, 1) 66 | } 67 | 68 | function parseResource(v) { 69 | const match = v.match(/^(\d*)(\D*)$/) 70 | const factor = FACTORS[match[2]] || 1 71 | return parseInt(match[1]) * factor 72 | } 73 | 74 | const metric = (metric, type) => 75 | metric ? (metric[type] ? parseResource(metric[type]) : 0) : 0 76 | 77 | const podResource = type => (containers, resource) => 78 | containers 79 | .map(({resources}) => resources ? metric(resources[resource], type) : 0) 80 | .reduce((a, b) => a + b, 0) 81 | 82 | export {FACTORS, hsvToRgb, getBarColor, parseResource, metric, podResource} 83 | -------------------------------------------------------------------------------- /app/src/vendor/addWheelListener.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This module unifies handling of mouse whee event accross different browsers 3 | * 4 | * See https://developer.mozilla.org/en-US/docs/Web/Reference/Events/wheel?redirectlocale=en-US&redirectslug=DOM%2FMozilla_event_reference%2Fwheel 5 | * for more details 6 | */ 7 | module.exports = addWheelListener; 8 | 9 | var prefix = "", _addEventListener, onwheel, support; 10 | 11 | // detect event model 12 | if ( window.addEventListener ) { 13 | _addEventListener = "addEventListener"; 14 | } else { 15 | _addEventListener = "attachEvent"; 16 | prefix = "on"; 17 | } 18 | 19 | // detect available wheel event 20 | support = "onwheel" in document.createElement("div") ? "wheel" : // Modern browsers support "wheel" 21 | document.onmousewheel !== undefined ? "mousewheel" : // Webkit and IE support at least "mousewheel" 22 | "DOMMouseScroll"; // let's assume that remaining browsers are older Firefox 23 | 24 | function addWheelListener( elem, callback, useCapture ) { 25 | _addWheelListener( elem, support, callback, useCapture ); 26 | 27 | // handle MozMousePixelScroll in older Firefox 28 | if( support == "DOMMouseScroll" ) { 29 | _addWheelListener( elem, "MozMousePixelScroll", callback, useCapture ); 30 | } 31 | }; 32 | 33 | function _addWheelListener( elem, eventName, callback, useCapture ) { 34 | elem[ _addEventListener ]( prefix + eventName, support == "wheel" ? callback : function( originalEvent ) { 35 | !originalEvent && ( originalEvent = window.event ); 36 | 37 | // create a normalized event object 38 | var event = { 39 | // keep a ref to the original event object 40 | originalEvent: originalEvent, 41 | target: originalEvent.target || originalEvent.srcElement, 42 | type: "wheel", 43 | deltaMode: originalEvent.type == "MozMousePixelScroll" ? 0 : 1, 44 | deltaX: 0, 45 | delatZ: 0, 46 | preventDefault: function() { 47 | originalEvent.preventDefault ? 
48 | originalEvent.preventDefault() : 49 | originalEvent.returnValue = false; 50 | } 51 | }; 52 | 53 | // calculate deltaY (and deltaX) according to the event 54 | if ( support == "mousewheel" ) { 55 | event.deltaY = - 1/40 * originalEvent.wheelDelta; 56 | // Webkit also support wheelDeltaX 57 | originalEvent.wheelDeltaX && ( event.deltaX = - 1/40 * originalEvent.wheelDeltaX ); 58 | } else { 59 | event.deltaY = originalEvent.detail; 60 | } 61 | 62 | // it's time to fire the callback 63 | return callback( event ); 64 | 65 | }, useCapture || false ); 66 | } 67 | -------------------------------------------------------------------------------- /app/src/vendor/json_delta.js: -------------------------------------------------------------------------------- 1 | /* JSON-delta v2.0 - A diff/patch pair for JSON-serialized data 2 | structures. 3 | 4 | Copyright 2013-2015 Philip J. Roberts . 5 | All rights reserved 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are 9 | met: 10 | 11 | 1. Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | 14 | 2. Redistributions in binary form must reproduce the above copyright 15 | notice, this list of conditions and the following disclaimer in the 16 | documentation and/or other materials provided with the distribution. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | 30 | This implementation is based heavily on the original python2 version: 31 | see http://www.phil-roberts.name/json-delta/ for further 32 | documentation. */ 33 | 34 | export const JSON_delta = { 35 | // Main entry points: ====================================================== 36 | patch: function(struc, diff) { 37 | /* Apply the sequence of diff stanzas diff to the structure 38 | struc, and returns the patched structure. */ 39 | var stan_key; 40 | for (stan_key = 0; stan_key < diff.length; stan_key++) { 41 | struc = this.patchStanza(struc, diff[stan_key]); 42 | } 43 | return struc; 44 | }, 45 | 46 | diff: function(left, right, minimal, key) { 47 | /* Build a diff between the structures left and right. 48 | 49 | Parameters: 50 | key: this is used for mutual recursion between this 51 | function and those it calls. Normally it should be 52 | left unset or set as its default []. 53 | 54 | minimal: if this flag is set true, the function will try 55 | harder to find the diff that encodes as the shortest 56 | possible JSON string, at the expense of using more of 57 | both memory and processor time (as alternatives are 58 | computed and compared). 59 | */ 60 | key = key !== undefined ? key : []; 61 | minimal = minimal !== undefined ? 
minimal : true; 62 | var dumbdiff = [[key, right]], my_diff = [], common; 63 | 64 | if (this.structureWorthInvestigating(left, right)) { 65 | common = this.commonality(left, right); 66 | if (minimal) { 67 | my_diff = this.needleDiff(left, right, minimal, key); 68 | } else if (common < 0.5) { 69 | my_diff = this.thisLevelDiff(left, right, key, common); 70 | } else { 71 | my_diff = this.keysetDiff(left, right, minimal, key); 72 | } 73 | } else { 74 | my_diff = this.thisLevelDiff(left, right, key, 0.0); 75 | } 76 | 77 | if (minimal) { 78 | if (JSON.stringify(dumbdiff).length < 79 | JSON.stringify(my_diff).length) { 80 | my_diff = dumbdiff; 81 | } 82 | } 83 | 84 | if (key.length === 0) { 85 | if (my_diff.length > 1) { 86 | my_diff = this.sortStanzas(my_diff); 87 | } 88 | } 89 | return my_diff; 90 | }, 91 | 92 | // ========================================================================= 93 | 94 | isStrictlyEqual: function(left, right) { 95 | /* Recursively compare the (potentially nested) objects left 96 | * and right */ 97 | var idx, ks, key; 98 | if (this.isTerminal(left) && this.isTerminal(right)) { 99 | return (left === right); 100 | } 101 | if (this.isTerminal(left) || this.isTerminal(right)) { 102 | return false; 103 | } 104 | if (left instanceof Array && right instanceof Array) { 105 | if (left.length !== right.length) { 106 | return false; 107 | } 108 | for (idx = 0; idx < left.length; idx++) { 109 | if (! this.isStrictlyEqual(left[idx], right[idx])) { 110 | return false; 111 | } 112 | } 113 | return true; 114 | } 115 | if (left instanceof Array || right instanceof Array) { 116 | return false; 117 | } 118 | ks = this.computeKeysets(left, right); 119 | if (ks[1].length !== 0 || ks[2].length !== 0) { 120 | return false; 121 | } 122 | for (idx = 0; idx < ks[0].length; idx++) { 123 | key = ks[0][idx]; 124 | if (! this.isStrictlyEqual(left[key], right[key])) { 125 | return false; 126 | } 127 | } 128 | return true; 129 | }, 130 | 131 | isTerminal: function(obj) { 132 | /* Test whether obj will be a terminal node in the tree when 133 | * serialized as JSON. */ 134 | if (typeof obj === 'string' || typeof obj === 'number' || 135 | typeof obj === 'boolean' || obj === null) { 136 | return true; 137 | } 138 | return false; 139 | }, 140 | 141 | appendKey: function(stanzas, arr, key) { 142 | /* Get the appropriate key for appending to the array arr, 143 | * assuming that stanzas will also be applied, and arr appears 144 | * at key within the overall structure. */ 145 | key = key !== undefined ? key : []; 146 | var addition_key = arr.length, prior_key, i; 147 | for (i = 0; i < stanzas.length; i++) { 148 | prior_key = stanzas[i][0]; 149 | if (stanzas[i].length > 1 && 150 | prior_key.length === key.length + 1 && 151 | prior_key[prior_key.length-1] >= addition_key) 152 | { addition_key = prior_key[prior_key.length-1] + 1; } 153 | } 154 | return addition_key; 155 | }, 156 | 157 | loopOver: function(obj, callback) { 158 | /* Helper function for looping over obj. Does the Right Thing 159 | * whether obj is an array or not. 
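 * The callback is invoked as callback(container, key): for arrays the key is a numeric index, for plain objects it is an own property name, so the callback can read or assign container[key] directly.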
*/ 160 | var i, key; 161 | if (obj instanceof Array) { 162 | for (i = 0; i < obj.length; i++) { 163 | callback(obj, i); 164 | } 165 | } else { 166 | for (key in obj) { 167 | if (obj.hasOwnProperty(key)) { 168 | callback(obj, key); 169 | } 170 | } 171 | } 172 | }, 173 | 174 | inArray: function(keypath) { 175 | var terminal = keypath[keypath.length - 1]; 176 | return (typeof terminal === 'number') 177 | }, 178 | 179 | inObject: function(keypath) { 180 | var terminal = keypath[keypath.length - 1]; 181 | return (typeof terminal === 'string') 182 | }, 183 | 184 | splitDiff: function(diff) { 185 | /* Split the stanzas in diff into an array of three arrays: 186 | * [modifications, deletions, insertions]. */ 187 | var idx, objs = [], mods = [], dels = [], inss = []; 188 | var dests = {3: inss, 1: dels}, stanza, keypath; 189 | if (diff.length === 0) {return [[], diff];} 190 | for (idx = 0; idx < diff.length; idx++) { 191 | stanza = diff[idx] 192 | if (stanza.length === 2) { 193 | if (this.inObject(stanza[0])) { 194 | objs.push(stanza); 195 | } else { 196 | mods.push(stanza); 197 | } 198 | } else { 199 | dests[stanza.length].push(stanza) 200 | } 201 | } 202 | return [objs, mods, dels, inss]; 203 | }, 204 | 205 | stableKeypathLengthSort: function(stanzas) { 206 | var comparator = function (a, b) { 207 | var swap; 208 | if (a[0].length === b[0].length) { 209 | return a[0][0] - b[0][0]; 210 | } 211 | return b[0].length - a[0].length; 212 | } 213 | for (var i = 0; i < stanzas.length; i++) { 214 | stanzas[i][0].unshift(i) 215 | } 216 | stanzas.sort(comparator) 217 | for (i = 0; i < stanzas.length; i++) { 218 | stanzas[i][0].shift() 219 | } 220 | return stanzas 221 | }, 222 | 223 | keypathCompare: function(a, b) { 224 | a = a[0]; b = b[0]; 225 | if (a.length !== b.length) { 226 | return a.length - b.length; 227 | } 228 | for (var i = 0; i < a.length; i++) { 229 | if (typeof a[i] === 'number' && a[i] !== b[i]) { 230 | return a[i] - b[i]; 231 | } 232 | } 233 | return 0; 234 | }, 235 | 236 | keypathCompareReverse: function(a, b) { 237 | a = a[0]; b = b[0]; 238 | if (a.length !== b.length) { 239 | return b.length - a.length; 240 | } 241 | for (var i = 0; i < a.length; i++) { 242 | if (typeof a[i] === 'number' && a[i] !== b[i]) { 243 | return b[i] - a[i]; 244 | } 245 | } 246 | return 0; 247 | }, 248 | 249 | sortStanzas: function(diff) { 250 | /* Sorts the stanzas in a diff: object changes can occur in 251 | * any order, but deletions from arrays have to happen last 252 | * node first: ['foo', 'bar', 'baz'] -> ['foo', 'bar'] -> 253 | * ['foo'] -> []; additions to sequences have to happen 254 | * leftmost-node-first: [] -> ['foo'] -> ['foo', 'bar'] -> 255 | * ['foo', 'bar', 'baz'], and insert-and-shift alterations to 256 | * arrays must happen last. 
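 * (Rationale: deleting the highest index first means earlier deletions never shift the indices that later deletion stanzas refer to, and appending the lowest index first means each target index already exists by the time it is written.)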
*/ 257 | 258 | // First we divide the stanzas using splitDiff(): 259 | var split_thing = this.splitDiff(diff); 260 | // Then we sort modifications of arrays in ascending order of keypath 261 | // (note that we can?t tell appends from mods on the info available): 262 | split_thing[1].sort(this.keypathCompare); 263 | // Deletions from arrays in descending order of keypath: 264 | split_thing[2].sort(this.keypathCompareReverse); 265 | // And insert-and-shifts in ascending order of keypath: 266 | split_thing[3].sort(this.keypathCompare) 267 | diff = split_thing[0].concat( 268 | split_thing[1], split_thing[2], split_thing[3] 269 | ); 270 | // Finally, we sort by length of keypath: 271 | diff = this.stableKeypathLengthSort(diff, true) 272 | return diff 273 | }, 274 | 275 | computeKeysets: function(left, right) { 276 | /* Returns an array of three arrays (overlap, left_only, 277 | * right_only), representing the properties common to left and 278 | * right, only defined for left, and only defined for right, 279 | * respectively. */ 280 | var overlap = [], left_only = [], right_only = []; 281 | var target = overlap; 282 | 283 | this.loopOver(left, function(obj, key) { 284 | if (right[key] !== undefined) { 285 | target = overlap; 286 | } 287 | else { 288 | target = left_only; 289 | } 290 | target.push(key); 291 | }); 292 | this.loopOver(right, function(obj, key) { 293 | if (left[key] === undefined) { 294 | right_only.push(key); 295 | } 296 | }); 297 | return [overlap, left_only, right_only]; 298 | }, 299 | 300 | structureWorthInvestigating: function(left, right) { 301 | /* Test whether it is worth looking at the internal structure 302 | * of `left` and `right` to see if they can be efficiently 303 | * diffed. */ 304 | if (this.isTerminal(left) || this.isTerminal(right)) { 305 | return false; 306 | } 307 | if ((left.length === 0) || (right.length === 0)) { 308 | return false; 309 | } 310 | if ((left instanceof Array) && (right instanceof Array)) { 311 | return true; 312 | } 313 | if ((left instanceof Array) || (right instanceof Array)) { 314 | return false; 315 | } 316 | if ((typeof left === 'object') && (typeof right === 'object')) { 317 | return true; 318 | } 319 | return false; 320 | }, 321 | 322 | commonality: function(left, right) { 323 | /* Calculate the amount that the structures left and right 324 | * have in common */ 325 | var com = 0, tot = 0; 326 | var elem, keysets, o, l, r, idx; 327 | if (this.isTerminal(left) || this.isTerminal(right)) { 328 | return 0; 329 | } 330 | 331 | if ((left instanceof Array) && (right instanceof Array)) { 332 | for (idx = 0; idx < left.length; idx++) { 333 | elem = left[idx]; 334 | if (right.indexOf(elem) !== -1) { 335 | com++; 336 | } 337 | } 338 | tot = Math.max(left.length, right.length); 339 | } 340 | else { 341 | if ((left instanceof Array) || (right instanceof Array)) { 342 | return 0; 343 | } 344 | keysets = this.computeKeysets(left, right); 345 | o = keysets[0]; l = keysets[1]; r = keysets[2]; 346 | com = o.length; 347 | tot = o.length + l.length + r.length; 348 | for (idx = 0; idx < r.length; idx++) { 349 | elem = r[idx]; 350 | if (l.indexOf(elem) === -1) { 351 | tot++; 352 | } 353 | } 354 | } 355 | if (tot === 0) {return 0;} 356 | return com / tot; 357 | }, 358 | 359 | thisLevelDiff: function(left, right, key, common) { 360 | /* Returns a sequence of diff stanzas between the objects left 361 | * and right, assuming that they are each at the position key 362 | * within the overall structure. 
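 * Each emitted stanza is either [keypath, value] (set or replace the value at that keypath) or [keypath] on its own (delete the key at that keypath).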
*/ 363 | var out = [], idx, okey; 364 | key = key !== undefined ? key : []; 365 | 366 | if (common === undefined) { 367 | common = this.commonality(left, right); 368 | } 369 | 370 | if (common) { 371 | var ks = this.computeKeysets(left, right); 372 | for (idx = 0; idx < ks[0].length; idx++) { 373 | okey = ks[0][idx]; 374 | if (left[okey] !== right[okey]) { 375 | out.push([key.concat([okey]), right[okey]]); 376 | } 377 | } 378 | for (idx = 0; idx < ks[1].length; idx++) { 379 | okey = ks[1][idx]; 380 | out.push([key.concat([okey])]); 381 | } 382 | for (idx = 0; idx < ks[2].length; idx++) { 383 | okey = ks[2][idx]; 384 | out.push([key.concat([okey]), right[okey]]); 385 | } 386 | return out; 387 | } 388 | if (! this.isStrictlyEqual(left, right)) { 389 | return [[key, right]]; 390 | } 391 | return []; 392 | }, 393 | 394 | keysetDiff: function(left, right, minimal, key) { 395 | /* Compute a diff between left and right, without treating 396 | * arrays differently from objects. */ 397 | minimal = minimal !== undefined ? minimal : true; 398 | var out = [], k; 399 | var ks = this.computeKeysets(left, right); 400 | for (k = 0; k < ks[1].length; k++) { 401 | out.push([key.concat(ks[1][k])]); 402 | } 403 | for (k = 0; k < ks[2].length; k++) { 404 | out.push([key.concat(ks[2][k]), right[ks[2][k]]]); 405 | } 406 | for (k = 0; k < ks[0].length; k++) { 407 | out = out.concat(this.diff(left[ks[0][k]], right[ks[0][k]], 408 | minimal, key.concat([ks[0][k]]))); 409 | } 410 | return out; 411 | }, 412 | 413 | needleDiff: function(left, right, minimal, key) { 414 | /* Compute a diff between left and right. If both are arrays, 415 | * a variant of Needleman-Wunsch sequence alignment is used to 416 | * make the diff minimal (at a significant cost in both 417 | * storage and processing). Otherwise, the parms are passed on 418 | * to keysetDiff.*/ 419 | if (! (left instanceof Array && right instanceof Array)) { 420 | return this.keysetDiff(left, right, minimal, key); 421 | } 422 | minimal = minimal !== undefined ? 
minimal : true; 423 | var down_col = 0, lastrow = [], i, sub_i, left_i, right_i, col_i; 424 | var row, first_left_i, left_elem, right_elem; 425 | var cand_length, win_length, cand, winner; 426 | 427 | var modify_cand = function () { 428 | if (col_i + 1 < lastrow.length) { 429 | return lastrow[col_i+1].concat( 430 | JSON_delta.diff(left_elem, right_elem, 431 | minimal, key.concat([left_i])) 432 | ); 433 | } 434 | }; 435 | 436 | var delete_cand = function () { 437 | if (row.length > 0) { 438 | return row[0].concat([[key.concat([left_i])]]); 439 | } 440 | }; 441 | 442 | var append_cand = function () { 443 | if (col_i === down_col) { 444 | return lastrow[col_i].concat( 445 | [[key.concat([JSON_delta.appendKey(lastrow[col_i], left, key)]), 446 | right_elem]] 447 | ); 448 | } 449 | }; 450 | 451 | var insert_cand = function () { 452 | if (col_i !== down_col) { 453 | return lastrow[col_i].concat( 454 | [[key.concat([right_i]), right_elem, "i"]] 455 | ); 456 | } 457 | }; 458 | 459 | var cand_funcs = [modify_cand, delete_cand, append_cand, insert_cand]; 460 | 461 | for (i = 0; i <= left.length; i++) { 462 | lastrow.unshift([]); 463 | for (sub_i = 0; sub_i < i; sub_i++) { 464 | lastrow[0].push([key.concat([sub_i])]); 465 | } 466 | } 467 | 468 | for (right_i = 0; right_i < right.length; right_i++) { 469 | right_elem = right[right_i]; 470 | row = [] 471 | for (left_i = 0; left_i < left.length; left_i++) { 472 | left_elem = left[left_i]; 473 | col_i = left.length - left_i - 1; 474 | win_length = Infinity; 475 | for (i = 0; i < cand_funcs.length; i++) { 476 | cand = cand_funcs[i](); 477 | if (cand !== undefined) { 478 | cand_length = JSON.stringify(cand).length; 479 | if (cand_length < win_length) { 480 | winner = cand; 481 | win_length = cand_length; 482 | } 483 | } 484 | } 485 | row.unshift(winner); 486 | } 487 | lastrow = row; 488 | } 489 | return winner; 490 | }, 491 | 492 | patchStanza: function(struc, diff) { 493 | /* Applies the diff stanza diff to the structure struc. 494 | Returns the modified structure. */ 495 | var key = diff[0]; 496 | switch (key.length) { 497 | case 0: 498 | struc = diff[1]; 499 | break; 500 | case 1: 501 | if (diff.length === 1) { 502 | if (struc.splice === undefined) { 503 | delete struc[key[0]]; 504 | } 505 | else { 506 | struc.splice(key[0], 1); 507 | } 508 | } else if (diff.length === 3) { 509 | if (struc.splice === undefined) { 510 | struc[key[0]] = diff[1]; 511 | } else { 512 | struc.splice(key[0], 0, diff[1]); 513 | } 514 | } 515 | else { 516 | struc[key[0]] = diff[1]; 517 | } 518 | break; 519 | default: 520 | var pass_key = key.slice(1), pass_struc = struc[key[0]]; 521 | var pass_diff = [pass_key].concat(diff.slice(1)); 522 | if (pass_struc === undefined) { 523 | if (typeof pass_key[0] === 'string') { 524 | pass_struc = {}; 525 | } else { 526 | pass_struc = []; 527 | } 528 | } 529 | struc[key[0]] = this.patchStanza(pass_struc, pass_diff); 530 | } 531 | return struc; 532 | } 533 | }; 534 | -------------------------------------------------------------------------------- /app/webpack.config.js: -------------------------------------------------------------------------------- 1 | var path = require('path'), 2 | webpack = require('webpack'), 3 | pkg = require('./package.json'), 4 | DEBUG = process.env.NODE_ENV !== 'production', 5 | entry = [ 6 | './src/app.js', 7 | ] 8 | 9 | module.exports = { 10 | context: path.join(__dirname, './'), 11 | entry: entry, 12 | target: 'web', 13 | devtool: DEBUG ? 
'inline-source-map' : false, 14 | output: { 15 | library: 'App', 16 | path: path.resolve(pkg.config.buildDir), 17 | publicPath: DEBUG ? '/' : './', 18 | filename: DEBUG ? 'app.js' : 'app-[hash].js' 19 | }, 20 | node: { 21 | fs: 'empty' 22 | }, 23 | plugins: [ 24 | new webpack.HotModuleReplacementPlugin(), 25 | new webpack.LoaderOptionsPlugin({ 26 | debug: DEBUG 27 | }) 28 | ], 29 | module: { 30 | rules: [ 31 | {enforce: 'pre', test: /\.js$/, loader: 'eslint-loader', exclude: /node_modules/}, 32 | {test: /\.js$/, exclude: /node_modules/, loader: 'babel-loader', query: {plugins: ['transform-runtime'], presets: ['es2015']}}, 33 | {test: /\.html$/, exclude: /node_modules/, loader: 'file-loader?name=[path][name].[ext]'}, 34 | {test: /\.jpe?g$|\.svg$|\.png$/, exclude: /node_modules/, loader: 'file-loader?name=[path][name].[ext]'}, 35 | {test: /\.json$/, exclude: /node_modules/, loader: 'json'}, 36 | {test: /\.(otf|eot|svg|ttf|woff|woff2)(\?v=\d+\.\d+\.\d+)?$/, loader: 'url?limit=8192&mimetype=application/font-woff'}, 37 | {test: /\.json$/, include: path.join(__dirname, 'node_modules', 'pixi.js'), loader: 'json'}, 38 | {enforce: 'post', include: path.resolve(__dirname, 'node_modules/pixi.js'), loader: 'transform-loader?brfs'} 39 | ] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /deploy/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | application: kube-ops-view 6 | component: frontend 7 | name: kube-ops-view 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | application: kube-ops-view 13 | component: frontend 14 | template: 15 | metadata: 16 | labels: 17 | application: kube-ops-view 18 | component: frontend 19 | spec: 20 | serviceAccountName: kube-ops-view 21 | containers: 22 | - name: service 23 | # see https://github.com/hjacobs/kube-ops-view/releases 24 | image: hjacobs/kube-ops-view:20.4.0 25 | args: 26 | # remove this option to use built-in memory store 27 | - --redis-url=redis://kube-ops-view-redis:6379 28 | # example to add external links for nodes and pods 29 | # - --node-link-url-template=https://kube-web-view.example.org/clusters/{cluster}/nodes/{name} 30 | # - --pod-link-url-template=https://kube-web-view.example.org/clusters/{cluster}/namespaces/{namespace}/pods/{name} 31 | ports: 32 | - containerPort: 8080 33 | protocol: TCP 34 | readinessProbe: 35 | httpGet: 36 | path: /health 37 | port: 8080 38 | initialDelaySeconds: 5 39 | timeoutSeconds: 1 40 | resources: 41 | limits: 42 | cpu: 200m 43 | memory: 100Mi 44 | requests: 45 | cpu: 50m 46 | memory: 50Mi 47 | securityContext: 48 | readOnlyRootFilesystem: true 49 | runAsNonRoot: true 50 | runAsUser: 1000 51 | -------------------------------------------------------------------------------- /deploy/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - deployment.yaml 5 | - rbac.yaml 6 | - service.yaml 7 | - redis-deployment.yaml 8 | - redis-service.yaml 9 | -------------------------------------------------------------------------------- /deploy/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: kube-ops-view 6 | --- 7 | kind: ClusterRole 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | metadata: 10 | 
name: kube-ops-view 11 | rules: 12 | - apiGroups: [""] 13 | resources: ["nodes", "pods"] 14 | verbs: 15 | - list 16 | - apiGroups: ["metrics.k8s.io"] 17 | resources: ["nodes", "pods"] 18 | verbs: 19 | - get 20 | - list 21 | --- 22 | kind: ClusterRoleBinding 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | metadata: 25 | name: kube-ops-view 26 | roleRef: 27 | apiGroup: rbac.authorization.k8s.io 28 | kind: ClusterRole 29 | name: kube-ops-view 30 | subjects: 31 | - kind: ServiceAccount 32 | name: kube-ops-view 33 | namespace: default 34 | -------------------------------------------------------------------------------- /deploy/redis-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | application: kube-ops-view 6 | component: redis 7 | name: kube-ops-view-redis 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | application: kube-ops-view 13 | component: redis 14 | template: 15 | metadata: 16 | labels: 17 | application: kube-ops-view 18 | component: redis 19 | spec: 20 | containers: 21 | - name: redis 22 | image: redis:5-alpine 23 | ports: 24 | - containerPort: 6379 25 | protocol: TCP 26 | readinessProbe: 27 | tcpSocket: 28 | port: 6379 29 | resources: 30 | limits: 31 | cpu: 200m 32 | memory: 100Mi 33 | requests: 34 | cpu: 50m 35 | memory: 50Mi 36 | securityContext: 37 | readOnlyRootFilesystem: true 38 | runAsNonRoot: true 39 | # we need to use the "redis" uid 40 | runAsUser: 999 41 | -------------------------------------------------------------------------------- /deploy/redis-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | application: kube-ops-view 6 | component: redis 7 | name: kube-ops-view-redis 8 | spec: 9 | selector: 10 | application: kube-ops-view 11 | component: redis 12 | type: ClusterIP 13 | ports: 14 | - port: 6379 15 | protocol: TCP 16 | targetPort: 6379 17 | -------------------------------------------------------------------------------- /deploy/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | application: kube-ops-view 6 | component: frontend 7 | name: kube-ops-view 8 | spec: 9 | selector: 10 | application: kube-ops-view 11 | component: frontend 12 | type: ClusterIP 13 | ports: 14 | - port: 80 15 | protocol: TCP 16 | targetPort: 8080 17 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | _build/ 2 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = KubernetesOperationalView 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
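# For example, "make html" runs "sphinx-build -M html . _build" and writes the rendered pages to _build/html; additional sphinx-build flags can be passed via O, e.g. "make html O=-W" to treat warnings as errors.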
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/access-control.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | Access Control 3 | ============== 4 | 5 | Kube Ops View supports protecting the UI via the OAuth Authorization Code Grant flow. 6 | 7 | Relevant configuration settings (environment variables) for OAuth are: 8 | 9 | ``APP_URL`` 10 | The app's own URL, e.g. https://kube-ops-view.example.org. This is used to construct the OAuth 2 redirect URI (callback URL). 11 | ``AUTHORIZE_URL`` 12 | OAuth 2 authorization endpoint URL, e.g. https://oauth2.example.org/authorize 13 | ``ACCESS_TOKEN_URL`` 14 | Token endpoint URL for the OAuth 2 Authorization Code Grant flow, e.g. https://oauth2.example.org/token 15 | ``SCOPE`` 16 | OAuth 2 scopes provide a way to limit the amount of access that is granted to an access token, e.g. https://oauth2.example.org/authorize/readonly 17 | ``CREDENTIALS_DIR`` 18 | Folder path to load client credentials from. The folder needs to contain two files: ``authcode-client-id`` and ``authcode-client-secret``. 19 | 20 | GitHub OAuth Example 21 | ==================== 22 | 23 | How to configure Kubernetes Operational View to use GitHub OAuth for access control (example with localhost): 24 | 25 | * create a new GitHub OAuth application and configure ``http://localhost:8080/login/oauth/authorized`` as "Authorization Callback URL". 26 | * create a file ``authcode-client-id`` with the contents of the generated GitHub "Client ID" 27 | * create a file ``authcode-client-secret`` with the contents of the generated GitHub "Client Secret" 28 | * point the ``CREDENTIALS_DIR`` environment variable to a folder with these two files 29 | * start Kubernetes Operational View with ``OAUTHLIB_INSECURE_TRANSPORT=true`` (needed as localhost is not running with SSL/TLS), ``AUTHORIZE_URL=https://github.com/login/oauth/authorize``, and ``ACCESS_TOKEN_URL=https://github.com/login/oauth/access_token`` 30 | 31 | Screen Tokens 32 | ============= 33 | 34 | Screen tokens allow non-human access to the UI to support permanent dashboards on TV screens. 35 | 36 | On your local machine: authenticate via OAuth redirect flow and go to /screen-tokens to create a new token. 37 | Write down the screen token on a piece of paper. 38 | 39 | Go to the TV screen and enter /screen/$TOKEN in the location bar. 40 | 41 | TODO: how do screen tokens work? 42 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Kubernetes Operational View documentation build configuration file, created by 5 | # sphinx-quickstart on Tue Jan 10 21:54:00 2017. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
18 | # 19 | # import os 20 | # import sys 21 | # sys.path.insert(0, os.path.abspath('.')) 22 | # -- General configuration ------------------------------------------------ 23 | # If your documentation needs a minimal Sphinx version, state it here. 24 | # 25 | # needs_sphinx = '1.0' 26 | # Add any Sphinx extension module names here, as strings. They can be 27 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 28 | # ones. 29 | from typing import List 30 | 31 | extensions: List[str] = [] 32 | 33 | # Add any paths that contain templates here, relative to this directory. 34 | templates_path = ["_templates"] 35 | 36 | # The suffix(es) of source filenames. 37 | # You can specify multiple suffix as a list of string: 38 | # 39 | # source_suffix = ['.rst', '.md'] 40 | source_suffix = ".rst" 41 | 42 | # The master toctree document. 43 | master_doc = "index" 44 | 45 | # General information about the project. 46 | project = "Kubernetes Operational View" 47 | copyright = "2017, Henning Jacobs" 48 | author = "Henning Jacobs" 49 | 50 | # The version info for the project you're documenting, acts as replacement for 51 | # |version| and |release|, also used in various other places throughout the 52 | # built documents. 53 | # 54 | # The short X.Y version. 55 | version = "0.1" 56 | # The full version, including alpha/beta/rc tags. 57 | release = "0.1" 58 | 59 | # The language for content autogenerated by Sphinx. Refer to documentation 60 | # for a list of supported languages. 61 | # 62 | # This is also used if you do content translation via gettext catalogs. 63 | # Usually you set "language" from the command line for these cases. 64 | language = None 65 | 66 | # List of patterns, relative to source directory, that match files and 67 | # directories to ignore when looking for source files. 68 | # This patterns also effect to html_static_path and html_extra_path 69 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 70 | 71 | # The name of the Pygments (syntax highlighting) style to use. 72 | pygments_style = "sphinx" 73 | 74 | # If true, `todo` and `todoList` produce output, else they produce nothing. 75 | todo_include_todos = False 76 | 77 | 78 | # -- Options for HTML output ---------------------------------------------- 79 | 80 | # The theme to use for HTML and HTML Help pages. See the documentation for 81 | # a list of builtin themes. 82 | # 83 | try: 84 | import sphinx_rtd_theme 85 | 86 | html_theme = "sphinx_rtd_theme" 87 | 88 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 89 | except Exception: 90 | # Sphinx Theme not found, ignore! 91 | pass 92 | 93 | # Theme options are theme-specific and customize the look and feel of a theme 94 | # further. For a list of options available for each theme, see the 95 | # documentation. 96 | # 97 | # html_theme_options = {} 98 | 99 | # Add any paths that contain custom static files (such as style sheets) here, 100 | # relative to this directory. They are copied after the builtin static files, 101 | # so a file named "default.css" will overwrite the builtin "default.css". 102 | html_static_path = ["_static"] 103 | -------------------------------------------------------------------------------- /docs/getting-started.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | Getting Started 3 | =============== 4 | 5 | You can find example Kubernetes manifests for deployment in the deploy folder. It should be as simple as: 6 | 7 | .. 
code-block:: bash 8 | 9 | $ git clone git@github.com:hjacobs/kube-ops-view.git 10 | $ kubectl apply -f kube-ops-view/deploy 11 | 12 | Afterwards you can open "kube-ops-view" via the kubectl proxy: 13 | 14 | .. code-block:: bash 15 | 16 | $ kubectl proxy 17 | 18 | Now direct your browser to http://localhost:8001/api/v1/proxy/namespaces/default/services/kube-ops-view/ 19 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Kubernetes Operational View documentation master file, created by 2 | sphinx-quickstart on Tue Jan 10 21:54:00 2017. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Kubernetes Operational View's documentation! 7 | ======================================================= 8 | 9 | The goal of Kubernetes Operational View is to provide a common operational picture for multiple Kubernetes clusters. 10 | 11 | GitHub repository: https://github.com/hjacobs/kube-ops-view 12 | 13 | .. toctree:: 14 | :maxdepth: 2 15 | :caption: Contents: 16 | 17 | getting-started 18 | user-guide 19 | multiple-clusters 20 | access-control 21 | ui-options 22 | 23 | 24 | 25 | Indices and tables 26 | ================== 27 | 28 | * :ref:`genindex` 29 | * :ref:`modindex` 30 | * :ref:`search` 31 | -------------------------------------------------------------------------------- /docs/multiple-clusters.rst: -------------------------------------------------------------------------------- 1 | ================= 2 | Multiple Clusters 3 | ================= 4 | 5 | Multiple clusters are supported by either passing a static list of API server URLs, using an existing kubeconfig file or pointing to a Cluster Registry HTTP endpoint. 6 | 7 | Static List of API Server URLs 8 | ============================== 9 | 10 | Set the ``CLUSTERS`` environment variable to a comma separated list of Kubernetes API server URLs. 11 | 12 | These can either be unprotected ``localhost`` URLs or OAuth 2 protected API endpoints. 13 | 14 | The needed OAuth credentials (``Bearer`` access token) must be provided via a file ``${CREDENTIALS_DIR}/read-only-token-secret``. 15 | 16 | 17 | Kubeconfig File 18 | =============== 19 | 20 | The `kubeconfig file`_ allows defining multiple cluster contexts with potential different authentication mechanisms. 21 | 22 | Kubernetes Operational View will try to reach all defined contexts when given the ``--kubeconfig-path`` command line option (or ``KUBECONFIG_PATH`` environment variable). 23 | 24 | Example: 25 | 26 | Assuming ``~/.kube/config`` as the following contents with two defined contexts: 27 | 28 | .. code-block:: yaml 29 | 30 | apiVersion: v1 31 | kind: Config 32 | clusters: 33 | - cluster: {server: 'https://kube.foo.example.org'} 34 | name: kube_foo_example_org 35 | - cluster: {server: 'https://kube.bar.example.org'} 36 | name: kube_bar_example_org 37 | contexts: 38 | - context: {cluster: kube_foo_example_org, user: kube_foo_example_org} 39 | name: foo 40 | - context: {cluster: kube_bar_example_org, user: kube_bar_example_org} 41 | name: bar 42 | current-context: kube_foo_example_org 43 | users: 44 | - name: kube_foo_example_org 45 | user: {token: myfootoken123} 46 | - name: kube_bar_example_org 47 | user: {token: mybartoken456} 48 | 49 | Kubernetes Operational View would try to reach both endpoints with the respective token for authentication: 50 | 51 | .. 
code-block:: bash 52 | 53 | $ # note that we need to mount the local ~/.kube/config file into the Docker container 54 | $ docker run -it --net=host -v ~/.kube:/kube hjacobs/kube-ops-view --kubeconfig-path=/kube/config 55 | 56 | .. Note:: 57 | 58 | You need to make sure that the Docker container has access to any required SSL certificate files. 59 | `Minikube`_ by default will use certificates in ``~/.minikube``. You can copy them to ``~/.kube`` and make the paths in ``~/.kube/config`` relative. 60 | 61 | The following command should work out of the box with Minikube: 62 | 63 | .. code-block:: bash 64 | 65 | $ docker run -it --net=host -v ~/.kube:/kube -v ~/.minikube:$HOME/.minikube hjacobs/kube-ops-view --kubeconfig-path=/kube/config 66 | 67 | You can select which clusters should be queried by specifying a list of kubeconfig contexts with the ``--kubeconfig-contexts`` option: 68 | 69 | .. code-block:: bash 70 | 71 | $ docker run -it --net=host -v ~/.kube:/kube hjacobs/kube-ops-view --kubeconfig-path=/kube/config --kubeconfig-contexts=bar 72 | 73 | This would only query the Kubernetes cluster defined by the ``bar`` context. 74 | 75 | 76 | Cluster Registry 77 | ================ 78 | 79 | Clusters can be dynamically discovered by providing one HTTP endpoint as the cluster registry. 80 | Set either the ``CLUSTER_REGISTRY_URL`` environment variable or the ``--cluster-registry-url`` option to an URL conforming to: 81 | 82 | .. code-block:: bash 83 | 84 | $ curl -H 'Authorization: Bearer mytoken' $CLUSTER_REGISTRY_URL/kubernetes-clusters 85 | { 86 | "items": [ 87 | { 88 | "id": "my-cluster-id", 89 | "api_server_url": "https://my-cluster.example.org" 90 | } 91 | ] 92 | } 93 | 94 | The cluster registry will be queried with an OAuth Bearer token, the token can be statically set via the ``OAUTH2_ACCESS_TOKENS`` environment variable. 95 | Example: 96 | 97 | .. code-block:: bash 98 | 99 | $ token=mysecrettoken 100 | $ docker run -it -p 8080:8080 -e OAUTH2_ACCESS_TOKENS=read-only=$token hjacobs/kube-ops-view --cluster-registry-url=https://cluster-registry.example.org 101 | 102 | Otherwise the needed OAuth credentials (``Bearer`` access token) must be provided via a file ``${CREDENTIALS_DIR}/read-only-token-secret``. 103 | You can pass this file by mounting a secret like: 104 | 105 | .. code-block:: yaml 106 | 107 | apiVersion: v1 108 | kind: Secret 109 | metadata: 110 | name: kube-ops-view-credentials 111 | type: Opaque 112 | data: 113 | read-only-token-type: Bearer 114 | read-only-token-secret: dXNlcjpwYXNzCg== # base64 encoded token 115 | 116 | The deployment manifest to mount the above secret: 117 | 118 | .. code-block:: yaml 119 | 120 | apiVersion: extensions/v1beta1 121 | kind: Deployment 122 | metadata: 123 | name: kube-ops-view 124 | spec: 125 | replicas: 1 126 | template: 127 | metadata: 128 | labels: 129 | app: kube-ops-view 130 | spec: 131 | containers: 132 | - name: kube-ops-view 133 | image: hjacobs/kube-ops-view:latest 134 | env: 135 | - name: CLUSTER_REGISTRY_URL 136 | value: "https://cluster-registry.example.org" 137 | - name: CREDENTIALS_DIR 138 | value: "/meta/credentials" 139 | ports: 140 | - containerPort: 8080 141 | protocol: TCP 142 | volumeMounts: 143 | - name: credentials 144 | mountPath: /meta/credentials 145 | readOnly: true 146 | volumes: 147 | - name: credentials 148 | secret: 149 | secretName: kube-ops-view-credentials 150 | 151 | 152 | .. _kubeconfig file: https://kubernetes.io/docs/user-guide/kubeconfig-file/ 153 | .. 
_Minikube: https://github.com/kubernetes/minikube 154 | -------------------------------------------------------------------------------- /docs/ui-options.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | UI Options 3 | ========== 4 | 5 | Kubernetes Operational View has a few options to change the UI behavior. 6 | All these options are passed in the URL's fragment identifier (starting with ``#``) in the format of key/value pairs separated by semicolons. 7 | 8 | Example URL: ``https://kube-ops-view.example.org/#dashboard=true;reload=600`` 9 | 10 | 11 | ``clusters`` 12 | Comma separated list of cluster IDs to show. 13 | ``dashboard`` 14 | Enable dashboard mode which hides the menu bar. 15 | ``reload`` 16 | Reload the whole page after X seconds. This is useful for unattended TV screens running 24x7 to mitigate JavaScript memory leaks and browser crashes. 17 | ``renderer`` 18 | Forces the fallback canvas renderer (instead of WebGL) when set to "canvas". 19 | ``scale`` 20 | Set the initial view scale (``1.0`` is 100%). 21 | -------------------------------------------------------------------------------- /docs/user-guide.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | User's Guide 3 | ============ 4 | 5 | Pod Status 6 | ========== 7 | 8 | Each pod indicates its status by color and animation: 9 | 10 | * Running and all containers ready: constant green 11 | * Running and not all containers ready: flashing green 12 | * Pending or ContainerCreating: flashing yellow 13 | * ImagePullBackoff or CrashLoopBackoff: flashing red 14 | * Succeeded (for jobs): blue 15 | 16 | 17 | Tooltips 18 | ======== 19 | 20 | Various UI elements provide additional tooltip information when hovering over them with the mouse: 21 | 22 | * Hovering over the title bar of a node box reveals the node's labels. 23 | * Hovering over the vertical resource bars will show the node's capacity, sum of all resource requests and current resource usage. 24 | * Hovering over a pod will show the pod's labels, container status and resources. 25 | 26 | 27 | Filtering Pods 28 | ====================== 29 | 30 | Kubernetes Operational View allows you to quickly find your running application pods. 31 | 32 | Typing characters will run the filter, i.e. non-matching pods will be greyed out. 33 | 34 | You can filter by: 35 | 36 | * name 37 | * labels - when query includes ``=``, e.g. ``env=prod`` 38 | * namespace - when query starts with ``namespace``, e.g. ``namespace=default`` 39 | 40 | The pod filter is persisted in the location bar (``#q=..`` query parameter) which allows to conveniently send the filtered view to other users (e.g. for troubleshooting). 41 | 42 | 43 | Sorting Pods 44 | ============ 45 | 46 | Pods can be sorted by different properties: 47 | 48 | * pod name (this is the default) 49 | * age (start time) 50 | * memory usage (metric collected from Heapster) 51 | * CPU usage (metric collected from Heapster) 52 | 53 | Sorting by memory or CPU allows finding the most resource hungry pod (per node). 54 | 55 | 56 | Filtering Clusters 57 | ================== 58 | 59 | Clicking on a cluster handle (the top bar of the cluster box) will toggle between showing the single cluster alone and all clusters. 60 | 61 | 62 | Themes 63 | ====== 64 | 65 | The top menu bar allows selecting an UI color theme matching your taste. The theme selection will be saved in the browser's Local Storage. 
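The available themes (implemented in ``app/src/themes.js``) include the default look as well as ``sepia``, ``polaroid``, ``highcontrast`` and ``crt`` variants.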
66 | -------------------------------------------------------------------------------- /examples/unassigned-pod.yaml: -------------------------------------------------------------------------------- 1 | # example Pod which cannot be assigned to any node 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: unassigned-test 6 | spec: 7 | # use a node selector which will never match.. 8 | nodeSelector: 9 | stuff: x 10 | containers: 11 | - name: test 12 | image: foo 13 | -------------------------------------------------------------------------------- /kube-ops-view-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hjacobs/kube-ops-view/28f93ed4c2b66e395607ad28d63fd83df2f3e413/kube-ops-view-logo.png -------------------------------------------------------------------------------- /kube-ops-view-logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 23 | Kubernetes Operational View 25 | 27 | 30 | 34 | 38 | 39 | 50 | 51 | 73 | 76 | 77 | 79 | 80 | 82 | image/svg+xml 83 | 85 | Kubernetes Operational View 86 | 87 | 88 | hjacobs 89 | 90 | 91 | 92 | 93 | hjacobs 94 | 95 | 96 | 97 | 98 | 99 | 104 | 111 | 118 | 125 | 132 | 139 | 146 | 153 | 160 | 167 | 174 | 181 | 182 | 183 | -------------------------------------------------------------------------------- /kube_ops_view/__init__.py: -------------------------------------------------------------------------------- 1 | # This version is replaced during release process. 2 | __version__ = "2017.0.dev1" 3 | -------------------------------------------------------------------------------- /kube_ops_view/__main__.py: -------------------------------------------------------------------------------- 1 | from .main import main 2 | 3 | main() 4 | -------------------------------------------------------------------------------- /kube_ops_view/backoff.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | 4 | def expo(n: int, base=2, factor=1, max_value=None): 5 | """Exponential decay. 6 | 7 | Adapted from https://github.com/litl/backoff/blob/master/backoff.py (MIT License) 8 | 9 | Args: 10 | ---- 11 | n: The exponent. 12 | base: The mathematical base of the exponentiation operation 13 | factor: Factor to multiply the exponentation by. 14 | max_value: The maximum value to yield. Once the value in the 15 | true exponential sequence exceeds this, the value 16 | of max_value will forever after be yielded. 17 | 18 | """ 19 | a = factor * base ** n 20 | if max_value is None or a < max_value: 21 | return a 22 | else: 23 | return max_value 24 | 25 | 26 | def random_jitter(value, jitter=1): 27 | """Jitter the value a random number of milliseconds. 28 | 29 | Copied from https://github.com/litl/backoff/blob/master/backoff.py (MIT License) 30 | 31 | This adds up to 1 second of additional time to the original value. 32 | Prior to backoff version 1.2 this was the default jitter behavior. 33 | 34 | Args: 35 | ---- 36 | value: The unadulterated backoff value. 37 | jitter: Jitter amount. 38 | 39 | """ 40 | return value + random.uniform(0, jitter) 41 | 42 | 43 | def full_jitter(value): 44 | """Jitter the value across the full range (0 to value). 45 | 46 | Copied from https://github.com/litl/backoff/blob/master/backoff.py (MIT License) 47 | 48 | This corresponds to the "Full Jitter" algorithm specified in the 49 | AWS blog's post on the performance of various jitter algorithms. 
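Example (illustrative): full_jitter(expo(3)) returns a float drawn uniformly between 0 and 8, since expo(3) evaluates to 8 with the default base and factor.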
50 | (http://www.awsarchitectureblog.com/2015/03/backoff.html) 51 | 52 | Args: 53 | ---- 54 | value: The unadulterated backoff value. 55 | 56 | """ 57 | return random.uniform(0, value) 58 | -------------------------------------------------------------------------------- /kube_ops_view/cluster_discovery.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | import time 4 | from pathlib import Path 5 | from typing import List 6 | from urllib.parse import urljoin 7 | 8 | import requests 9 | import tokens 10 | from pykube import HTTPClient 11 | from pykube import KubeConfig 12 | from requests.auth import AuthBase 13 | 14 | # default URL points to kubectl proxy 15 | DEFAULT_CLUSTERS = "http://localhost:8001/" 16 | CLUSTER_ID_INVALID_CHARS = re.compile("[^a-z0-9:-]") 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | tokens.configure(from_file_only=True) 21 | 22 | 23 | def generate_cluster_id(url: str): 24 | """Generate some "cluster ID" from given API server URL.""" 25 | for prefix in ("https://", "http://"): 26 | if url.startswith(prefix): 27 | url = url[len(prefix) :] 28 | return CLUSTER_ID_INVALID_CHARS.sub("-", url.lower()).strip("-") 29 | 30 | 31 | class StaticAuthorizationHeaderAuth(AuthBase): 32 | 33 | """Static authentication with given "Authorization" header.""" 34 | 35 | def __init__(self, authorization): 36 | self.authorization = authorization 37 | 38 | def __call__(self, request): 39 | request.headers["Authorization"] = self.authorization 40 | return request 41 | 42 | 43 | class OAuthTokenAuth(AuthBase): 44 | 45 | """Dynamic authentication using the "tokens" library to load OAuth tokens from file (potentially mounted from a Kubernetes secret).""" 46 | 47 | def __init__(self, token_name): 48 | self.token_name = token_name 49 | tokens.manage(token_name) 50 | 51 | def __call__(self, request): 52 | token = tokens.get(self.token_name) 53 | request.headers["Authorization"] = f"Bearer {token}" 54 | return request 55 | 56 | 57 | class Cluster: 58 | def __init__(self, id: str, name: str, api_server_url: str, client: HTTPClient): 59 | self.id = id 60 | self.name = name 61 | self.api_server_url = api_server_url 62 | self.client = client 63 | 64 | 65 | class StaticClusterDiscoverer: 66 | def __init__(self, api_server_urls: list): 67 | self._clusters = [] 68 | 69 | if not api_server_urls: 70 | try: 71 | config = KubeConfig.from_service_account() 72 | except FileNotFoundError: 73 | # we are not running inside a cluster 74 | # => assume default kubectl proxy URL 75 | config = KubeConfig.from_url(DEFAULT_CLUSTERS) 76 | client = HTTPClient(config) 77 | cluster = Cluster( 78 | generate_cluster_id(DEFAULT_CLUSTERS), 79 | "cluster", 80 | DEFAULT_CLUSTERS, 81 | client, 82 | ) 83 | else: 84 | client = HTTPClient(config) 85 | cluster = Cluster( 86 | generate_cluster_id(config.cluster["server"]), 87 | "cluster", 88 | config.cluster["server"], 89 | client, 90 | ) 91 | self._clusters.append(cluster) 92 | else: 93 | for api_server_url in api_server_urls: 94 | config = KubeConfig.from_url(api_server_url) 95 | client = HTTPClient(config) 96 | generated_id = generate_cluster_id(api_server_url) 97 | self._clusters.append( 98 | Cluster(generated_id, generated_id, api_server_url, client) 99 | ) 100 | 101 | def get_clusters(self): 102 | return self._clusters 103 | 104 | 105 | class ClusterRegistryDiscoverer: 106 | def __init__(self, cluster_registry_url: str, cache_lifetime=60): 107 | self._url = cluster_registry_url 108 | self._cache_lifetime 
= cache_lifetime 109 | self._last_cache_refresh = 0 110 | self._clusters: List[Cluster] = [] 111 | self._session = requests.Session() 112 | self._session.auth = OAuthTokenAuth("read-only") 113 | 114 | def refresh(self): 115 | try: 116 | response = self._session.get( 117 | urljoin(self._url, "/kubernetes-clusters"), timeout=10 118 | ) 119 | response.raise_for_status() 120 | clusters = [] 121 | for row in response.json()["items"]: 122 | # only consider "ready" clusters 123 | if row.get("lifecycle_status", "ready") == "ready": 124 | config = KubeConfig.from_url(row["api_server_url"]) 125 | client = HTTPClient(config) 126 | client.session.auth = OAuthTokenAuth("read-only") 127 | clusters.append( 128 | Cluster(row["id"], row["alias"], row["api_server_url"], client) 129 | ) 130 | self._clusters = clusters 131 | self._last_cache_refresh = time.time() 132 | except Exception as e: 133 | logger.exception( 134 | f"Failed to refresh from cluster registry {self._url}: {e}" 135 | ) 136 | 137 | def get_clusters(self): 138 | now = time.time() 139 | if now - self._last_cache_refresh > self._cache_lifetime: 140 | self.refresh() 141 | return self._clusters 142 | 143 | 144 | class KubeconfigDiscoverer: 145 | def __init__(self, kubeconfig_path: Path, contexts: set): 146 | self._path = kubeconfig_path 147 | self._contexts = contexts 148 | 149 | def get_clusters(self): 150 | # Kubernetes Python client expects "vintage" string path 151 | config_file = str(self._path) 152 | config = KubeConfig.from_file(config_file) 153 | for context in config.contexts: 154 | if self._contexts and context not in self._contexts: 155 | # filter out 156 | continue 157 | # create a new KubeConfig with new "current context" 158 | context_config = KubeConfig(config.doc, context) 159 | client = HTTPClient(context_config) 160 | cluster = Cluster( 161 | context, context, context_config.cluster["server"], client 162 | ) 163 | yield cluster 164 | 165 | 166 | class MockDiscoverer: 167 | def get_clusters(self): 168 | for i in range(3): 169 | yield Cluster( 170 | f"mock-cluster-{i}", 171 | f"mock-cluster-{i}", 172 | api_server_url=f"https://kube-{i}.example.org", 173 | client=None, 174 | ) 175 | -------------------------------------------------------------------------------- /kube_ops_view/kubernetes.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | import time 4 | 5 | import pykube 6 | import requests 7 | from pykube import Node 8 | from pykube import Pod 9 | from pykube.objects import APIObject 10 | from pykube.objects import NamespacedAPIObject 11 | 12 | from .utils import get_short_error_message 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | session = requests.Session() 17 | 18 | 19 | # https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/resource-metrics-api.md 20 | class NodeMetrics(APIObject): 21 | 22 | version = "metrics.k8s.io/v1beta1" 23 | endpoint = "nodes" 24 | kind = "NodeMetrics" 25 | 26 | 27 | # https://github.com/kubernetes/community/blob/master/contributors/design-proposals/instrumentation/resource-metrics-api.md 28 | class PodMetrics(NamespacedAPIObject): 29 | 30 | version = "metrics.k8s.io/v1beta1" 31 | endpoint = "pods" 32 | kind = "PodMetrics" 33 | 34 | 35 | def map_node_status(status: dict): 36 | return { 37 | "addresses": status.get("addresses"), 38 | "capacity": status.get("capacity"), 39 | "allocatable": status.get("allocatable"), 40 | } 41 | 42 | 43 | def map_node(node: dict): 44 | return 
{ 45 | "name": node["metadata"]["name"], 46 | "labels": node["metadata"]["labels"], 47 | "status": map_node_status(node["status"]), 48 | "pods": {}, 49 | } 50 | 51 | 52 | def map_pod(pod: dict): 53 | return { 54 | "name": pod["metadata"]["name"], 55 | "namespace": pod["metadata"]["namespace"], 56 | "labels": pod["metadata"].get("labels", {}), 57 | "phase": pod["status"].get("phase"), 58 | "startTime": pod["status"]["startTime"] if "startTime" in pod["status"] else "", 59 | "containers": [], 60 | } 61 | 62 | 63 | def map_container(cont: dict, pod: dict): 64 | obj = {"name": cont["name"], "image": cont["image"], "resources": cont["resources"]} 65 | status = list( 66 | [ 67 | s 68 | for s in pod.get("status", {}).get("containerStatuses", []) 69 | if s["name"] == cont["name"] 70 | ] 71 | ) 72 | if status: 73 | obj.update(**status[0]) 74 | return obj 75 | 76 | 77 | def parse_time(s: str): 78 | return ( 79 | datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ") 80 | .replace(tzinfo=datetime.timezone.utc) 81 | .timestamp() 82 | ) 83 | 84 | 85 | def query_kubernetes_cluster(cluster): 86 | cluster_id = cluster.id 87 | api_server_url = cluster.api_server_url 88 | nodes = {} 89 | pods_by_namespace_name = {} 90 | unassigned_pods = {} 91 | for node in Node.objects(cluster.client): 92 | obj = map_node(node.obj) 93 | nodes[obj["name"]] = obj 94 | now = time.time() 95 | for pod in Pod.objects(cluster.client, namespace=pykube.all): 96 | obj = map_pod(pod.obj) 97 | if "deletionTimestamp" in pod.metadata: 98 | obj["deleted"] = parse_time(pod.metadata["deletionTimestamp"]) 99 | for cont in pod.obj["spec"]["containers"]: 100 | obj["containers"].append(map_container(cont, pod.obj)) 101 | if obj["phase"] in ("Succeeded", "Failed"): 102 | last_termination_time = 0 103 | for container in obj["containers"]: 104 | termination_time = ( 105 | container.get("state", {}).get("terminated", {}).get("finishedAt") 106 | ) 107 | if termination_time: 108 | termination_time = parse_time(termination_time) 109 | if termination_time > last_termination_time: 110 | last_termination_time = termination_time 111 | if (last_termination_time and last_termination_time < now - 3600) or ( 112 | obj.get("reason") == "Evicted" 113 | ): 114 | # the job/pod finished more than an hour ago or if it is evicted by cgroup limits 115 | # => filter out 116 | continue 117 | pods_by_namespace_name[(pod.namespace, pod.name)] = obj 118 | pod_key = f"{pod.namespace}/{pod.name}" 119 | node_name = pod.obj["spec"].get("nodeName") 120 | if node_name in nodes: 121 | nodes[node_name]["pods"][pod_key] = obj 122 | else: 123 | unassigned_pods[pod_key] = obj 124 | 125 | try: 126 | for node_metrics in NodeMetrics.objects(cluster.client): 127 | key = node_metrics.name 128 | nodes[key]["usage"] = node_metrics.obj.get("usage", {}) 129 | except Exception as e: 130 | logger.warning( 131 | "Failed to query node metrics {}: {}".format( 132 | cluster.id, get_short_error_message(e) 133 | ) 134 | ) 135 | try: 136 | for pod_metrics in PodMetrics.objects(cluster.client, namespace=pykube.all): 137 | key = (pod_metrics.namespace, pod_metrics.name) 138 | pod = pods_by_namespace_name.get(key) 139 | if pod: 140 | for container in pod["containers"]: 141 | for container_metrics in pod_metrics.obj.get("containers", []): 142 | if container["name"] == container_metrics["name"]: 143 | container["resources"]["usage"] = container_metrics["usage"] 144 | except Exception as e: 145 | logger.warning( 146 | "Failed to query pod metrics for cluster {}: {}".format( 147 | cluster.id, 
get_short_error_message(e) 148 | ) 149 | ) 150 | return { 151 | "id": cluster_id, 152 | "api_server_url": api_server_url, 153 | "nodes": nodes, 154 | "unassigned_pods": unassigned_pods, 155 | } 156 | -------------------------------------------------------------------------------- /kube_ops_view/main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import gevent.monkey 3 | 4 | gevent.monkey.patch_all() 5 | 6 | import click 7 | import flask 8 | import functools 9 | import gevent 10 | import gevent.pywsgi 11 | import json 12 | import logging 13 | import os 14 | import signal 15 | import time 16 | import kube_ops_view 17 | from typing import Union 18 | from pathlib import Path 19 | 20 | from flask import Flask, redirect, url_for 21 | from .oauth import OAuth2ConsumerBlueprintWithClientRefresh 22 | 23 | from .mock import query_mock_cluster 24 | from .kubernetes import query_kubernetes_cluster 25 | from .stores import MemoryStore, RedisStore 26 | from .cluster_discovery import ( 27 | DEFAULT_CLUSTERS, 28 | StaticClusterDiscoverer, 29 | ClusterRegistryDiscoverer, 30 | KubeconfigDiscoverer, 31 | MockDiscoverer, 32 | ) 33 | from .update import update_clusters 34 | 35 | 36 | logger = logging.getLogger(__name__) 37 | 38 | SERVER_STATUS = {"shutdown": False} 39 | AUTHORIZE_URL = os.getenv("AUTHORIZE_URL") 40 | ACCESS_TOKEN_URL = os.getenv("ACCESS_TOKEN_URL") 41 | SCOPE = os.getenv("SCOPE") 42 | 43 | app = Flask(__name__) 44 | 45 | oauth_blueprint = OAuth2ConsumerBlueprintWithClientRefresh( 46 | "oauth", 47 | __name__, 48 | authorization_url=AUTHORIZE_URL, 49 | token_url=ACCESS_TOKEN_URL, 50 | scope=SCOPE, 51 | ) 52 | app.register_blueprint(oauth_blueprint, url_prefix="/login") 53 | 54 | 55 | def authorize(f): 56 | @functools.wraps(f) 57 | def wrapper(*args, **kwargs): 58 | if ( 59 | AUTHORIZE_URL 60 | and "auth_token" not in flask.session 61 | and not oauth_blueprint.session.authorized 62 | ): 63 | return redirect(url_for("oauth.login")) 64 | return f(*args, **kwargs) 65 | 66 | return wrapper 67 | 68 | 69 | @app.route("/health") 70 | def health(): 71 | if SERVER_STATUS["shutdown"]: 72 | flask.abort(503) 73 | else: 74 | return "OK" 75 | 76 | 77 | @app.route("/") 78 | @authorize 79 | def index(): 80 | static_build_path = Path(__file__).parent / "static" / "build" 81 | candidates = sorted(static_build_path.glob("app*.js")) 82 | if candidates: 83 | app_js = candidates[0].name 84 | if app.debug: 85 | # cache busting for local development 86 | app_js += "?_={}".format(time.time()) 87 | else: 88 | logger.error( 89 | "Could not find JavaScript application bundle app*.js in {}".format( 90 | static_build_path 91 | ) 92 | ) 93 | flask.abort(503, "JavaScript application bundle not found (missing build)") 94 | return flask.render_template( 95 | "index.html", 96 | app_js=app_js, 97 | version=kube_ops_view.__version__, 98 | route_prefix=app.app_config["route_prefix"], 99 | app_config_json=json.dumps(app.app_config), 100 | ) 101 | 102 | 103 | def event(cluster_ids: set): 104 | # first sent full data once 105 | for cluster_id in app.store.get_cluster_ids(): 106 | if not cluster_ids or cluster_id in cluster_ids: 107 | status = app.store.get_cluster_status(cluster_id) 108 | if status: 109 | # send the cluster status including last_query_time BEFORE the cluster data 110 | # so the UI knows how to render correctly from the start 111 | yield "event: clusterstatus\ndata: " + json.dumps( 112 | {"cluster_id": cluster_id, "status": status}, separators=(",", 
":") 113 | ) + "\n\n" 114 | cluster = app.store.get_cluster_data(cluster_id) 115 | if cluster: 116 | yield "event: clusterupdate\ndata: " + json.dumps( 117 | cluster, separators=(",", ":") 118 | ) + "\n\n" 119 | yield "event: bootstrapend\ndata: \n\n" 120 | 121 | while True: 122 | for event_type, event_data in app.store.listen(): 123 | # hacky, event_data can be delta or full cluster object 124 | if ( 125 | not cluster_ids 126 | or event_data.get("cluster_id", event_data.get("id")) in cluster_ids 127 | ): 128 | yield "event: " + event_type + "\ndata: " + json.dumps( 129 | event_data, separators=(",", ":") 130 | ) + "\n\n" 131 | 132 | 133 | @app.route("/events") 134 | @authorize 135 | def get_events(): 136 | """SSE (Server Side Events), for an EventSource.""" 137 | cluster_ids = set() 138 | for _id in flask.request.args.get("cluster_ids", "").split(): 139 | if _id: 140 | cluster_ids.add(_id) 141 | return flask.Response( 142 | event(cluster_ids), 143 | mimetype="text/event-stream", 144 | headers={"Cache-Control": "no-cache", "X-Accel-Buffering": "no"}, 145 | ) 146 | 147 | 148 | @app.route("/screen-tokens", methods=["GET", "POST"]) 149 | @authorize 150 | def screen_tokens(): 151 | new_token = None 152 | if flask.request.method == "POST": 153 | new_token = app.store.create_screen_token() 154 | return flask.render_template("screen-tokens.html", new_token=new_token) 155 | 156 | 157 | @app.route("/screen/") 158 | def redeem_screen_token(token: str): 159 | remote_addr = ( 160 | flask.request.headers.get("X-Forwarded-For") or flask.request.remote_addr 161 | ) 162 | logger.info( 163 | 'Trying to redeem screen token "{}" for IP {}..'.format(token, remote_addr) 164 | ) 165 | try: 166 | app.store.redeem_screen_token(token, remote_addr) 167 | except Exception: 168 | flask.abort(401) 169 | flask.session["auth_token"] = (token, "") 170 | return redirect(app.config["APPLICATION_ROOT"]) 171 | 172 | 173 | @app.route("/logout") 174 | def logout(): 175 | flask.session.pop("auth_token", None) 176 | return redirect(app.config["APPLICATION_ROOT"]) 177 | 178 | 179 | def shutdown(): 180 | # just wait some time to give Kubernetes time to update endpoints 181 | # this requires changing the readinessProbe's 182 | # PeriodSeconds and FailureThreshold appropriately 183 | # see https://godoc.org/k8s.io/kubernetes/pkg/api/v1#Probe 184 | gevent.sleep(10) 185 | exit(0) 186 | 187 | 188 | def exit_gracefully(signum, frame): 189 | logger.info("Received TERM signal, shutting down..") 190 | SERVER_STATUS["shutdown"] = True 191 | gevent.spawn(shutdown) 192 | 193 | 194 | def print_version(ctx, param, value): 195 | if not value or ctx.resilient_parsing: 196 | return 197 | click.echo("Kubernetes Operational View {}".format(kube_ops_view.__version__)) 198 | ctx.exit() 199 | 200 | 201 | class CommaSeparatedValues(click.ParamType): 202 | name = "comma_separated_values" 203 | 204 | def convert(self, value, param, ctx): 205 | if isinstance(value, str): 206 | values = filter(None, value.split(",")) 207 | else: 208 | values = value 209 | return values 210 | 211 | 212 | @click.command(context_settings={"help_option_names": ["-h", "--help"]}) 213 | @click.option( 214 | "-V", 215 | "--version", 216 | is_flag=True, 217 | callback=print_version, 218 | expose_value=False, 219 | is_eager=True, 220 | help="Print the current version number and exit.", 221 | ) 222 | @click.option( 223 | "-p", 224 | "--port", 225 | type=int, 226 | help="HTTP port to listen on (default: 8080)", 227 | envvar="SERVER_PORT", 228 | default=8080, 229 | ) 230 | 
@click.option( 231 | "--route-prefix", 232 | help="""The URL prefix under which kube-ops-view is externally reachable 233 | (for example, if kube-ops-view is served via a reverse proxy). Used for 234 | generating relative and absolute links back to kube-ops-view itself. If the 235 | URL has a path portion, it will be used to prefix all HTTP endpoints served 236 | by kube-ops-view. If omitted, relevant URL components will be derived 237 | automatically.""", 238 | envvar="ROUTE_PREFIX", 239 | default="/", 240 | ) 241 | @click.option( 242 | "-d", "--debug", is_flag=True, help="Run in debugging mode", envvar="DEBUG" 243 | ) 244 | @click.option( 245 | "-m", "--mock", is_flag=True, help="Mock Kubernetes clusters", envvar="MOCK" 246 | ) 247 | @click.option( 248 | "--secret-key", 249 | help="Secret key for session cookies", 250 | envvar="SECRET_KEY", 251 | default="development", 252 | ) 253 | @click.option( 254 | "--redis-url", 255 | help="Redis URL to use for pub/sub and job locking", 256 | envvar="REDIS_URL", 257 | ) 258 | @click.option( 259 | "--clusters", 260 | type=CommaSeparatedValues(), 261 | help="Comma separated list of Kubernetes API server URLs (default: {})".format( 262 | DEFAULT_CLUSTERS 263 | ), 264 | envvar="CLUSTERS", 265 | ) 266 | @click.option( 267 | "--cluster-registry-url", 268 | help="URL to cluster registry", 269 | envvar="CLUSTER_REGISTRY_URL", 270 | ) 271 | @click.option( 272 | "--kubeconfig-path", 273 | type=click.Path(exists=True), 274 | help="Path to kubeconfig file", 275 | envvar="KUBECONFIG_PATH", 276 | ) 277 | @click.option( 278 | "--kubeconfig-contexts", 279 | type=CommaSeparatedValues(), 280 | help="List of kubeconfig contexts to use (default: use all defined contexts)", 281 | envvar="KUBECONFIG_CONTEXTS", 282 | ) 283 | @click.option( 284 | "--query-interval", 285 | type=float, 286 | help="Interval in seconds for querying clusters (default: 5)", 287 | envvar="QUERY_INTERVAL", 288 | default=5, 289 | ) 290 | @click.option( 291 | "--node-link-url-template", 292 | help="Template for target URL when clicking on a Node", 293 | envvar="NODE_LINK_URL_TEMPLATE", 294 | ) 295 | @click.option( 296 | "--pod-link-url-template", 297 | help="Template for target URL when clicking on a Pod", 298 | envvar="POD_LINK_URL_TEMPLATE", 299 | ) 300 | def main( 301 | port, 302 | debug, 303 | mock, 304 | secret_key, 305 | redis_url, 306 | clusters: list, 307 | cluster_registry_url, 308 | kubeconfig_path, 309 | kubeconfig_contexts: list, 310 | query_interval, 311 | node_link_url_template: str, 312 | pod_link_url_template: str, 313 | route_prefix: str, 314 | ): 315 | logging.basicConfig(level=logging.DEBUG if debug else logging.INFO) 316 | 317 | store = RedisStore(redis_url) if redis_url else MemoryStore() 318 | 319 | app.debug = debug 320 | app.secret_key = secret_key 321 | app.store = store 322 | app.config["APPLICATION_ROOT"] = route_prefix 323 | app.app_config = { 324 | "node_link_url_template": node_link_url_template, 325 | "pod_link_url_template": pod_link_url_template, 326 | "route_prefix": route_prefix, 327 | } 328 | 329 | discoverer: Union[ 330 | MockDiscoverer, 331 | ClusterRegistryDiscoverer, 332 | KubeconfigDiscoverer, 333 | StaticClusterDiscoverer, 334 | ] 335 | 336 | if mock: 337 | cluster_query = query_mock_cluster 338 | discoverer = MockDiscoverer() 339 | else: 340 | cluster_query = query_kubernetes_cluster 341 | if cluster_registry_url: 342 | discoverer = ClusterRegistryDiscoverer(cluster_registry_url) 343 | elif kubeconfig_path: 344 | discoverer = KubeconfigDiscoverer( 345 | 
Path(kubeconfig_path), set(kubeconfig_contexts or []) 346 | ) 347 | else: 348 | api_server_urls = clusters or [] 349 | discoverer = StaticClusterDiscoverer(api_server_urls) 350 | 351 | gevent.spawn( 352 | update_clusters, 353 | cluster_discoverer=discoverer, 354 | query_cluster=cluster_query, 355 | store=store, 356 | query_interval=query_interval, 357 | debug=debug, 358 | ) 359 | 360 | signal.signal(signal.SIGTERM, exit_gracefully) 361 | http_server = gevent.pywsgi.WSGIServer(("0.0.0.0", port), app) 362 | logger.info("Listening on :{}..".format(port)) 363 | http_server.serve_forever() 364 | -------------------------------------------------------------------------------- /kube_ops_view/mock.py: -------------------------------------------------------------------------------- 1 | import random 2 | import string 3 | import time 4 | 5 | 6 | def hash_int(x: int): 7 | x = ((x >> 16) ^ x) * 0x45D9F3B 8 | x = ((x >> 16) ^ x) * 0x45D9F3B 9 | x = (x >> 16) ^ x 10 | return x 11 | 12 | 13 | def generate_mock_pod(index: int, i: int, j: int): 14 | names = [ 15 | "agent-cooper", 16 | "black-lodge", 17 | "bob", 18 | "bobby-briggs", 19 | "laura-palmer", 20 | "leland-palmer", 21 | "log-lady", 22 | "sheriff-truman", 23 | ] 24 | labels = {"env": ["prod", "dev"], "owner": ["x-wing", "iris"]} 25 | pod_phases = ["Pending", "Running", "Running", "Failed"] 26 | 27 | pod_labels = {} 28 | for li, k in enumerate(labels): 29 | v = labels[k] 30 | label_choice = hash_int((index + 1) * (i + 1) * (j + 1) * (li + 1)) % ( 31 | len(v) + 1 32 | ) 33 | if label_choice != 0: 34 | pod_labels[k] = v[label_choice - 1] 35 | 36 | phase = pod_phases[hash_int((index + 1) * (i + 1) * (j + 1)) % len(pod_phases)] 37 | containers = [] 38 | for _ in range(1 + j % 2): 39 | # generate "more real data" 40 | requests_cpu = random.randint(10, 50) 41 | requests_memory = random.randint(64, 256) 42 | # with max, we defend ourselves against negative cpu/memory ;) 43 | usage_cpu = max(requests_cpu + random.randint(-30, 30), 1) 44 | usage_memory = max(requests_memory + random.randint(-64, 128), 1) 45 | container = { 46 | "name": "myapp", 47 | "image": "foo/bar/{}".format(j), 48 | "resources": { 49 | "requests": { 50 | "cpu": f"{requests_cpu}m", 51 | "memory": f"{requests_memory}Mi", 52 | }, 53 | "limits": {}, 54 | "usage": {"cpu": f"{usage_cpu}m", "memory": f"{usage_memory}Mi"}, 55 | }, 56 | "ready": True, 57 | "state": {"running": {}}, 58 | } 59 | if phase == "Running": 60 | if j % 13 == 0: 61 | container.update( 62 | **{ 63 | "ready": False, 64 | "state": {"waiting": {"reason": "CrashLoopBackOff"}}, 65 | } 66 | ) 67 | elif j % 7 == 0: 68 | container.update( 69 | **{"ready": False, "state": {"running": {}}, "restartCount": 3} 70 | ) 71 | elif phase == "Failed": 72 | del container["state"] 73 | del container["ready"] 74 | containers.append(container) 75 | pod = { 76 | "name": "{}-{}-{}".format( 77 | names[hash_int((i + 1) * (j + 1)) % len(names)], i, j 78 | ), 79 | "namespace": "kube-system" if j < 3 else "default", 80 | "labels": pod_labels, 81 | "phase": phase, 82 | "containers": containers, 83 | } 84 | if phase == "Running" and j % 17 == 0: 85 | pod["deleted"] = 123 86 | 87 | return pod 88 | 89 | 90 | def query_mock_cluster(cluster): 91 | """Generate deterministic (no randomness!) 
mock data.""" 92 | index = int(cluster.id.split("-")[-1]) 93 | nodes = {} 94 | for i in range(10): 95 | # add/remove the second to last node every 13 seconds 96 | if i == 8 and int(time.time() / 13) % 2 == 0: 97 | continue 98 | labels = {} 99 | # only the first two clusters have master nodes 100 | if i < 2 and index < 2: 101 | if index == 0: 102 | labels["kubernetes.io/role"] = "master" 103 | elif index == 1: 104 | labels["node-role.kubernetes.io/master"] = "" 105 | else: 106 | labels["master"] = "true" 107 | pods = {} 108 | for j in range(hash_int((index + 1) * (i + 1)) % 32): 109 | # add/remove some pods every 7 seconds 110 | if j % 17 == 0 and int(time.time() / 7) % 2 == 0: 111 | pass 112 | else: 113 | pod = generate_mock_pod(index, i, j) 114 | pods["{}/{}".format(pod["namespace"], pod["name"])] = pod 115 | 116 | # use data from containers (usage) 117 | usage_cpu = 0 118 | usage_memory = 0 119 | for p in pods.values(): 120 | for c in p["containers"]: 121 | usage_cpu += int(c["resources"]["usage"]["cpu"].split("m")[0]) 122 | usage_memory += int(c["resources"]["usage"]["memory"].split("Mi")[0]) 123 | 124 | # generate longer name for a node 125 | suffix = "".join( 126 | [random.choice(string.ascii_letters) for n in range(random.randint(1, 20))] 127 | ) 128 | 129 | node = { 130 | "name": f"node-{i}-{suffix}", 131 | "labels": labels, 132 | "status": { 133 | "capacity": {"cpu": "8", "memory": "64Gi", "pods": "110"}, 134 | "allocatable": {"cpu": "7800m", "memory": "62Gi"}, 135 | }, 136 | "pods": pods, 137 | # get data from containers (usage) 138 | "usage": {"cpu": f"{usage_cpu}m", "memory": f"{usage_memory}Mi"}, 139 | } 140 | nodes[node["name"]] = node 141 | pod = generate_mock_pod(index, 11, index) 142 | unassigned_pods = {"{}/{}".format(pod["namespace"], pod["name"]): pod} 143 | return { 144 | "id": "mock-cluster-{}".format(index), 145 | "api_server_url": "https://kube-{}.example.org".format(index), 146 | "nodes": nodes, 147 | "unassigned_pods": unassigned_pods, 148 | } 149 | -------------------------------------------------------------------------------- /kube_ops_view/oauth.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from flask_dance.consumer import OAuth2ConsumerBlueprint 4 | 5 | 6 | CREDENTIALS_DIR = os.getenv("CREDENTIALS_DIR", "") 7 | 8 | 9 | class OAuth2ConsumerBlueprintWithClientRefresh(OAuth2ConsumerBlueprint): 10 | 11 | """Same as flask_dance.consumer.OAuth2ConsumerBlueprint, but loads client credentials from file.""" 12 | 13 | def refresh_credentials(self): 14 | with open(os.path.join(CREDENTIALS_DIR, "authcode-client-id")) as fd: 15 | # note that we need to set two attributes because of how OAuth2ConsumerBlueprint works :-/ 16 | self._client_id = self.client_id = fd.read().strip() 17 | with open(os.path.join(CREDENTIALS_DIR, "authcode-client-secret")) as fd: 18 | self.client_secret = fd.read().strip() 19 | 20 | def login(self): 21 | self.refresh_credentials() 22 | return super().login() 23 | -------------------------------------------------------------------------------- /kube_ops_view/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hjacobs/kube-ops-view/28f93ed4c2b66e395607ad28d63fd83df2f3e413/kube_ops_view/static/favicon.ico -------------------------------------------------------------------------------- /kube_ops_view/static/sharetechmono.woff2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hjacobs/kube-ops-view/28f93ed4c2b66e395607ad28d63fd83df2f3e413/kube_ops_view/static/sharetechmono.woff2 -------------------------------------------------------------------------------- /kube_ops_view/stores.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import random 4 | import string 5 | import time 6 | from abc import ABC 7 | from abc import abstractmethod 8 | from queue import Queue 9 | from typing import Set 10 | 11 | import redis 12 | from redlock import Redlock 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | ONE_YEAR = 3600 * 24 * 365 17 | 18 | 19 | def generate_token(n: int): 20 | """Generate a random ASCII token of length n.""" 21 | # uses os.urandom() 22 | rng = random.SystemRandom() 23 | return "".join([rng.choice(string.ascii_letters + string.digits) for i in range(n)]) 24 | 25 | 26 | def generate_token_data(): 27 | """Generate screen token data for storing.""" 28 | token = generate_token(10) 29 | now = time.time() 30 | return {"token": token, "created": now, "expires": now + ONE_YEAR} 31 | 32 | 33 | def check_token(token: str, remote_addr: str, data: dict): 34 | """Check whether the given screen token is valid, raises exception if not.""" 35 | now = time.time() 36 | if ( 37 | data 38 | and now < data["expires"] 39 | and data.get("remote_addr", remote_addr) == remote_addr 40 | ): 41 | data["remote_addr"] = remote_addr 42 | return data 43 | else: 44 | raise ValueError("Invalid token") 45 | 46 | 47 | class AbstractStore(ABC): 48 | @abstractmethod 49 | def set(self, key, val): 50 | pass 51 | 52 | @abstractmethod 53 | def get(self, key): 54 | return None 55 | 56 | def get_cluster_ids(self): 57 | return self.get("cluster-ids") or [] 58 | 59 | def set_cluster_ids(self, cluster_ids: Set[str]): 60 | self.set("cluster-ids", list(sorted(cluster_ids))) 61 | 62 | def get_cluster_status(self, cluster_id: str) -> dict: 63 | return self.get("clusters:{}:status".format(cluster_id)) or {} 64 | 65 | def set_cluster_status(self, cluster_id: str, status: dict): 66 | self.set("clusters:{}:status".format(cluster_id), status) 67 | 68 | def get_cluster_data(self, cluster_id: str) -> dict: 69 | return self.get("clusters:{}:data".format(cluster_id)) or {} 70 | 71 | def set_cluster_data(self, cluster_id: str, data: dict): 72 | self.set("clusters:{}:data".format(cluster_id), data) 73 | 74 | 75 | class MemoryStore(AbstractStore): 76 | 77 | """Memory-only backend, mostly useful for local debugging.""" 78 | 79 | def __init__(self): 80 | self._data = {} 81 | self._queues = [] 82 | self._screen_tokens = {} 83 | 84 | def set(self, key, value): 85 | self._data[key] = value 86 | 87 | def get(self, key): 88 | return self._data.get(key) 89 | 90 | def acquire_lock(self): 91 | # no-op for memory store 92 | return "fake-lock" 93 | 94 | def release_lock(self, lock): 95 | # no op for memory store 96 | pass 97 | 98 | def publish(self, event_type, event_data): 99 | for queue in self._queues: 100 | queue.put((event_type, event_data)) 101 | 102 | def listen(self): 103 | queue = Queue() 104 | self._queues.append(queue) 105 | try: 106 | while True: 107 | item = queue.get() 108 | yield item 109 | finally: 110 | self._queues.remove(queue) 111 | 112 | def create_screen_token(self): 113 | data = generate_token_data() 114 | token = data["token"] 115 | self._screen_tokens[token] = data 116 | return token 117 | 118 | def redeem_screen_token(self, token: str, remote_addr: str): 119 | data = 
self._screen_tokens.get(token) 120 | data = check_token(token, remote_addr, data) 121 | self._screen_tokens[token] = data 122 | 123 | 124 | class RedisStore(AbstractStore): 125 | 126 | """Redis-based backend for deployments with replicas > 1.""" 127 | 128 | def __init__(self, url: str): 129 | logger.info("Connecting to Redis on {}..".format(url)) 130 | self._redis = redis.StrictRedis.from_url(url) 131 | self._redlock = Redlock([url]) 132 | 133 | def set(self, key, value): 134 | self._redis.set(key, json.dumps(value, separators=(",", ":"))) 135 | 136 | def get(self, key): 137 | value = self._redis.get(key) 138 | if value: 139 | return json.loads(value.decode("utf-8")) 140 | 141 | def acquire_lock(self): 142 | return self._redlock.lock("update", 10000) 143 | 144 | def release_lock(self, lock): 145 | self._redlock.unlock(lock) 146 | 147 | def publish(self, event_type, event_data): 148 | self._redis.publish( 149 | "default", 150 | "{}:{}".format(event_type, json.dumps(event_data, separators=(",", ":"))), 151 | ) 152 | 153 | def listen(self): 154 | p = self._redis.pubsub() 155 | p.subscribe("default") 156 | for message in p.listen(): 157 | if message["type"] == "message": 158 | event_type, data = message["data"].decode("utf-8").split(":", 1) 159 | yield (event_type, json.loads(data)) 160 | 161 | def create_screen_token(self): 162 | """Generate a new screen token and store it in Redis.""" 163 | data = generate_token_data() 164 | token = data["token"] 165 | self._redis.set("screen-tokens:{}".format(token), json.dumps(data)) 166 | return token 167 | 168 | def redeem_screen_token(self, token: str, remote_addr: str): 169 | """Validate the given token and bind it to the IP.""" 170 | redis_key = "screen-tokens:{}".format(token) 171 | data = self._redis.get(redis_key) 172 | if not data: 173 | raise ValueError("Invalid token") 174 | data = json.loads(data.decode("utf-8")) 175 | data = check_token(token, remote_addr, data) 176 | self._redis.set(redis_key, json.dumps(data)) 177 | -------------------------------------------------------------------------------- /kube_ops_view/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Kubernetes Operational View {{ version }} 7 | 8 | 9 | 21 | 22 | 23 | 24 |
Loading..
25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /kube_ops_view/templates/screen-tokens.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Screen Tokens 7 | 8 | 9 | 10 |

Screen Tokens
 11 | {% if new_token: %}
 12 | The new token is: {{ new_token }}
 13 | {% endif %}
 14 | 15 | 16 |
17 | 18 | 19 | -------------------------------------------------------------------------------- /kube_ops_view/update.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | from typing import Callable 4 | 5 | import gevent 6 | import json_delta 7 | import requests.exceptions 8 | 9 | from .backoff import expo 10 | from .backoff import random_jitter 11 | from .cluster_discovery import Cluster 12 | from .utils import get_short_error_message 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | 17 | def calculate_backoff(tries: int): 18 | return random_jitter(expo(tries, factor=2, max_value=60), jitter=4) 19 | 20 | 21 | def handle_query_failure(e: Exception, cluster: Cluster, backoff: dict): 22 | if not backoff: 23 | backoff = {} 24 | tries = backoff.get("tries", 0) + 1 25 | backoff["tries"] = tries 26 | wait_seconds = calculate_backoff(tries) 27 | backoff["next_try"] = time.time() + wait_seconds 28 | message = get_short_error_message(e) 29 | if isinstance(e, requests.exceptions.RequestException): 30 | log = logger.error 31 | else: 32 | log = logger.exception 33 | log( 34 | "Failed to query cluster {} ({}): {} (try {}, wait {} seconds)".format( 35 | cluster.id, cluster.api_server_url, message, tries, round(wait_seconds) 36 | ) 37 | ) 38 | return backoff 39 | 40 | 41 | def update_clusters( 42 | cluster_discoverer, 43 | query_cluster: Callable[[Cluster], dict], 44 | store, 45 | query_interval: float = 5, 46 | debug: bool = False, 47 | ): 48 | while True: 49 | lock = store.acquire_lock() 50 | if lock: 51 | try: 52 | clusters = cluster_discoverer.get_clusters() 53 | cluster_ids = set() 54 | for cluster in clusters: 55 | cluster_ids.add(cluster.id) 56 | status = store.get_cluster_status(cluster.id) 57 | now = time.time() 58 | if now < status.get("last_query_time", 0) + query_interval: 59 | continue 60 | backoff = status.get("backoff") 61 | if backoff and now < backoff["next_try"]: 62 | # cluster is still in backoff, skip 63 | continue 64 | try: 65 | logger.debug( 66 | "Querying cluster {} ({})..".format( 67 | cluster.id, cluster.api_server_url 68 | ) 69 | ) 70 | data = query_cluster(cluster) 71 | except Exception as e: 72 | backoff = handle_query_failure(e, cluster, backoff) 73 | status["backoff"] = backoff 74 | store.publish( 75 | "clusterstatus", 76 | {"cluster_id": cluster.id, "status": status}, 77 | ) 78 | else: 79 | status["last_query_time"] = now 80 | if backoff: 81 | logger.info( 82 | "Cluster {} ({}) recovered after {} tries.".format( 83 | cluster.id, cluster.api_server_url, backoff["tries"] 84 | ) 85 | ) 86 | del status["backoff"] 87 | old_data = store.get_cluster_data(data["id"]) 88 | if old_data: 89 | # https://pikacode.com/phijaro/json_delta/ticket/11/ 90 | # diff is extremely slow without array_align=False 91 | delta = json_delta.diff( 92 | old_data, data, verbose=debug, array_align=False 93 | ) 94 | store.publish( 95 | "clusterdelta", 96 | {"cluster_id": cluster.id, "delta": delta}, 97 | ) 98 | if delta: 99 | store.set_cluster_data(cluster.id, data) 100 | else: 101 | logger.info( 102 | "Discovered new cluster {} ({}).".format( 103 | cluster.id, cluster.api_server_url 104 | ) 105 | ) 106 | # first send status with last_query_time! 
107 | store.publish( 108 | "clusterstatus", 109 | {"cluster_id": cluster.id, "status": status}, 110 | ) 111 | store.publish("clusterupdate", data) 112 | store.set_cluster_data(cluster.id, data) 113 | store.set_cluster_status(cluster.id, status) 114 | store.set_cluster_ids(cluster_ids) 115 | except Exception as e: 116 | logger.exception(f"Failed to update: {e}") 117 | finally: 118 | store.release_lock(lock) 119 | # sleep 1-2 seconds 120 | gevent.sleep(min(random_jitter(1), query_interval)) 121 | -------------------------------------------------------------------------------- /kube_ops_view/utils.py: -------------------------------------------------------------------------------- 1 | import requests.exceptions 2 | 3 | 4 | def get_short_error_message(e: Exception): 5 | """Generate a reasonable short message why the HTTP request failed.""" 6 | 7 | if isinstance(e, requests.exceptions.RequestException) and e.response is not None: 8 | # e.g. "401 Unauthorized" 9 | return "{} {}".format(e.response.status_code, e.response.reason) 10 | elif isinstance(e, requests.exceptions.ConnectionError): 11 | # e.g. "ConnectionError" or "ConnectTimeout" 12 | return e.__class__.__name__ 13 | else: 14 | return str(e) 15 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool] 2 | [tool.poetry] 3 | name = "kube-ops-view" 4 | version = "2020.0.dev1" 5 | description = "Kubernetes Operational View - read-only system dashboard for multiple K8s clusters" 6 | authors = ["Henning Jacobs "] 7 | 8 | [tool.poetry.dependencies] 9 | python = ">=3.7" 10 | click = "*" 11 | flask = "*" 12 | flask-dance = "*" 13 | gevent = "*" 14 | json-delta = ">=2.0" 15 | pykube-ng = "*" 16 | redlock-py = "*" 17 | requests = "*" 18 | stups-tokens = ">=1.1.19" 19 | 20 | [tool.poetry.dev-dependencies] 21 | coveralls = "*" 22 | flake8 = "*" 23 | pytest = "*" 24 | pytest-cov = "*" 25 | black = "^19.10b0" 26 | mypy = "^0.761" 27 | pre-commit = "^1.21.0" 28 | -------------------------------------------------------------------------------- /screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hjacobs/kube-ops-view/28f93ed4c2b66e395607ad28d63fd83df2f3e413/screenshot.png -------------------------------------------------------------------------------- /tests/test_mock.py: -------------------------------------------------------------------------------- 1 | from kube_ops_view.cluster_discovery import MockDiscoverer 2 | from kube_ops_view.mock import query_mock_cluster 3 | 4 | 5 | def test_query_mock_clusters(): 6 | discoverer = MockDiscoverer() 7 | for cluster in discoverer.get_clusters(): 8 | data = query_mock_cluster(cluster) 9 | assert data["id"].startswith("mock-cluster-") 10 | --------------------------------------------------------------------------------
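
A minimal sketch (not part of the repository) of the screen-token flow implemented in kube_ops_view/stores.py and exposed through the /screen-tokens and /screen/<token> routes in main.py: a token is created, bound to the first client address that redeems it, and rejected for any other address. It assumes the kube-ops-view package and its dependencies are installed; the IP addresses are invented for illustration.

from kube_ops_view.stores import MemoryStore

store = MemoryStore()
token = store.create_screen_token()

# The first redemption succeeds and records remote_addr in the stored token data.
store.redeem_screen_token(token, "10.0.0.1")

# Redeeming the same token from a different address fails, because check_token()
# compares the stored remote_addr with the caller's address.
try:
    store.redeem_screen_token(token, "10.0.0.2")
except ValueError as error:
    print(error)  # "Invalid token"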
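
When a cluster query fails, handle_query_failure() in kube_ops_view/update.py schedules the next attempt with calculate_backoff(), an exponential backoff capped at 60 seconds plus random jitter. A small sketch that reuses the same helpers from kube_ops_view.backoff to show roughly how the wait grows per retry; the number of retries shown (7) is arbitrary.

from kube_ops_view.backoff import expo, random_jitter

# Same formula as calculate_backoff() in update.py.
for tries in range(1, 8):
    wait_seconds = random_jitter(expo(tries, factor=2, max_value=60), jitter=4)
    print(f"try {tries}: wait ~{round(wait_seconds)}s before querying the cluster again")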
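
The /events route in kube_ops_view/main.py streams Server-Sent Events (clusterstatus, clusterupdate, clusterdelta, bootstrapend) as event:/data: line pairs, normally consumed by the frontend through an EventSource. A rough client sketch using plain requests, assuming a local instance is already running on the default port 8080 (for example one started with --mock) and that no OAuth authorization is configured.

import json

import requests

# Stream the SSE endpoint and print a line per full cluster update.
response = requests.get("http://localhost:8080/events", stream=True)
event_type = None
for line in response.iter_lines(decode_unicode=True):
    if line.startswith("event: "):
        event_type = line[len("event: "):]
    elif line.startswith("data: ") and event_type == "clusterupdate":
        payload = line[len("data: "):]
        if payload:
            cluster = json.loads(payload)
            print(cluster["id"], "has", len(cluster["nodes"]), "nodes")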