")
158 | def redeem_screen_token(token: str):
159 | remote_addr = (
160 | flask.request.headers.get("X-Forwarded-For") or flask.request.remote_addr
161 | )
162 | logger.info(
163 | 'Trying to redeem screen token "{}" for IP {}..'.format(token, remote_addr)
164 | )
165 | try:
166 | app.store.redeem_screen_token(token, remote_addr)
167 | except Exception:
168 | flask.abort(401)
169 | flask.session["auth_token"] = (token, "")
170 | return redirect(app.config["APPLICATION_ROOT"])
171 |
172 |
173 | @app.route("/logout")
174 | def logout():
175 | flask.session.pop("auth_token", None)
176 | return redirect(app.config["APPLICATION_ROOT"])
177 |
178 |
179 | def shutdown():
180 | # wait a bit so Kubernetes has time to update its endpoints;
181 | # this requires changing the readinessProbe's
182 | # PeriodSeconds and FailureThreshold appropriately
183 | # see https://godoc.org/k8s.io/kubernetes/pkg/api/v1#Probe
184 | gevent.sleep(10)
185 | exit(0)
186 |
187 |
188 | def exit_gracefully(signum, frame):
189 | logger.info("Received TERM signal, shutting down..")
190 | SERVER_STATUS["shutdown"] = True
191 | gevent.spawn(shutdown)
192 |
193 |
194 | def print_version(ctx, param, value):
195 | if not value or ctx.resilient_parsing:
196 | return
197 | click.echo("Kubernetes Operational View {}".format(kube_ops_view.__version__))
198 | ctx.exit()
199 |
200 |
201 | class CommaSeparatedValues(click.ParamType):
202 | name = "comma_separated_values"
203 |
204 | def convert(self, value, param, ctx):
205 | if isinstance(value, str):
206 | values = list(filter(None, value.split(",")))  # materialize so an empty value stays falsy
207 | else:
208 | values = value
209 | return values
210 |
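For illustration (editorial, not part of the file): this is what the converter yields for a typical --clusters value. The param and ctx arguments are unused by convert, so None works for a standalone call, and the import path assumes this module is kube_ops_view.main.

from kube_ops_view.main import CommaSeparatedValues  # assumed module path

converter = CommaSeparatedValues()
urls = converter.convert("https://kube-1.example.org,,https://kube-2.example.org", None, None)
print(list(urls))  # ['https://kube-1.example.org', 'https://kube-2.example.org']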
211 |
212 | @click.command(context_settings={"help_option_names": ["-h", "--help"]})
213 | @click.option(
214 | "-V",
215 | "--version",
216 | is_flag=True,
217 | callback=print_version,
218 | expose_value=False,
219 | is_eager=True,
220 | help="Print the current version number and exit.",
221 | )
222 | @click.option(
223 | "-p",
224 | "--port",
225 | type=int,
226 | help="HTTP port to listen on (default: 8080)",
227 | envvar="SERVER_PORT",
228 | default=8080,
229 | )
230 | @click.option(
231 | "--route-prefix",
232 | help="""The URL prefix under which kube-ops-view is externally reachable
233 | (for example, if kube-ops-view is served via a reverse proxy). Used for
234 | generating relative and absolute links back to kube-ops-view itself. If the
235 | URL has a path portion, it will be used to prefix all HTTP endpoints served
236 | by kube-ops-view. If omitted, relevant URL components will be derived
237 | automatically.""",
238 | envvar="ROUTE_PREFIX",
239 | default="/",
240 | )
241 | @click.option(
242 | "-d", "--debug", is_flag=True, help="Run in debugging mode", envvar="DEBUG"
243 | )
244 | @click.option(
245 | "-m", "--mock", is_flag=True, help="Mock Kubernetes clusters", envvar="MOCK"
246 | )
247 | @click.option(
248 | "--secret-key",
249 | help="Secret key for session cookies",
250 | envvar="SECRET_KEY",
251 | default="development",
252 | )
253 | @click.option(
254 | "--redis-url",
255 | help="Redis URL to use for pub/sub and job locking",
256 | envvar="REDIS_URL",
257 | )
258 | @click.option(
259 | "--clusters",
260 | type=CommaSeparatedValues(),
261 | help="Comma separated list of Kubernetes API server URLs (default: {})".format(
262 | DEFAULT_CLUSTERS
263 | ),
264 | envvar="CLUSTERS",
265 | )
266 | @click.option(
267 | "--cluster-registry-url",
268 | help="URL to cluster registry",
269 | envvar="CLUSTER_REGISTRY_URL",
270 | )
271 | @click.option(
272 | "--kubeconfig-path",
273 | type=click.Path(exists=True),
274 | help="Path to kubeconfig file",
275 | envvar="KUBECONFIG_PATH",
276 | )
277 | @click.option(
278 | "--kubeconfig-contexts",
279 | type=CommaSeparatedValues(),
280 | help="List of kubeconfig contexts to use (default: use all defined contexts)",
281 | envvar="KUBECONFIG_CONTEXTS",
282 | )
283 | @click.option(
284 | "--query-interval",
285 | type=float,
286 | help="Interval in seconds for querying clusters (default: 5)",
287 | envvar="QUERY_INTERVAL",
288 | default=5,
289 | )
290 | @click.option(
291 | "--node-link-url-template",
292 | help="Template for target URL when clicking on a Node",
293 | envvar="NODE_LINK_URL_TEMPLATE",
294 | )
295 | @click.option(
296 | "--pod-link-url-template",
297 | help="Template for target URL when clicking on a Pod",
298 | envvar="POD_LINK_URL_TEMPLATE",
299 | )
300 | def main(
301 | port,
302 | debug,
303 | mock,
304 | secret_key,
305 | redis_url,
306 | clusters: list,
307 | cluster_registry_url,
308 | kubeconfig_path,
309 | kubeconfig_contexts: list,
310 | query_interval,
311 | node_link_url_template: str,
312 | pod_link_url_template: str,
313 | route_prefix: str,
314 | ):
315 | logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
316 |
317 | store = RedisStore(redis_url) if redis_url else MemoryStore()
318 |
319 | app.debug = debug
320 | app.secret_key = secret_key
321 | app.store = store
322 | app.config["APPLICATION_ROOT"] = route_prefix
323 | app.app_config = {
324 | "node_link_url_template": node_link_url_template,
325 | "pod_link_url_template": pod_link_url_template,
326 | "route_prefix": route_prefix,
327 | }
328 |
329 | discoverer: Union[
330 | MockDiscoverer,
331 | ClusterRegistryDiscoverer,
332 | KubeconfigDiscoverer,
333 | StaticClusterDiscoverer,
334 | ]
335 |
336 | if mock:
337 | cluster_query = query_mock_cluster
338 | discoverer = MockDiscoverer()
339 | else:
340 | cluster_query = query_kubernetes_cluster
341 | if cluster_registry_url:
342 | discoverer = ClusterRegistryDiscoverer(cluster_registry_url)
343 | elif kubeconfig_path:
344 | discoverer = KubeconfigDiscoverer(
345 | Path(kubeconfig_path), set(kubeconfig_contexts or [])
346 | )
347 | else:
348 | api_server_urls = clusters or []
349 | discoverer = StaticClusterDiscoverer(api_server_urls)
350 |
351 | gevent.spawn(
352 | update_clusters,
353 | cluster_discoverer=discoverer,
354 | query_cluster=cluster_query,
355 | store=store,
356 | query_interval=query_interval,
357 | debug=debug,
358 | )
359 |
360 | signal.signal(signal.SIGTERM, exit_gracefully)
361 | http_server = gevent.pywsgi.WSGIServer(("0.0.0.0", port), app)
362 | logger.info("Listening on :{}..".format(port))
363 | http_server.serve_forever()
364 |
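A minimal sketch (editorial, not part of the file) of exercising the CLI without starting the server, using Click's test runner; the import path assumes this module is kube_ops_view.main.

from click.testing import CliRunner
from kube_ops_view.main import main  # assumed module path

runner = CliRunner()
print(runner.invoke(main, ["--version"]).output)  # version banner, then exit
print(runner.invoke(main, ["--help"]).output)     # all options with their env vars and defaults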
--------------------------------------------------------------------------------
/kube_ops_view/mock.py:
--------------------------------------------------------------------------------
1 | import random
2 | import string
3 | import time
4 |
5 |
6 | def hash_int(x: int):
7 | x = ((x >> 16) ^ x) * 0x45D9F3B
8 | x = ((x >> 16) ^ x) * 0x45D9F3B
9 | x = (x >> 16) ^ x
10 | return x
11 |
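A small illustrative check (editorial, not part of the file): hash_int is a stateless integer mix, so the mock data derived from it is stable across calls.

from kube_ops_view.mock import hash_int

assert hash_int(42) == hash_int(42)         # same input, same output
print([hash_int(i) % 4 for i in range(8)])  # stable, pseudo-random-looking choices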
12 |
13 | def generate_mock_pod(index: int, i: int, j: int):
14 | names = [
15 | "agent-cooper",
16 | "black-lodge",
17 | "bob",
18 | "bobby-briggs",
19 | "laura-palmer",
20 | "leland-palmer",
21 | "log-lady",
22 | "sheriff-truman",
23 | ]
24 | labels = {"env": ["prod", "dev"], "owner": ["x-wing", "iris"]}
25 | pod_phases = ["Pending", "Running", "Running", "Failed"]
26 |
27 | pod_labels = {}
28 | for li, k in enumerate(labels):
29 | v = labels[k]
30 | label_choice = hash_int((index + 1) * (i + 1) * (j + 1) * (li + 1)) % (
31 | len(v) + 1
32 | )
33 | if label_choice != 0:
34 | pod_labels[k] = v[label_choice - 1]
35 |
36 | phase = pod_phases[hash_int((index + 1) * (i + 1) * (j + 1)) % len(pod_phases)]
37 | containers = []
38 | for _ in range(1 + j % 2):
39 | # generate somewhat realistic request/usage numbers
40 | requests_cpu = random.randint(10, 50)
41 | requests_memory = random.randint(64, 256)
42 | # with max, we defend ourselves against negative cpu/memory ;)
43 | usage_cpu = max(requests_cpu + random.randint(-30, 30), 1)
44 | usage_memory = max(requests_memory + random.randint(-64, 128), 1)
45 | container = {
46 | "name": "myapp",
47 | "image": "foo/bar/{}".format(j),
48 | "resources": {
49 | "requests": {
50 | "cpu": f"{requests_cpu}m",
51 | "memory": f"{requests_memory}Mi",
52 | },
53 | "limits": {},
54 | "usage": {"cpu": f"{usage_cpu}m", "memory": f"{usage_memory}Mi"},
55 | },
56 | "ready": True,
57 | "state": {"running": {}},
58 | }
59 | if phase == "Running":
60 | if j % 13 == 0:
61 | container.update(
62 | **{
63 | "ready": False,
64 | "state": {"waiting": {"reason": "CrashLoopBackOff"}},
65 | }
66 | )
67 | elif j % 7 == 0:
68 | container.update(
69 | **{"ready": False, "state": {"running": {}}, "restartCount": 3}
70 | )
71 | elif phase == "Failed":
72 | del container["state"]
73 | del container["ready"]
74 | containers.append(container)
75 | pod = {
76 | "name": "{}-{}-{}".format(
77 | names[hash_int((i + 1) * (j + 1)) % len(names)], i, j
78 | ),
79 | "namespace": "kube-system" if j < 3 else "default",
80 | "labels": pod_labels,
81 | "phase": phase,
82 | "containers": containers,
83 | }
84 | if phase == "Running" and j % 17 == 0:
85 | pod["deleted"] = 123
86 |
87 | return pod
88 |
89 |
90 | def query_mock_cluster(cluster):
91 | """Generate deterministic (no randomness!) mock data."""
92 | index = int(cluster.id.split("-")[-1])
93 | nodes = {}
94 | for i in range(10):
95 | # add/remove the second to last node every 13 seconds
96 | if i == 8 and int(time.time() / 13) % 2 == 0:
97 | continue
98 | labels = {}
99 | # only the first two clusters have master nodes
100 | if i < 2 and index < 2:
101 | if index == 0:
102 | labels["kubernetes.io/role"] = "master"
103 | elif index == 1:
104 | labels["node-role.kubernetes.io/master"] = ""
105 | else:
106 | labels["master"] = "true"
107 | pods = {}
108 | for j in range(hash_int((index + 1) * (i + 1)) % 32):
109 | # add/remove some pods every 7 seconds
110 | if j % 17 == 0 and int(time.time() / 7) % 2 == 0:
111 | pass
112 | else:
113 | pod = generate_mock_pod(index, i, j)
114 | pods["{}/{}".format(pod["namespace"], pod["name"])] = pod
115 |
116 | # aggregate resource usage from this node's containers
117 | usage_cpu = 0
118 | usage_memory = 0
119 | for p in pods.values():
120 | for c in p["containers"]:
121 | usage_cpu += int(c["resources"]["usage"]["cpu"].split("m")[0])
122 | usage_memory += int(c["resources"]["usage"]["memory"].split("Mi")[0])
123 |
124 | # generate a longer, randomized name for the node
125 | suffix = "".join(
126 | [random.choice(string.ascii_letters) for n in range(random.randint(1, 20))]
127 | )
128 |
129 | node = {
130 | "name": f"node-{i}-{suffix}",
131 | "labels": labels,
132 | "status": {
133 | "capacity": {"cpu": "8", "memory": "64Gi", "pods": "110"},
134 | "allocatable": {"cpu": "7800m", "memory": "62Gi"},
135 | },
136 | "pods": pods,
137 | # node usage is the sum of its containers' usage
138 | "usage": {"cpu": f"{usage_cpu}m", "memory": f"{usage_memory}Mi"},
139 | }
140 | nodes[node["name"]] = node
141 | pod = generate_mock_pod(index, 11, index)
142 | unassigned_pods = {"{}/{}".format(pod["namespace"], pod["name"]): pod}
143 | return {
144 | "id": "mock-cluster-{}".format(index),
145 | "api_server_url": "https://kube-{}.example.org".format(index),
146 | "nodes": nodes,
147 | "unassigned_pods": unassigned_pods,
148 | }
149 |
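Illustrative usage (editorial, not part of the file): query_mock_cluster only reads cluster.id, so a simple stand-in object is enough outside the discoverer machinery (tests/test_mock.py uses a real MockDiscoverer instead).

from types import SimpleNamespace

from kube_ops_view.mock import query_mock_cluster

data = query_mock_cluster(SimpleNamespace(id="mock-cluster-0"))
print(len(data["nodes"]), "nodes")
print(sum(len(n["pods"]) for n in data["nodes"].values()), "pods")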
--------------------------------------------------------------------------------
/kube_ops_view/oauth.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from flask_dance.consumer import OAuth2ConsumerBlueprint
4 |
5 |
6 | CREDENTIALS_DIR = os.getenv("CREDENTIALS_DIR", "")
7 |
8 |
9 | class OAuth2ConsumerBlueprintWithClientRefresh(OAuth2ConsumerBlueprint):
10 |
11 | """Same as flask_dance.consumer.OAuth2ConsumerBlueprint, but loads client credentials from file."""
12 |
13 | def refresh_credentials(self):
14 | with open(os.path.join(CREDENTIALS_DIR, "authcode-client-id")) as fd:
15 | # note that we need to set two attributes because of how OAuth2ConsumerBlueprint works :-/
16 | self._client_id = self.client_id = fd.read().strip()
17 | with open(os.path.join(CREDENTIALS_DIR, "authcode-client-secret")) as fd:
18 | self.client_secret = fd.read().strip()
19 |
20 | def login(self):
21 | self.refresh_credentials()
22 | return super().login()
23 |
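A hypothetical wiring sketch (editorial; the real registration lives in main.py and may differ). All URLs and the scope below are placeholders.

from flask import Flask

from kube_ops_view.oauth import OAuth2ConsumerBlueprintWithClientRefresh

app = Flask(__name__)
app.secret_key = "development"  # flask-dance needs sessions
oauth_bp = OAuth2ConsumerBlueprintWithClientRefresh(
    "oauth",
    __name__,
    base_url="https://auth.example.org",                             # placeholder
    authorization_url="https://auth.example.org/oauth2/authorize",   # placeholder
    token_url="https://auth.example.org/oauth2/token",               # placeholder
    scope=["uid"],                                                   # placeholder
)
app.register_blueprint(oauth_bp, url_prefix="/login")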
--------------------------------------------------------------------------------
/kube_ops_view/static/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hjacobs/kube-ops-view/28f93ed4c2b66e395607ad28d63fd83df2f3e413/kube_ops_view/static/favicon.ico
--------------------------------------------------------------------------------
/kube_ops_view/static/sharetechmono.woff2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hjacobs/kube-ops-view/28f93ed4c2b66e395607ad28d63fd83df2f3e413/kube_ops_view/static/sharetechmono.woff2
--------------------------------------------------------------------------------
/kube_ops_view/stores.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import random
4 | import string
5 | import time
6 | from abc import ABC
7 | from abc import abstractmethod
8 | from queue import Queue
9 | from typing import Set
10 |
11 | import redis
12 | from redlock import Redlock
13 |
14 | logger = logging.getLogger(__name__)
15 |
16 | ONE_YEAR = 3600 * 24 * 365
17 |
18 |
19 | def generate_token(n: int):
20 | """Generate a random ASCII token of length n."""
21 | # uses os.urandom()
22 | rng = random.SystemRandom()
23 | return "".join([rng.choice(string.ascii_letters + string.digits) for i in range(n)])
24 |
25 |
26 | def generate_token_data():
27 | """Generate screen token data for storing."""
28 | token = generate_token(10)
29 | now = time.time()
30 | return {"token": token, "created": now, "expires": now + ONE_YEAR}
31 |
32 |
33 | def check_token(token: str, remote_addr: str, data: dict):
34 | """Check whether the given screen token is valid, raises exception if not."""
35 | now = time.time()
36 | if (
37 | data
38 | and now < data["expires"]
39 | and data.get("remote_addr", remote_addr) == remote_addr
40 | ):
41 | data["remote_addr"] = remote_addr
42 | return data
43 | else:
44 | raise ValueError("Invalid token")
45 |
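Illustrative behaviour (editorial, not part of the file): a screen token is bound to whichever client IP redeems it first, and later redemptions from other IPs are rejected.

from kube_ops_view.stores import check_token, generate_token_data

data = generate_token_data()
data = check_token(data["token"], "10.0.0.1", data)   # first redeem binds the IP
check_token(data["token"], "10.0.0.1", data)          # same IP: still valid
try:
    check_token(data["token"], "10.0.0.2", data)      # other IP: rejected
except ValueError as e:
    print(e)                                          # Invalid token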
46 |
47 | class AbstractStore(ABC):
48 | @abstractmethod
49 | def set(self, key, val):
50 | pass
51 |
52 | @abstractmethod
53 | def get(self, key):
54 | return None
55 |
56 | def get_cluster_ids(self):
57 | return self.get("cluster-ids") or []
58 |
59 | def set_cluster_ids(self, cluster_ids: Set[str]):
60 | self.set("cluster-ids", list(sorted(cluster_ids)))
61 |
62 | def get_cluster_status(self, cluster_id: str) -> dict:
63 | return self.get("clusters:{}:status".format(cluster_id)) or {}
64 |
65 | def set_cluster_status(self, cluster_id: str, status: dict):
66 | self.set("clusters:{}:status".format(cluster_id), status)
67 |
68 | def get_cluster_data(self, cluster_id: str) -> dict:
69 | return self.get("clusters:{}:data".format(cluster_id)) or {}
70 |
71 | def set_cluster_data(self, cluster_id: str, data: dict):
72 | self.set("clusters:{}:data".format(cluster_id), data)
73 |
74 |
75 | class MemoryStore(AbstractStore):
76 |
77 | """Memory-only backend, mostly useful for local debugging."""
78 |
79 | def __init__(self):
80 | self._data = {}
81 | self._queues = []
82 | self._screen_tokens = {}
83 |
84 | def set(self, key, value):
85 | self._data[key] = value
86 |
87 | def get(self, key):
88 | return self._data.get(key)
89 |
90 | def acquire_lock(self):
91 | # no-op for memory store
92 | return "fake-lock"
93 |
94 | def release_lock(self, lock):
95 | # no-op for memory store
96 | pass
97 |
98 | def publish(self, event_type, event_data):
99 | for queue in self._queues:
100 | queue.put((event_type, event_data))
101 |
102 | def listen(self):
103 | queue = Queue()
104 | self._queues.append(queue)
105 | try:
106 | while True:
107 | item = queue.get()
108 | yield item
109 | finally:
110 | self._queues.remove(queue)
111 |
112 | def create_screen_token(self):
113 | data = generate_token_data()
114 | token = data["token"]
115 | self._screen_tokens[token] = data
116 | return token
117 |
118 | def redeem_screen_token(self, token: str, remote_addr: str):
119 | data = self._screen_tokens.get(token)
120 | data = check_token(token, remote_addr, data)
121 | self._screen_tokens[token] = data
122 |
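A quick illustrative round trip with the in-memory backend (editorial, not part of the file).

from kube_ops_view.stores import MemoryStore

store = MemoryStore()
token = store.create_screen_token()
store.redeem_screen_token(token, "10.0.0.1")                        # binds the token to this IP
store.set_cluster_status("mock-cluster-0", {"last_query_time": 0})
print(store.get_cluster_status("mock-cluster-0"))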
123 |
124 | class RedisStore(AbstractStore):
125 |
126 | """Redis-based backend for deployments with replicas > 1."""
127 |
128 | def __init__(self, url: str):
129 | logger.info("Connecting to Redis on {}..".format(url))
130 | self._redis = redis.StrictRedis.from_url(url)
131 | self._redlock = Redlock([url])
132 |
133 | def set(self, key, value):
134 | self._redis.set(key, json.dumps(value, separators=(",", ":")))
135 |
136 | def get(self, key):
137 | value = self._redis.get(key)
138 | if value:
139 | return json.loads(value.decode("utf-8"))
140 |
141 | def acquire_lock(self):
142 | return self._redlock.lock("update", 10000)
143 |
144 | def release_lock(self, lock):
145 | self._redlock.unlock(lock)
146 |
147 | def publish(self, event_type, event_data):
148 | self._redis.publish(
149 | "default",
150 | "{}:{}".format(event_type, json.dumps(event_data, separators=(",", ":"))),
151 | )
152 |
153 | def listen(self):
154 | p = self._redis.pubsub()
155 | p.subscribe("default")
156 | for message in p.listen():
157 | if message["type"] == "message":
158 | event_type, data = message["data"].decode("utf-8").split(":", 1)
159 | yield (event_type, json.loads(data))
160 |
161 | def create_screen_token(self):
162 | """Generate a new screen token and store it in Redis."""
163 | data = generate_token_data()
164 | token = data["token"]
165 | self._redis.set("screen-tokens:{}".format(token), json.dumps(data))
166 | return token
167 |
168 | def redeem_screen_token(self, token: str, remote_addr: str):
169 | """Validate the given token and bind it to the IP."""
170 | redis_key = "screen-tokens:{}".format(token)
171 | data = self._redis.get(redis_key)
172 | if not data:
173 | raise ValueError("Invalid token")
174 | data = json.loads(data.decode("utf-8"))
175 | data = check_token(token, remote_addr, data)
176 | self._redis.set(redis_key, json.dumps(data))
177 |
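Sketch only (editorial, not part of the file): the same interface backed by Redis, assuming a reachable instance at redis://localhost:6379.

from kube_ops_view.stores import RedisStore

store = RedisStore("redis://localhost:6379")  # assumed local Redis
lock = store.acquire_lock()                   # distributed lock, ~10s validity
if lock:
    try:
        store.publish("clusterstatus", {"cluster_id": "mock-cluster-0", "status": {}})
    finally:
        store.release_lock(lock)
# store.listen() is a generator yielding (event_type, data) tuples for every
# message published on the "default" channel by any replica.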
--------------------------------------------------------------------------------
/kube_ops_view/templates/index.html:
--------------------------------------------------------------------------------
(HTML markup was not captured in this extract; only the rendered text is recoverable:)
Kubernetes Operational View {{ version }}
Loading..
--------------------------------------------------------------------------------
/kube_ops_view/templates/screen-tokens.html:
--------------------------------------------------------------------------------
(HTML markup was not captured in this extract; only the rendered text is recoverable:)
Screen Tokens
{% if new_token %}
The new token is: {{ new_token }}
{% endif %}
--------------------------------------------------------------------------------
/kube_ops_view/update.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 | from typing import Callable
4 |
5 | import gevent
6 | import json_delta
7 | import requests.exceptions
8 |
9 | from .backoff import expo
10 | from .backoff import random_jitter
11 | from .cluster_discovery import Cluster
12 | from .utils import get_short_error_message
13 |
14 | logger = logging.getLogger(__name__)
15 |
16 |
17 | def calculate_backoff(tries: int):
18 | return random_jitter(expo(tries, factor=2, max_value=60), jitter=4)
19 |
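An editorial sketch to get a rough feel for the backoff schedule; expo and random_jitter live in kube_ops_view/backoff.py (not shown in this section), so the exact numbers in the comment are assumptions based on the arguments used above.

from kube_ops_view.update import calculate_backoff

for tries in range(1, 6):
    # exponential growth (factor 2), capped around 60s, plus up to ~4s of jitter
    print(tries, round(calculate_backoff(tries), 1))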
20 |
21 | def handle_query_failure(e: Exception, cluster: Cluster, backoff: dict):
22 | if not backoff:
23 | backoff = {}
24 | tries = backoff.get("tries", 0) + 1
25 | backoff["tries"] = tries
26 | wait_seconds = calculate_backoff(tries)
27 | backoff["next_try"] = time.time() + wait_seconds
28 | message = get_short_error_message(e)
29 | if isinstance(e, requests.exceptions.RequestException):
30 | log = logger.error
31 | else:
32 | log = logger.exception
33 | log(
34 | "Failed to query cluster {} ({}): {} (try {}, wait {} seconds)".format(
35 | cluster.id, cluster.api_server_url, message, tries, round(wait_seconds)
36 | )
37 | )
38 | return backoff
39 |
40 |
41 | def update_clusters(
42 | cluster_discoverer,
43 | query_cluster: Callable[[Cluster], dict],
44 | store,
45 | query_interval: float = 5,
46 | debug: bool = False,
47 | ):
48 | while True:
49 | lock = store.acquire_lock()
50 | if lock:
51 | try:
52 | clusters = cluster_discoverer.get_clusters()
53 | cluster_ids = set()
54 | for cluster in clusters:
55 | cluster_ids.add(cluster.id)
56 | status = store.get_cluster_status(cluster.id)
57 | now = time.time()
58 | if now < status.get("last_query_time", 0) + query_interval:
59 | continue
60 | backoff = status.get("backoff")
61 | if backoff and now < backoff["next_try"]:
62 | # cluster is still in backoff, skip
63 | continue
64 | try:
65 | logger.debug(
66 | "Querying cluster {} ({})..".format(
67 | cluster.id, cluster.api_server_url
68 | )
69 | )
70 | data = query_cluster(cluster)
71 | except Exception as e:
72 | backoff = handle_query_failure(e, cluster, backoff)
73 | status["backoff"] = backoff
74 | store.publish(
75 | "clusterstatus",
76 | {"cluster_id": cluster.id, "status": status},
77 | )
78 | else:
79 | status["last_query_time"] = now
80 | if backoff:
81 | logger.info(
82 | "Cluster {} ({}) recovered after {} tries.".format(
83 | cluster.id, cluster.api_server_url, backoff["tries"]
84 | )
85 | )
86 | del status["backoff"]
87 | old_data = store.get_cluster_data(data["id"])
88 | if old_data:
89 | # https://pikacode.com/phijaro/json_delta/ticket/11/
90 | # diff is extremely slow without array_align=False
91 | delta = json_delta.diff(
92 | old_data, data, verbose=debug, array_align=False
93 | )
94 | store.publish(
95 | "clusterdelta",
96 | {"cluster_id": cluster.id, "delta": delta},
97 | )
98 | if delta:
99 | store.set_cluster_data(cluster.id, data)
100 | else:
101 | logger.info(
102 | "Discovered new cluster {} ({}).".format(
103 | cluster.id, cluster.api_server_url
104 | )
105 | )
106 | # publish the status (with last_query_time) before the full cluster data
107 | store.publish(
108 | "clusterstatus",
109 | {"cluster_id": cluster.id, "status": status},
110 | )
111 | store.publish("clusterupdate", data)
112 | store.set_cluster_data(cluster.id, data)
113 | store.set_cluster_status(cluster.id, status)
114 | store.set_cluster_ids(cluster_ids)
115 | except Exception as e:
116 | logger.exception(f"Failed to update: {e}")
117 | finally:
118 | store.release_lock(lock)
119 | # sleep roughly 1-2 seconds, but never longer than the query interval
120 | gevent.sleep(min(random_jitter(1), query_interval))
121 |
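Illustrative sketch (editorial, not part of the file) of the delta mechanism used above: json_delta produces a compact patch that listeners can apply to their cached copy. The stanza format shown in the comment is indicative only.

import json_delta

old = {"nodes": {"node-1": {"pods": {"default/a": {"phase": "Pending"}}}}}
new = {"nodes": {"node-1": {"pods": {"default/a": {"phase": "Running"}}}}}

delta = json_delta.diff(old, new, verbose=False, array_align=False)
print(delta)  # e.g. [[['nodes', 'node-1', 'pods', 'default/a', 'phase'], 'Running']]
print(json_delta.patch(old, delta) == new)  # True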
--------------------------------------------------------------------------------
/kube_ops_view/utils.py:
--------------------------------------------------------------------------------
1 | import requests.exceptions
2 |
3 |
4 | def get_short_error_message(e: Exception):
5 | """Generate a reasonable short message why the HTTP request failed."""
6 |
7 | if isinstance(e, requests.exceptions.RequestException) and e.response is not None:
8 | # e.g. "401 Unauthorized"
9 | return "{} {}".format(e.response.status_code, e.response.reason)
10 | elif isinstance(e, requests.exceptions.ConnectionError):
11 | # e.g. "ConnectionError" or "ConnectTimeout"
12 | return e.__class__.__name__
13 | else:
14 | return str(e)
15 |
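Illustrative usage (editorial, not part of the file).

import requests

from kube_ops_view.utils import get_short_error_message

print(get_short_error_message(requests.exceptions.ConnectTimeout()))  # "ConnectTimeout"
print(get_short_error_message(ValueError("boom")))                    # "boom"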
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool]
2 | [tool.poetry]
3 | name = "kube-ops-view"
4 | version = "2020.0.dev1"
5 | description = "Kubernetes Operational View - read-only system dashboard for multiple K8s clusters"
6 | authors = ["Henning Jacobs "]
7 |
8 | [tool.poetry.dependencies]
9 | python = ">=3.7"
10 | click = "*"
11 | flask = "*"
12 | flask-dance = "*"
13 | gevent = "*"
14 | json-delta = ">=2.0"
15 | pykube-ng = "*"
16 | redlock-py = "*"
17 | requests = "*"
18 | stups-tokens = ">=1.1.19"
19 |
20 | [tool.poetry.dev-dependencies]
21 | coveralls = "*"
22 | flake8 = "*"
23 | pytest = "*"
24 | pytest-cov = "*"
25 | black = "^19.10b0"
26 | mypy = "^0.761"
27 | pre-commit = "^1.21.0"
28 |
--------------------------------------------------------------------------------
/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hjacobs/kube-ops-view/28f93ed4c2b66e395607ad28d63fd83df2f3e413/screenshot.png
--------------------------------------------------------------------------------
/tests/test_mock.py:
--------------------------------------------------------------------------------
1 | from kube_ops_view.cluster_discovery import MockDiscoverer
2 | from kube_ops_view.mock import query_mock_cluster
3 |
4 |
5 | def test_query_mock_clusters():
6 | discoverer = MockDiscoverer()
7 | for cluster in discoverer.get_clusters():
8 | data = query_mock_cluster(cluster)
9 | assert data["id"].startswith("mock-cluster-")
10 |
--------------------------------------------------------------------------------