├── scanners ├── __init__.py ├── flux_oci_repository.py ├── flux_helm_repo.py ├── flux_helm_release.py └── argo_helm_application.py ├── .envrc ├── requirements.txt ├── web ├── src │ ├── vite-env.d.ts │ ├── info │ │ ├── dex.md │ │ ├── pgadmin.md │ │ ├── reloader.md │ │ ├── zitadel.md │ │ ├── home-assistant.md │ │ ├── longhorn.md │ │ ├── metallb.md │ │ ├── cert-manager.md │ │ ├── kured.md │ │ ├── authelia.md │ │ ├── descheduler.md │ │ ├── redis.md │ │ ├── loki.md │ │ ├── kyverno.md │ │ ├── snmp-exporter-dell-idrac.md │ │ ├── hajimari.md │ │ ├── vpa.md │ │ ├── cilium.md │ │ ├── snapshot-controller.md │ │ ├── cloudflared.md │ │ ├── external-dns.md │ │ ├── external-secrets.md │ │ ├── grafana.md │ │ ├── k8s-gateway.md │ │ ├── goldilocks.md │ │ ├── thanos.md │ │ ├── traefik.md │ │ ├── volsync.md │ │ ├── ingress-nginx.md │ │ ├── kube-prometheus-stack.md │ │ ├── metrics-server.md │ │ ├── emqx.md │ │ ├── rook-ceph-cluster.md │ │ ├── rook-ceph-operator.md │ │ ├── rook-ceph.md │ │ ├── immich-server.md │ │ ├── immich-web.md │ │ ├── immich-typesense.md │ │ ├── immich-machine-learning.md │ │ ├── immich-microservices.md │ │ └── node-feature-discovery.md │ ├── components │ │ ├── text │ │ │ └── index.tsx │ │ ├── code │ │ │ └── index.tsx │ │ ├── icon │ │ │ └── index.tsx │ │ ├── table │ │ │ ├── index.tsx │ │ │ └── sorting.ts │ │ ├── ui │ │ │ └── tabs.tsx │ │ └── search │ │ │ ├── index.tsx │ │ │ └── hr.tsx │ ├── lib │ │ └── utils.ts │ ├── entry-server.tsx │ ├── pages │ │ ├── repo.tsx │ │ ├── top.tsx │ │ ├── index.tsx │ │ ├── grep.tsx │ │ ├── image.tsx │ │ └── helm-release.tsx │ ├── entry-client.tsx │ ├── utils.ts │ ├── index.css │ ├── generators │ │ └── helm-release │ │ │ ├── models.ts │ │ │ └── generator.ts │ └── App.tsx ├── postcss.config.cjs ├── tsconfig.node.json ├── vite.config.ts ├── .gitignore ├── components.json ├── tsconfig.prerender.json ├── public │ ├── opensearch.xml │ └── k8s-search.svg ├── e2e │ ├── wordcloud.spec.ts │ ├── hr.spec.ts │ ├── grep.spec.ts │ ├── 
search.spec.ts │ └── dynamic.spec.ts ├── index.html ├── tsconfig.json ├── tailwind.config.cjs ├── prerender.ts ├── package.json ├── server.ts ├── playwright.config.ts └── renderer.ts ├── info_model.py ├── .gitignore ├── init-db.py ├── download.py ├── .github └── workflows │ ├── build.yaml │ └── update.yaml ├── flake.nix ├── scripts ├── download.mjs └── trends.py ├── flake.lock ├── README.md ├── search.py └── interesting.py /scanners/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | use nix 2 | use flake . --impure 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | ruamel.yaml 3 | pydantic -------------------------------------------------------------------------------- /web/src/vite-env.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | -------------------------------------------------------------------------------- /web/src/info/dex.md: -------------------------------------------------------------------------------- 1 | [Dex](https://dexidp.io/) is OpenID Connect (OIDC) identity and OAuth 2.0 provider with pluggable connectors. 
from pydantic import BaseModel


class InfoModel(BaseModel):
    """One row of per-repository metadata produced by the scanners.

    Validated with pydantic; instances are created from scanner output and
    later serialized for the search index.
    """

    # Repository identifier, presumably "owner/name" — TODO confirm against scanners
    repo_name: str
    # Line count associated with the scanned resource — exact meaning not
    # visible from here; verify against the scanner that populates it
    amount_lines: int
    # Link back to the source (repo or file URL)
    url: str
    # Stored as a string, not a datetime — format not visible here; confirm
    timestamp: str

4 | {props.children} 5 |

It can be used to control lights, fans, thermostats, cameras, and many other devices.
5 |         
6 |             {props.children}
7 |         
8 |     
; 9 | } -------------------------------------------------------------------------------- /web/src/info/longhorn.md: -------------------------------------------------------------------------------- 1 | [Longhorn](https://longhorn.io/) is an open-source, distributed block storage system for Kubernetes. It provides a fast and reliable solution for persistent storage, making it an ideal choice for a wide range of use cases, including stateful applications, databases, and more. -------------------------------------------------------------------------------- /web/src/info/metallb.md: -------------------------------------------------------------------------------- 1 | [metallb](https://metallb.universe.tf/) is a load-balancer implementation for Kubernetes, allowing you to create services with `type: LoadBalancer`. With BGP, you can setup IP ranges in your network that get assigned to k8s services. You can use IP addresses that are assigned to you, and share them amongst services. 2 | -------------------------------------------------------------------------------- /web/src/info/cert-manager.md: -------------------------------------------------------------------------------- 1 | [cert-manager](https://cert-manager.io/) is a certificate management controller for Kubernetes. It automates the management and issuance of TLS certificates from various issuing sources. It will ensure certificates are valid and up to date periodically, and attempt to renew certificates at an appropriate time before expiry. -------------------------------------------------------------------------------- /web/src/info/kured.md: -------------------------------------------------------------------------------- 1 | Kured (Kubernetes REboot Daemon) is a Kubernetes add-on that automates the process of rebooting nodes in your cluster. 
It integrates with the underlying operating system's package management system to identify when a reboot is necessary, and performs the reboot in a safe and controlled manner to minimize disruption to your applications. 2 | -------------------------------------------------------------------------------- /web/src/info/authelia.md: -------------------------------------------------------------------------------- 1 | [Authelia](https://www.authelia.com) is an open-source authentication and authorization server and portal fulfilling the identity and access management (IAM) role of information security in providing multi-factor authentication and single sign-on (SSO) for your applications via a web portal. It acts as a companion for common reverse proxies. 2 | -------------------------------------------------------------------------------- /web/tsconfig.node.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "emitDeclarationOnly": true, 4 | "declaration": true, 5 | "outDir": "dist/server", 6 | "esModuleInterop": true, 7 | "jsx": "react-jsx", 8 | "module": "CommonJS", 9 | 10 | "moduleResolution": "Node", 11 | }, 12 | "include": ["src/entry-server.tsx"] 13 | } -------------------------------------------------------------------------------- /web/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite' 2 | import react from '@vitejs/plugin-react-swc' 3 | import path from 'node:path' 4 | 5 | 6 | // https://vitejs.dev/config/ 7 | export default defineConfig({ 8 | plugins: [ 9 | react()], 10 | 11 | resolve: { 12 | alias: { 13 | "@": path.resolve(__dirname, "./src"), 14 | }, 15 | }, 16 | }) 17 | -------------------------------------------------------------------------------- /web/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | 
yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 25 | **/playwright-report/ -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/node_modules/ 2 | **/.*cache/ 3 | **/dist/ 4 | **/public/repos.db 5 | **/public/*wasm* 6 | repos*.db* 7 | repos/ 8 | .venv/ 9 | .direnv/ 10 | **/__pycache__/ 11 | sqljs-worker-wasm.zip 12 | .DS_Store 13 | frontend/cypress/videos/ 14 | frontend/cypress/screenshots/ 15 | .devenv* 16 | devenv.local.nix 17 | .pre-commit-config.yaml 18 | result 19 | yarn-error.log 20 | web/test-results/**/* 21 | -------------------------------------------------------------------------------- /web/src/info/descheduler.md: -------------------------------------------------------------------------------- 1 | [descheduler](https://github.com/kubernetes-sigs/descheduler) is a Kubernetes add-on that helps optimize resource utilization and clean up unused resources in your cluster. It works by identifying and rescheduling pods that are running on underutilized nodes to more suitable nodes, freeing up resources for other pods to use. This helps to improve the overall performance of your cluster and reduce waste. -------------------------------------------------------------------------------- /web/src/components/icon/index.tsx: -------------------------------------------------------------------------------- 1 | import { Icon as IconifyIcon } from '@iconify/react'; 2 | 3 | export default function Icon(props: { icon?: string }) { 4 | const name = (!props.icon?.includes(":") ? 
"mdi:" : "") + props.icon; 5 | return ( 6 | 7 | {props.icon && } 8 | 9 | ); 10 | } 11 | -------------------------------------------------------------------------------- /web/src/info/redis.md: -------------------------------------------------------------------------------- 1 | [Redis](https://redis.io/) is an open-source, in-memory data structure store that is used as a database, cache, and message broker. It provides a fast and flexible solution for storing and retrieving data, making it an ideal choice for a wide range of use cases, including real-time analytics, session management, and more. 2 | 3 | Redis is used by [Authentik](/#authentik), [Authelia](/#authelia), [oauth2-proxy](/#oauth2-proxy). -------------------------------------------------------------------------------- /web/src/info/loki.md: -------------------------------------------------------------------------------- 1 | [Loki](https://grafana.com/oss/loki/) is a log aggregation system that aggregates logs from various sources, such as Kubernetes, and makes them available for search and analysis. It uses a label-based approach to categorize logs, making it easy to query and retrieve relevant logs. The system is designed to be horizontally scalable, highly available, and multi-tenant, making it suitable for use in a variety of environments, including home labs. -------------------------------------------------------------------------------- /web/src/info/kyverno.md: -------------------------------------------------------------------------------- 1 | [Kyverno](https://kyverno.io/) is a policy engine for Kubernetes that enables you to define and enforce policies for your cluster resources. It provides a flexible and scalable solution for managing your cluster configuration, allowing you to enforce best practices, prevent misconfigurations, and enforce compliance requirements. 
2 | 3 | The [kyverno/policies](https://github.com/kyverno/policies) offer all kinds of security and best practice policies that you could use. -------------------------------------------------------------------------------- /web/src/info/snmp-exporter-dell-idrac.md: -------------------------------------------------------------------------------- 1 | To monitor Dell iDRAC devices with Prometheus, you can use the [SNMP Exporter](https://github.com/prometheus/snmp_exporter) and use it with the `snmp.yml` configuration file. 2 | 3 | Below you can find other users from our community that have already configured the SNMP Exporter for Dell iDRAC devices. Click on their repository links to see their helm release and in the same directory you should find a config map with the `snmp.yml` configuration file. -------------------------------------------------------------------------------- /web/src/info/hajimari.md: -------------------------------------------------------------------------------- 1 | [Hajimari](https://hajimari.io/charts/hajimari/) is a versatile and customizable browser startpage designed specifically for Kubernetes users. It provides a beautiful and user-friendly interface that makes it easy to discover and launch applications running in your home lab. With Hajimari, you can quickly access all of your favorite applications and services, including those deployed on your cluster, without having to remember complex URLs or navigate through multiple tabs. 
-------------------------------------------------------------------------------- /web/src/entry-server.tsx: -------------------------------------------------------------------------------- 1 | import ReactDOMServer from 'react-dom/server' 2 | import { StaticRouter } from 'react-router-dom/server' 3 | import App, { AppData } from './App' 4 | 5 | 6 | 7 | export function render(url: string, appData: AppData, pageData: any) { 8 | return ReactDOMServer.renderToString( 9 | 10 | 11 | , 12 | ) 13 | } 14 | 15 | export type RenderFunction = typeof render; -------------------------------------------------------------------------------- /web/src/info/vpa.md: -------------------------------------------------------------------------------- 1 | [Kubernetes Vertical Pod Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler) (VPA) is a Kubernetes feature that enables automatic scaling of resources for individual pods in a cluster. It works by analyzing resource usage data for pods and adjusting their resource requests and limits to optimize resource utilization. This helps to ensure that your applications have the resources they need to run effectively, while also reducing waste and improving overall cluster efficiency. -------------------------------------------------------------------------------- /web/src/info/cilium.md: -------------------------------------------------------------------------------- 1 | [Cilium](https://cilium.io/) is a networking, security, and observability solution for Kubernetes, built on eBPF. It provides high-performance networking with support for Layer 3-7 policies, load balancing, and transparent encryption. With Cilium, you can enforce fine-grained security policies, observe network flows in real time, and integrate seamlessly with service meshes. It also offers eBPF-powered network policies and cluster mesh capabilities, enabling secure communication across multiple Kubernetes clusters. 
2 | -------------------------------------------------------------------------------- /web/src/info/snapshot-controller.md: -------------------------------------------------------------------------------- 1 | [Snapshot Controller](https://github.com/piraeusdatastore/helm-charts/tree/main/charts/snapshot-controller) deploys the **Kubernetes snapshot-controller**, a required component for **CSI volume snapshotting**. It manages the lifecycle of **VolumeSnapshots**, enabling the creation, listing, and deletion of snapshots. The controller is **CSI-agnostic**, meaning it works with any **CSI driver** that supports the snapshot API, providing a standardized way to handle persistent volume snapshots in Kubernetes clusters. 2 | -------------------------------------------------------------------------------- /web/components.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://ui.shadcn.com/schema.json", 3 | "style": "default", 4 | "rsc": false, 5 | "tsx": true, 6 | "tailwind": { 7 | "config": "tailwind.config.cjs", 8 | "css": "src/index.css", 9 | "baseColor": "gray", 10 | "cssVariables": true, 11 | "prefix": "" 12 | }, 13 | "aliases": { 14 | "components": "@/components", 15 | "utils": "@/lib/utils", 16 | "ui": "@/components/ui", 17 | "lib": "@/lib", 18 | "hooks": "@/hooks" 19 | }, 20 | "iconLibrary": "lucide" 21 | } -------------------------------------------------------------------------------- /web/src/info/cloudflared.md: -------------------------------------------------------------------------------- 1 | [Cloudflared](https://github.com/cloudflare/cloudflared) is a lightweight proxy client for securely connecting applications to Cloudflare’s network. It powers **Cloudflare Tunnel**, allowing services to be exposed without opening ports or configuring firewall rules. 
`cloudflared` routes traffic through Cloudflare’s global network, enhancing security with DDoS protection, authentication, and encrypted connections. It supports **HTTP, TCP, and QUIC**, making it ideal for self-hosted services, remote access, and **zero-trust networking**. 2 | -------------------------------------------------------------------------------- /web/src/info/external-dns.md: -------------------------------------------------------------------------------- 1 | [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) is a Kubernetes add-on that automatically manages the creation and updates of DNS records for services running in your cluster. It integrates with a variety of DNS providers, such as CloudFlare DNS, Amazon Route 53, Google Cloud DNS, and more, to ensure that your applications are easily accessible from anywhere with a DNS name. With ExternalDNS, you can simplify the process of managing DNS records for your home lab, eliminating the need for manual updates and reducing the risk of errors. -------------------------------------------------------------------------------- /web/src/info/external-secrets.md: -------------------------------------------------------------------------------- 1 | [External Secrets](https://external-secrets.io/) is a Kubernetes operator that synchronizes secrets from **external secret management systems** (e.g., AWS Secrets Manager, HashiCorp Vault, Google Secret Manager, 1Password) into Kubernetes secrets. It ensures that sensitive data stays outside the cluster while being securely referenced within applications. **External Secrets** supports **automatic synchronization, templating, and multi-backend integration**, making it a scalable and secure solution for managing secrets in Kubernetes environments. 
2 | -------------------------------------------------------------------------------- /web/src/info/grafana.md: -------------------------------------------------------------------------------- 1 | [Grafana](https://grafana.com/) is an open-source data visualization and monitoring platform that provides support for various data sources such as Prometheus, InfluxDB, and Elasticsearch. It offers a wide range of customizable dashboards and panels that can be used to display and analyze data in real-time. Whether you are looking to monitor the performance of your home lab or simply want to visualize your data in a more meaningful way, Grafana has you covered. With its intuitive user interface and powerful visualization capabilities, it is a must-have tool for any home Kubernetes user. -------------------------------------------------------------------------------- /web/tsconfig.prerender.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | "compilerOptions": { 4 | "lib": ["es2021"], 5 | "module": "commonjs", 6 | "target": "es2021", 7 | 8 | "strict": true, 9 | "esModuleInterop": true, 10 | "skipLibCheck": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "moduleResolution": "node" 13 | }, 14 | "include": ["src/**/*", "types/*", "server.ts", "node_modules/vite/types/*"], 15 | "ts-node": { 16 | "transpileOnly": true, 17 | "files": true, 18 | "esm": true, 19 | "compilerOptions": { 20 | "module": "CommonJS" 21 | } 22 | } 23 | } -------------------------------------------------------------------------------- /web/src/info/k8s-gateway.md: -------------------------------------------------------------------------------- 1 | [k8s_gateway](https://github.com/k8s-gateway/k8s_gateway) is a CoreDNS plugin that resolves all types of external Kubernetes resources. 2 | 3 | It will gather all domains from all services, ingresses, and HTTPRoutes resources and create a DNS record for each of them, corresponding to the service IPs. 
[Goldilocks](https://github.com/FairwindsOps/goldilocks) is an open-source project that provides a set of tools and resources for optimizing resource requests and limits in Kubernetes.
"""Create and populate the ``repo`` table in ``repos.db`` from ``repos.json``."""
import json
import sqlite3


def init_db(db_path: str = 'repos.db', repos_path: str = 'repos.json') -> None:
    """(Re)build the repo table from a JSON list of rows.

    Args:
        db_path: SQLite database file to create/update.
        repos_path: JSON file containing a list of
            ``[repo_name, url, branch, stars]`` rows.

    The table is emptied first so the database always mirrors the JSON file
    exactly. The connection and the JSON file are closed even on error.
    """
    # Load rows first so a malformed JSON file leaves the db untouched.
    with open(repos_path) as f:
        rows = json.load(f)

    conn = sqlite3.connect(db_path)
    try:
        # repo_name is the primary key; INSERT OR REPLACE keeps reruns idempotent.
        conn.execute('''CREATE TABLE IF NOT EXISTS repo
                        (repo_name text primary key, url text, branch text, stars integer)''')
        conn.execute('DELETE FROM repo')
        conn.executemany('INSERT OR REPLACE INTO repo VALUES (?, ?, ?, ?)', rows)
        conn.commit()
    finally:
        conn.close()


if __name__ == '__main__':
    init_db()
It integrates with popular orchestration systems, such as Kubernetes, to provide seamless and automatic service discovery, load balancing, and proxying for your applications. Traefik is designed to be fast, efficient, and easy to use, making it a good solution for home lab environments. With Traefik, you can expose your applications to the internet with ease, providing remote access to your services from anywhere. Whether you're looking to host a personal website, run a web-based service, or simply provide external access to your applications. -------------------------------------------------------------------------------- /web/src/info/volsync.md: -------------------------------------------------------------------------------- 1 | [VolSync](https://github.com/backube/volsync) is a Kubernetes operator for **asynchronous data replication and backup** of persistent volumes. It enables efficient synchronization between storage locations using **rsync, restic, or Rclone**, making it ideal for **backups, disaster recovery, and data migration** across clusters. 2 | 3 | ### **How It Works** 4 | VolSync automates **snapshot-based or file-based** replication of PVCs, supporting: 5 | - **Local backups** (e.g., to another PVC or storage backend) 6 | - **Remote replication** (e.g., across Kubernetes clusters) 7 | - **Incremental synchronization** to optimize storage and bandwidth usage 8 | -------------------------------------------------------------------------------- /web/public/opensearch.xml: -------------------------------------------------------------------------------- 1 | 3 | kubesearch.dev 4 | Your Gateway Drug to Kubernetes! 
5 | [UTF-8] 6 | https://kubesearch.dev/k8s-search.svg 7 | 8 | 9 | [https://kubesearch.dev/] 10 | 11 | -------------------------------------------------------------------------------- /web/src/info/ingress-nginx.md: -------------------------------------------------------------------------------- 1 | [ingress-nginx](https://github.com/kubernetes/ingress-nginx) is a popular Kubernetes Ingress controller that provides external access to your applications running in a cluster. It acts as a reverse proxy and load balancer, routing incoming requests to the appropriate backend service based on URL paths and host names. With ingress-nginx, you can easily expose your home lab applications to the internet, allowing you to access them from anywhere with a web browser. Whether you're looking to run a web-based service, host a personal website, or simply want to provide remote access to your applications, ingress-nginx provides a simple and flexible solution for managing external access to your cluster. 
Overall, kube-prometheus-stack is comprehensive and heavily configures all the tools.
Whether you're looking to build a smart home system, implement a remote monitoring solution, or simply want to connect your devices and applications, EMQX is a powerful and flexible tool for your MQTT needs.
"""Clone every repository listed in repos.db into ./repos/.

Reads the `repo` table (repo_name, branch, url) and shallow-clones each
repository, limiting history to one year and blobs to 1 MiB.  Clones run
in parallel and the script waits for all of them before printing "done".
"""
import sqlite3
import subprocess


def fetch_repos(db_path='repos.db'):
    """Return (dir_name, branch, url) tuples from the `repo` table.

    dir_name is the repository name with '/' replaced by '-' so it can
    be used directly as a directory name under repos/.
    """
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        c.execute("""
        SELECT
            replace(repo_name, '/', '-') as dir_name,
            branch,
            url
        FROM repo
        """)
        return c.fetchall()
    finally:
        conn.close()


def clone_repo(dir_name, branch, url):
    """Start a shallow clone of `url` into repos/<dir_name>.

    Any stale checkout is removed first.  Returns the Popen handle so the
    caller can wait for completion.  Arguments are passed as argv lists
    (shell=False) instead of an interpolated `bash -c` string, so repo
    metadata from the database cannot be used for shell injection.
    """
    subprocess.run(['rm', '-rf', f'repos/{dir_name}'], check=False)
    return subprocess.Popen([
        'git', 'clone', url, f'repos/{dir_name}',
        '--branch', branch,
        '--filter=blob:limit=1m',
        '--single-branch',
        '--shallow-since=1 year',  # max 1 year of history
    ])


def main():
    repos = fetch_repos()

    # mkdir repos (-p: do not fail if it already exists)
    subprocess.run(['mkdir', '-p', 'repos'])

    procs = []
    for dir_name, branch, url in repos:
        print("downloading " + dir_name + " " + branch + " " + url)
        procs.append(clone_repo(dir_name, branch, url))

    # Wait for every clone to finish.  The original script exited (and
    # printed "done") while the clones were still running in the background.
    for proc in procs:
        proc.wait()

    print('')
    print('')

    print("done")


if __name__ == '__main__':
    main()
This allows you to store and access your data from anywhere in the cluster, so that pods are not pinned to a single node, as they are with local-path storage.
**Note:** Most people deploy the operator with the name [rook-ceph-operator](./charts.rook.io-release-rook-ceph-rook-ceph-operator) and the cluster with [rook-ceph-cluster](./charts.rook.io-release-rook-ceph-cluster).
26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /web/src/info/immich-server.md: -------------------------------------------------------------------------------- 1 | [Immich](https://github.com/immich-app/immich) is a self-hosted photo and video backup solution. 2 | 3 | For immich, there are multiple helm deployments: 4 | 5 | - [immich-web](./bjw-s.github.io-helm-charts-app-template-immich-web) 6 | - [**immich-server**](./bjw-s.github.io-helm-charts-app-template-immich-server) **(this one)** 7 | - [immich-typesense](./bjw-s.github.io-helm-charts-app-template-immich-typesense) 8 | - [immich-machine-learning](./bjw-s.github.io-helm-charts-app-template-immich-machine-learning) 9 | - [immich-microservices](./bjw-s.github.io-helm-charts-app-template-immich-microservices) 10 | 11 | For redis and postgres, there are multiple helm charts: 12 | - Redis: [app-template/redis](./bjw-s.github.io-helm-charts-app-template-immich-redis), [bitnami/redis](./charts.bitnami.com-bitnami-redis) 13 | - PostgreSQL: operator [cloudnative-pg/cloudnative-pg](./cloudnative-pg.github.io-charts-cloudnative-pg-postgres), bitnami [bitnami/postgresql](./charts.bitnami.com-bitnami-postgresql) -------------------------------------------------------------------------------- /web/src/info/immich-web.md: -------------------------------------------------------------------------------- 1 | [Immich](https://github.com/immich-app/immich) is a self-hosted photo and video backup solution. 
2 | 3 | For immich, there are multiple helm deployments: 4 | 5 | - [**immich-web**](./bjw-s.github.io-helm-charts-app-template-immich-web) **(this one)** 6 | - [immich-server](./bjw-s.github.io-helm-charts-app-template-immich-server) 7 | - [immich-typesense](./bjw-s.github.io-helm-charts-app-template-immich-typesense) 8 | - [immich-machine-learning](./bjw-s.github.io-helm-charts-app-template-immich-machine-learning) 9 | - [immich-microservices](./bjw-s.github.io-helm-charts-app-template-immich-microservices) 10 | 11 | For redis and postgres, there are multiple helm charts: 12 | - Redis: [app-template/redis](./bjw-s.github.io-helm-charts-app-template-immich-redis), [bitnami/redis](./charts.bitnami.com-bitnami-redis) 13 | - PostgreSQL: operator [cloudnative-pg/cloudnative-pg](./cloudnative-pg.github.io-charts-cloudnative-pg-postgres), bitnami [bitnami/postgresql](./charts.bitnami.com-bitnami-postgresql) -------------------------------------------------------------------------------- /web/src/info/immich-typesense.md: -------------------------------------------------------------------------------- 1 | [Immich](https://github.com/immich-app/immich) is a self-hosted photo and video backup solution. 
2 | 3 | For immich, there are multiple helm deployments: 4 | 5 | - [immich-web](./bjw-s.github.io-helm-charts-app-template-immich-web) 6 | - [immich-server](./bjw-s.github.io-helm-charts-app-template-immich-server) 7 | - [**immich-typesense**](./bjw-s.github.io-helm-charts-app-template-immich-typesense) **(this one)** 8 | - [immich-machine-learning](./bjw-s.github.io-helm-charts-app-template-immich-machine-learning) 9 | - [immich-microservices](./bjw-s.github.io-helm-charts-app-template-immich-microservices) 10 | 11 | For redis and postgres, there are multiple helm charts: 12 | - Redis: [app-template/redis](./bjw-s.github.io-helm-charts-app-template-immich-redis), [bitnami/redis](./charts.bitnami.com-bitnami-redis) 13 | - PostgreSQL: operator [cloudnative-pg/cloudnative-pg](./cloudnative-pg.github.io-charts-cloudnative-pg-postgres), bitnami [bitnami/postgresql](./charts.bitnami.com-bitnami-postgresql) -------------------------------------------------------------------------------- /web/src/info/immich-machine-learning.md: -------------------------------------------------------------------------------- 1 | [Immich](https://github.com/immich-app/immich) is a self-hosted photo and video backup solution. 
2 | 3 | For immich, there are multiple helm deployments: 4 | 5 | - [immich-web](./bjw-s.github.io-helm-charts-app-template-immich-web) 6 | - [immich-server](./bjw-s.github.io-helm-charts-app-template-immich-server) 7 | - [immich-typesense](./bjw-s.github.io-helm-charts-app-template-immich-typesense) 8 | - [**immich-machine-learning**](./bjw-s.github.io-helm-charts-app-template-immich-machine-learning) **(this one)** 9 | - [immich-microservices](./bjw-s.github.io-helm-charts-app-template-immich-microservices) 10 | 11 | For redis and postgres, there are multiple helm charts: 12 | - Redis: [app-template/redis](./bjw-s.github.io-helm-charts-app-template-immich-redis), [bitnami/redis](./charts.bitnami.com-bitnami-redis) 13 | - PostgreSQL: operator [cloudnative-pg/cloudnative-pg](./cloudnative-pg.github.io-charts-cloudnative-pg-postgres), bitnami [bitnami/postgresql](./charts.bitnami.com-bitnami-postgresql) -------------------------------------------------------------------------------- /web/src/info/immich-microservices.md: -------------------------------------------------------------------------------- 1 | [Immich](https://github.com/immich-app/immich) is a self-hosted photo and video backup solution. 
2 | 3 | For immich, there are multiple helm deployments: 4 | 5 | - [immich-web](./bjw-s.github.io-helm-charts-app-template-immich-web) 6 | - [immich-server](./bjw-s.github.io-helm-charts-app-template-immich-server) 7 | - [immich-typesense](./bjw-s.github.io-helm-charts-app-template-immich-typesense) 8 | - [immich-machine-learning](./bjw-s.github.io-helm-charts-app-template-immich-machine-learning) 9 | - [**immich-microservices**](./bjw-s.github.io-helm-charts-app-template-immich-microservices) **(this one)** 10 | 11 | For redis and postgres, there are multiple helm charts: 12 | - Redis: [app-template/redis](./bjw-s.github.io-helm-charts-app-template-immich-redis), [bitnami/redis](./charts.bitnami.com-bitnami-redis) 13 | - PostgreSQL: operator [cloudnative-pg/cloudnative-pg](./cloudnative-pg.github.io-charts-cloudnative-pg-postgres), bitnami [bitnami/postgresql](./charts.bitnami.com-bitnami-postgresql) -------------------------------------------------------------------------------- /web/src/info/node-feature-discovery.md: -------------------------------------------------------------------------------- 1 | [Node Feature Discovery](https://github.com/kubernetes-sigs/node-feature-discovery) is a Kubernetes add-on that helps automate the discovery and configuration of node features such as CPU, memory, and network interfaces. 2 | 3 | Most people in k8s at home use Node Feature Discovery to ensure that pods that need video transcoding capabilities are running on a node with an Intel GPU (which has excellent video transcoding capabilities). But also for detecting a Zigbee/Z-Wave USB sticks. 
For example, the following code would guarantee that a pod will run on a node with an Intel GPU:
any[] 6 | }[] 7 | }) { 8 | return 9 | 10 | 11 | {props.headers.map((header) => { 12 | return 15 | } 16 | )} 17 | 18 | 19 | 20 | {props.rows.map((row) => { 21 | return 22 | {row.data.map((cell, i) => { 23 | return 26 | })} 27 | 28 | } 29 | )} 30 | 31 |
{header}
{cell}
32 | } -------------------------------------------------------------------------------- /web/src/pages/repo.tsx: -------------------------------------------------------------------------------- 1 | import type { RepoPageData } from "../generators/helm-release/models"; 2 | import Table from "../components/table"; 3 | import dayjs from "dayjs"; 4 | 5 | import relativeTime from "dayjs/plugin/relativeTime"; 6 | import Icon from "../components/icon"; 7 | dayjs.extend(relativeTime); 8 | 9 | 10 | export function Repo(props: RepoPageData) { 11 | return
12 |

Helm Releases from {props.name}

13 | a.timestamp - b.timestamp).reverse().map((release) => ({ 16 | key: "examples" + release.chart + release.name, 17 | data: [ 18 | 19 | 20 | {release.name} 21 | , 22 | release.chart, 23 | dayjs.unix(release.timestamp).fromNow(), 24 | release.version, 25 | ] 26 | 27 | }))} 28 | /> 29 | 30 | ; 31 | } -------------------------------------------------------------------------------- /web/src/pages/top.tsx: -------------------------------------------------------------------------------- 1 | import Table from "../components/table"; 2 | 3 | 4 | export function Top(props: any) { 5 | const {interestingIdToName, repoAlsoHasMap} = props.repoAlsoHas; 6 | 7 | return
8 |

Top Repositories

9 |
({ 12 | key: "top" + repo.url, 13 | data: [ 14 | {repo.count}, 15 | 16 | {repo.name} 17 | , 18 | repo.stars, 19 |
20 | {(repoAlsoHasMap?.[repo.name] || []).sort().map((id: any) => ( 21 | 22 | {interestingIdToName[id]} 23 | 24 | ))} 25 |
26 | ] 27 | }))} 28 | /> 29 | ; 30 | } -------------------------------------------------------------------------------- /web/src/entry-client.tsx: -------------------------------------------------------------------------------- 1 | import ReactDOM from 'react-dom/client' 2 | import { BrowserRouter } from 'react-router-dom' 3 | import App, { AppData } from './App' 4 | import pako from 'pako'; 5 | 6 | function b64DecodeUnicode(str: string) { 7 | // const raw = decodeURIComponent(Array.prototype.map.call(atob(str), function(c) { 8 | // return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2) 9 | // }).join('')) 10 | const encoder = new TextEncoder(); 11 | const decoder = new TextDecoder('utf-8') 12 | 13 | const input = Uint8Array.from(atob(str), c => c.charCodeAt(0)); 14 | 15 | return decoder.decode(pako.ungzip(input)); 16 | } 17 | 18 | // app data is passed from server to client via window.__APP_DATA__ 19 | declare global { 20 | interface Window { 21 | __APP_DATA__: string, 22 | __PAGE_DATA__: string, 23 | } 24 | } 25 | const appData = JSON.parse(b64DecodeUnicode(window.__APP_DATA__)) as AppData; 26 | const pageData = JSON.parse(b64DecodeUnicode(window.__PAGE_DATA__)); 27 | 28 | ReactDOM.hydrateRoot( 29 | document.getElementById('app')!, 30 | 31 | 32 | , 33 | ) 34 | console.log('hydrated') -------------------------------------------------------------------------------- /web/e2e/hr.spec.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from '@playwright/test'; 2 | 3 | test('test helm release page', async ({ page }) => { 4 | await page.goto('/hr/charts.jetstack.io-cert-manager'); 5 | await expect(page.$$('text="is a certificate management controller for Kubernetes."')).toBeTruthy(); 6 | 7 | // Top Repositories (5 out of N), find out N 8 | const topRepositories = await page.$$('h4:has-text("Top Repositories")'); 9 | await expect(topRepositories.length).toBe(1); 10 | const topRepositoriesText = await 
topRepositories[0].innerText(); 11 | // use regex 12 | const regex = /Top\s*Repositories \(5 out of (\d+)\)/; 13 | const matches = topRepositoriesText.match(regex); 14 | const topRepositoriesCount = parseInt(matches![1]); 15 | await expect(topRepositoriesCount).toBeGreaterThan(80); 16 | 17 | // installCRDs (98) 18 | const installCRDs = await page.$$('a:has-text("installCRDs")'); 19 | await expect(installCRDs.length).toBe(1); 20 | const installCRDsText = await installCRDs[0].innerText(); 21 | // use regex 22 | const regex2 = /installCRDs \((\d+)\)/; 23 | const matches2 = installCRDsText.match(regex2); 24 | const installCRDsCount = parseInt(matches2![1]); 25 | await expect(installCRDsCount).toBeGreaterThan(50); 26 | }); 27 | -------------------------------------------------------------------------------- /web/e2e/grep.spec.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from '@playwright/test'; 2 | 3 | test('test grep', async ({ page }) => { 4 | await page.goto('.'); 5 | // input field should be focused 6 | await expect(page.locator('[placeholder="Search for a helm release..."]')).toBeFocused(); 7 | 8 | await page.locator('[placeholder="Search for a helm release..."]').type('grep '); 9 | await expect(page).toHaveURL('/grep#grep%20'); 10 | 11 | await expect(page.locator('[placeholder="Search for a grep pattern..."]')).toBeFocused(); 12 | 13 | // search for "image.repository" 14 | await page.locator('[placeholder="Search for a grep pattern..."]').type('image.repository'); 15 | 16 | // search image.repository 17 | // const expand = await page 18 | // .getByRole('listitem') 19 | // .filter({ has: page.getByRole('heading', { name: 'image.repository', exact: true })}) 20 | // .getByRole('button') 21 | // // svg with title: 'Expand' 22 | // .filter({ has: page.getByRole('img', { name: 'Expand', exact: true })}) 23 | // .first(); 24 | // // check if expand is visible 25 | // await expect(expand).toBeVisible(); 
run: yarn install --frozen-lockfile
release..."]').click(); 9 | // Fill [placeholder="search a chart"] 10 | await page.locator('[placeholder="Search for a helm release..."]').fill('plex'); 11 | // await expect(page).toHaveURL('#/plex'); 12 | 13 | 14 | // code to check if the search results are correct 15 | // 16 | // check if table has plex, parse count value and check if it is above 15 17 | // first select row with plex 18 | const rows = await page.locator('table tbody tr:has(a:has-text("plex"))').all(); 19 | // at least one row should be found 20 | await expect(rows.length).toBeGreaterThan(0); 21 | // get the count value 22 | const count = await rows[0].locator('td:nth-of-type(3)').innerText(); 23 | // check if count is above or equal to 5 24 | await expect(parseInt(count)).toBeGreaterThan(4); 25 | 26 | // click on first cell of first row 27 | await rows[0].locator('td:nth-of-type(1) a').click(); 28 | 29 | // check url, it must be either of the two (OCI vs non-OCI) 30 | try { 31 | await expect(page).toHaveURL('/hr/bjw-s-labs.github.io-helm-charts-app-template-plex'); 32 | } catch (e) { 33 | await expect(page).toHaveURL('/hr/ghcr.io-bjw-s-labs-helm-app-template-plex'); 34 | } 35 | }); 36 | -------------------------------------------------------------------------------- /web/e2e/dynamic.spec.ts: -------------------------------------------------------------------------------- 1 | import { test, expect } from '@playwright/test'; 2 | 3 | test('test dynamic page', async ({ page }) => { 4 | await page.goto('.'); 5 | 6 | 7 | // Click [placeholder="search a chart"] 8 | await expect(page.locator('[placeholder="Search for a helm release..."]')).toBeFocused(); 9 | // Fill [placeholder="search a chart"] 10 | await page.locator('[placeholder="Search for a helm release..."]').type('istio'); 11 | // await expect(page).toHaveURL('#/istio'); 12 | 13 | 14 | // code to check if the search results are correct 15 | // 16 | // check if table has plex, parse count value and check if it is above 15 17 | // first select row 
with plex 18 | const rows = await page.locator('table tbody tr:has(a:has-text("istio"))').all(); 19 | // at least one row should be found 20 | await expect(rows.length).toBeGreaterThan(0); 21 | // get the count value 22 | const lastRow = rows[rows.length - 1]; 23 | const count = await lastRow.locator('td:nth-of-type(3)').innerText(); 24 | // count < 3 25 | await expect(parseInt(count)).toBeLessThanOrEqual(3); 26 | 27 | const name = await lastRow.locator('td:nth-of-type(1) a').innerText(); 28 | 29 | await expect(name).toContain('istio'); 30 | 31 | // click on first cell of first row 32 | await lastRow.locator('td:nth-of-type(1) a').click(); 33 | 34 | // check url, /.*${name}/ 35 | await expect(page).toHaveURL(new RegExp(`.*${name}(#hr)?$`)); 36 | 37 | await expect(page.getByText(/All\s*Repositories/)).toBeVisible() 38 | 39 | }); 40 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs = { 3 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11"; 4 | devshell.url = "github:numtide/devshell"; 5 | 6 | }; 7 | 8 | outputs = inputs@{ flake-parts, ... }: 9 | flake-parts.lib.mkFlake { inherit inputs; } { 10 | imports = [ 11 | inputs.devshell.flakeModule 12 | ]; 13 | systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-linux" "aarch64-darwin" ]; 14 | 15 | perSystem = { config, self', inputs', pkgs, lib, system, ... 
}: 16 | let 17 | nodejs = pkgs.nodejs_23; 18 | python = pkgs.python3; 19 | in 20 | { 21 | devshells.default = { 22 | env = [ 23 | { 24 | name = "BROWSER_PATH"; 25 | value = pkgs.playwright-driver.browsers; 26 | } 27 | { 28 | name = "PLAYWRIGHT_SKIP_VALIDATE_HOST_REQUIREMENTS"; 29 | value = "true"; 30 | } 31 | ]; 32 | commands = [ 33 | { 34 | help = "download repos"; 35 | name = "download"; 36 | command = "./scripts/download.mjs"; 37 | } 38 | { 39 | help = "run frontend"; 40 | name = "run"; 41 | command = "cd web/; yarn run dev"; 42 | } 43 | ]; 44 | packages = [ 45 | (python.withPackages (p: with p; [ 46 | requests 47 | ruamel-yaml 48 | pydantic 49 | ])) 50 | pkgs.sqlite 51 | pkgs.curl 52 | pkgs.playwright-driver 53 | pkgs.nodePackages.zx 54 | pkgs.nodePackages.npm 55 | pkgs.nodePackages.yarn 56 | ]; 57 | }; 58 | }; 59 | }; 60 | } 61 | -------------------------------------------------------------------------------- /web/tailwind.config.cjs: -------------------------------------------------------------------------------- 1 | /** @type {import('tailwindcss').Config} */ 2 | module.exports = { 3 | // darkMode: ["class"], 4 | content: [ 5 | "./index.html", 6 | "./src/**/*.{js,ts,jsx,tsx,md,html}", 7 | ], 8 | theme: { 9 | extend: { 10 | borderRadius: { 11 | lg: 'var(--radius)', 12 | md: 'calc(var(--radius) - 2px)', 13 | sm: 'calc(var(--radius) - 4px)' 14 | }, 15 | colors: { 16 | background: 'hsl(var(--background))', 17 | foreground: 'hsl(var(--foreground))', 18 | card: { 19 | DEFAULT: 'hsl(var(--card))', 20 | foreground: 'hsl(var(--card-foreground))' 21 | }, 22 | popover: { 23 | DEFAULT: 'hsl(var(--popover))', 24 | foreground: 'hsl(var(--popover-foreground))' 25 | }, 26 | primary: { 27 | DEFAULT: 'hsl(var(--primary))', 28 | foreground: 'hsl(var(--primary-foreground))' 29 | }, 30 | secondary: { 31 | DEFAULT: 'hsl(var(--secondary))', 32 | foreground: 'hsl(var(--secondary-foreground))' 33 | }, 34 | muted: { 35 | DEFAULT: 'hsl(var(--muted))', 36 | foreground: 
'hsl(var(--muted-foreground))' 37 | }, 38 | accent: { 39 | DEFAULT: 'hsl(var(--accent))', 40 | foreground: 'hsl(var(--accent-foreground))' 41 | }, 42 | destructive: { 43 | DEFAULT: 'hsl(var(--destructive))', 44 | foreground: 'hsl(var(--destructive-foreground))' 45 | }, 46 | border: 'hsl(var(--border))', 47 | input: 'hsl(var(--input))', 48 | ring: 'hsl(var(--ring))', 49 | chart: { 50 | '1': 'hsl(var(--chart-1))', 51 | '2': 'hsl(var(--chart-2))', 52 | '3': 'hsl(var(--chart-3))', 53 | '4': 'hsl(var(--chart-4))', 54 | '5': 'hsl(var(--chart-5))' 55 | } 56 | } 57 | } 58 | }, 59 | plugins: [ 60 | require('@tailwindcss/typography'), 61 | require("tailwindcss-animate") 62 | ], 63 | } 64 | -------------------------------------------------------------------------------- /scripts/download.mjs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env zx 2 | 3 | $.verbose = false; 4 | 5 | let date = null; 6 | 7 | // if argument, then it is a date 8 | if (process.argv[3]) { 9 | date = process.argv[3]; 10 | console.log("using date", date); 11 | } 12 | 13 | 14 | // download latest releases from github 15 | const repo = 'whazor/k8s-at-home-search'; 16 | // use $`curl` for download 17 | const { stdout } = await $`curl -s https://api.github.com/repos/${repo}/releases?per_page=100`; 18 | 19 | const sortedReleases = JSON.parse(stdout).map(r => r.name).sort(); 20 | const firstName = sortedReleases[0]; 21 | const lastName = sortedReleases[sortedReleases.length - 1]; 22 | const monthOldName = sortedReleases.length > 33 && sortedReleases[sortedReleases.length - 32]; 23 | 24 | const releases = JSON.parse(stdout); 25 | // print first and last release 26 | console.log(`from release: ${firstName}, to latest release: ${lastName}, one month ago: ${monthOldName}`); 27 | 28 | 29 | 30 | let release; 31 | if (date) { 32 | release = releases.find(({ name }) => name.includes(date)); 33 | } else { 34 | release = releases.find(({ name }) => name === lastName); 
35 | } 36 | if (!release) { 37 | console.error("no release found"); 38 | process.exit(1); 39 | } 40 | console.log(`found release: ${release.name}`); 41 | 42 | // parse json 43 | const { assets } = release; 44 | 45 | const repos = assets.find(({ name }) => name === 'repos.db')["browser_download_url"]; 46 | const reposExtended = assets.find(({ name }) => name === 'repos-extended.db')["browser_download_url"]; 47 | 48 | console.log(`curl -s -L ${repos} -o ./web/repos${ 49 | date ? `-${date}` : "" 50 | }.db`); 51 | console.log(`curl -s -L ${reposExtended} -o ./web/repos-extended${ 52 | date ? `-${date}` : "" 53 | }.db`); 54 | 55 | // download files 56 | await $`curl -s -L ${repos} -o ./web/repos${ 57 | date ? `-${date}` : "" 58 | }.db`; 59 | await $`curl -s -L ${reposExtended} -o ./web/repos-extended${ 60 | date ? `-${date}` : "" 61 | }.db`; -------------------------------------------------------------------------------- /web/src/components/ui/tabs.tsx: -------------------------------------------------------------------------------- 1 | import * as React from "react" 2 | import * as TabsPrimitive from "@radix-ui/react-tabs" 3 | 4 | import { cn } from "@/lib/utils" 5 | 6 | const Tabs = TabsPrimitive.Root 7 | 8 | const TabsList = React.forwardRef< 9 | React.ElementRef, 10 | React.ComponentPropsWithoutRef 11 | >(({ className, ...props }, ref) => ( 12 | 20 | )) 21 | TabsList.displayName = TabsPrimitive.List.displayName 22 | 23 | const TabsTrigger = React.forwardRef< 24 | React.ElementRef, 25 | React.ComponentPropsWithoutRef 26 | >(({ className, ...props }, ref) => ( 27 | 35 | )) 36 | TabsTrigger.displayName = TabsPrimitive.Trigger.displayName 37 | 38 | const TabsContent = React.forwardRef< 39 | React.ElementRef, 40 | React.ComponentPropsWithoutRef 41 | >(({ className, ...props }, ref) => ( 42 | 50 | )) 51 | TabsContent.displayName = TabsPrimitive.Content.displayName 52 | 53 | export { Tabs, TabsList, TabsTrigger, TabsContent } 54 | 
-------------------------------------------------------------------------------- /web/prerender.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ts-node 2 | import fs from 'node:fs' 3 | import path from 'node:path' 4 | import { Renderer } from './renderer' 5 | import type { RenderFunction } from './src/entry-server' 6 | import { dirname } from 'path'; 7 | import { fileURLToPath } from 'url'; 8 | 9 | const __dirname = dirname(fileURLToPath(import.meta.url)); 10 | 11 | const toAbsolute = (p: string) => path.resolve(__dirname, p) 12 | 13 | const template = fs.readFileSync(toAbsolute('dist/static/index.html'), 'utf-8') 14 | 15 | const renderer = new Renderer() 16 | ; (async () => { 17 | await renderer.prepareData(); 18 | 19 | 20 | const { render }: { render: RenderFunction } = await import('./dist/server/entry-server.js'); 21 | 22 | async function generatePage(url: string) { 23 | const html = await renderer.generatePage(render, url, template); 24 | await fs.promises.writeFile(toAbsolute(`dist/static${url + (url.endsWith('/') ? 
'index' : '')}.html`), html); 25 | } 26 | 27 | const fileData = renderer.jsonFilesData; 28 | 29 | let pageGenerators: Array> = []; 30 | 31 | const folders = new Set(); 32 | for (const key of renderer.getPages()) { 33 | const folder = key.split('/').slice(0, -1).join('/'); 34 | if (!folders.has(folder)) { 35 | const folderPath = path.join(__dirname, 'dist/static/', folder); 36 | if (!path.isAbsolute(folderPath)) { 37 | throw new Error('Folder path is not absolute: ' + folderPath); 38 | } 39 | 40 | await fs.promises.mkdir(path.resolve(folderPath), { recursive: true }); 41 | folders.add(folder); 42 | } 43 | 44 | pageGenerators.push(generatePage(key)); 45 | } 46 | 47 | for (const [i, jsonPageDataString] of Object.entries(fileData)) { 48 | await fs.promises.writeFile(toAbsolute(`dist/static/hr/data-${i}.json`), jsonPageDataString); 49 | } 50 | 51 | const routesToPrerender = ["/"]; 52 | for (const url of routesToPrerender) { 53 | pageGenerators.push(generatePage(url)); 54 | } 55 | 56 | const sitemap = await renderer.generateSitemap(); 57 | await fs.promises.writeFile(toAbsolute(`dist/static/sitemap.xml`), sitemap); 58 | 59 | await Promise.all(pageGenerators); 60 | })() 61 | -------------------------------------------------------------------------------- /web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web", 3 | "private": true, 4 | "type": "module", 5 | "version": "0.0.0", 6 | "scripts": { 7 | "dev": "tsx ./server.ts", 8 | "dev:watch": "tsx watch --clear-screen=false --exclude *.timestamp-*.* --exclude tailwind.config.cjs ./server.ts", 9 | "build": "tsc && yarn run build:client && yarn run build:server", 10 | "build:client": "vite build --outDir dist/client --ssrManifest", 11 | "build:server": "vite build --ssr src/entry-server.tsx --outDir dist/server", 12 | "generate": "rm -rf dist; tsc && vite build --outDir dist/static --ssrManifest && yarn run build:server && tsx prerender.ts", 13 | "serve": "vite 
preview --outDir dist/static", 14 | "test": "playwright test" 15 | }, 16 | "dependencies": { 17 | "@iconify/react": "^4.0.1", 18 | "@radix-ui/react-tabs": "^1.1.2", 19 | "@types/marked": "^4.0.8", 20 | "caniuse-lite": "^1.0.30001565", 21 | "class-variance-authority": "^0.7.1", 22 | "clsx": "^2.1.1", 23 | "dayjs": "^1.11.7", 24 | "dompurify": "^2.4.3", 25 | "jsdom": "^21.0.0", 26 | "lucide-react": "^0.473.0", 27 | "marked": "^4.2.12", 28 | "pako": "^2.1.0", 29 | "react": "^18.2.0", 30 | "react-dom": "^18.2.0", 31 | "react-github-btn": "^1.4.0", 32 | "react-router-dom": "^6.6.2", 33 | "sqlite": "^4.1.2", 34 | "sqlite3": "^5.1.4", 35 | "tailwind-merge": "^2.6.0", 36 | "tailwindcss-animate": "^1.0.7" 37 | }, 38 | "devDependencies": { 39 | "@mdx-js/rollup": "^2.2.1", 40 | "@playwright/test": "^1.47.0", 41 | "@tailwindcss/typography": "^0.5.9", 42 | "@types/dompurify": "^2.4.0", 43 | "@types/express": "^4.17.16", 44 | "@types/jsdom": "^20.0.1", 45 | "@types/node": "^18.11.18", 46 | "@types/pako": "^2.0.0", 47 | "@types/react": "^18.0.26", 48 | "@types/react-dom": "^18.0.9", 49 | "@vitejs/plugin-react-swc": "^3.0.0", 50 | "autoprefixer": "^10.4.13", 51 | "cssnano": "^5.1.14", 52 | "express": "^4.18.2", 53 | "nodemon": "^2.0.20", 54 | "postcss": "^8.4.21", 55 | "tailwindcss": "^3.3.6", 56 | "ts-node": "^10.9.1", 57 | "tsconfig-paths": "^4.1.2", 58 | "tsx": "^4.19.2", 59 | "typescript": "^4.9.3", 60 | "vite": "^4.0.0" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /.github/workflows/update.yaml: -------------------------------------------------------------------------------- 1 | name: Update-search-index 2 | 3 | on: 4 | schedule: 5 | - cron: "1 0 * * *" 6 | workflow_dispatch: 7 | jobs: 8 | update-search-index: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v2 12 | with: 13 | submodules: false 14 | - uses: actions/setup-python@v2 15 | with: 16 | python-version: "3.x" 17 | - name: setup git config 18 | 
run: | 19 | git config user.name "GitHub Actions Bot" 20 | git config user.email "<>" 21 | - name: Get current date 22 | id: date 23 | run: echo "::set-output name=date::$(date +'%Y-%m-%d')" 24 | - name: Install dependencies 25 | run: pip install -r requirements.txt 26 | - name: Get interesting repositories 27 | run: python3 interesting.py 28 | env: 29 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | - name: Build DB 31 | run: python3 init-db.py 32 | - name: Download repos 33 | run: python3 download.py 34 | - name: Update search index 35 | run: python3 search.py 36 | - name: Create Release 37 | id: create_release 38 | uses: actions/create-release@v1 39 | env: 40 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 41 | with: 42 | tag_name: ${{ steps.date.outputs.date }} 43 | release_name: Release ${{ steps.date.outputs.date }} 44 | draft: false 45 | prerelease: true 46 | - name: Upload Release Asset 1/2 47 | id: upload-release-asset-1 48 | uses: actions/upload-release-asset@v1 49 | env: 50 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 51 | with: 52 | upload_url: ${{ steps.create_release.outputs.upload_url }} 53 | asset_path: ./repos.db 54 | asset_name: repos.db 55 | asset_content_type: application/vnd.sqlite3 56 | - name: Upload Release Asset 2/2 57 | id: upload-release-asset-2 58 | uses: actions/upload-release-asset@v1 59 | env: 60 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 61 | with: 62 | upload_url: ${{ steps.create_release.outputs.upload_url }} 63 | asset_path: ./repos-extended.db 64 | asset_name: repos-extended.db 65 | asset_content_type: application/vnd.sqlite3 66 | -------------------------------------------------------------------------------- /web/server.ts: -------------------------------------------------------------------------------- 1 | import fs from 'node:fs' 2 | import path from 'node:path' 3 | import express from 'express' 4 | import { createServer as createViteServer } from 'vite' 5 | import { Renderer } from './renderer' 6 | import { fileURLToPath } from 
'node:url' 7 | 8 | const __dirname = path.dirname(fileURLToPath(import.meta.url)) 9 | 10 | const toAbsolute = (p: string) => path.resolve(__dirname, p) 11 | 12 | const template = fs.readFileSync(toAbsolute('index.html'), 'utf-8') 13 | 14 | async function createServer() { 15 | const app = express() 16 | 17 | // Create Vite server in middleware mode and configure the app type as 18 | // 'custom', disabling Vite's own HTML serving logic so parent server 19 | // can take control 20 | const vite = await createViteServer({ 21 | server: { middlewareMode: true }, 22 | appType: 'custom' 23 | }) 24 | 25 | const renderer = new Renderer(); 26 | await renderer.prepareData(); 27 | 28 | // use vite's connect instance as middleware 29 | // if you use your own express router (express.Router()), you should use router.use 30 | app.use(vite.middlewares) 31 | 32 | // /hr/data-*.json => dist/static/hr/data-*.json 33 | app.use("/hr/data-:id.json", (req, res) => { 34 | const id = parseInt(req.params.id); 35 | if (id in renderer.jsonFilesData) { 36 | // input is already json stringified 37 | // res.json(renderer.jsonFilesData[id]); 38 | res.status(200).header('Content-Type', 'application/json').send(renderer.jsonFilesData[id]); 39 | } else { 40 | res.status(404).end(); 41 | } 42 | }); 43 | 44 | app.use(async (req: express.Request, res: express.Response, next: express.NextFunction) => { 45 | const url = req.originalUrl 46 | 47 | try { 48 | const transformed = await vite.transformIndexHtml(url, template); 49 | const { render } = await vite.ssrLoadModule('/src/entry-server.tsx'); 50 | const html = await renderer.generatePage(render, url, transformed); 51 | 52 | res.status(200).set({ 'Content-Type': 'text/html' }).end(html); 53 | } catch (e) { 54 | // If an error is caught, let Vite fix the stack trace so it maps back to 55 | // your actual source code. 
56 | vite.ssrFixStacktrace(e as Error) 57 | next(e) 58 | } 59 | }) 60 | 61 | app.listen(5173) 62 | } 63 | 64 | createServer() 65 | -------------------------------------------------------------------------------- /web/src/components/table/sorting.ts: -------------------------------------------------------------------------------- 1 | 2 | type SortTypes = 3 | "alphabatical" | "version" | "number" | "date" | "size" | "cpu"; 4 | 5 | const sorters = { 6 | "alphabatical": (a: string, b: string) => a.localeCompare(b), 7 | "version": (a: string, b: string) => { 8 | const aSplit = a.split("."); 9 | const bSplit = b.split("."); 10 | for (let i = 0; i < Math.min(aSplit.length, bSplit.length); i++) { 11 | if (parseInt(aSplit[i]) > parseInt(bSplit[i])) { 12 | return 1; 13 | } else if (parseInt(aSplit[i]) < parseInt(bSplit[i])) { 14 | return -1; 15 | } 16 | } 17 | return 0; 18 | }, 19 | "number": (a: string, b: string) => parseInt(a) - parseInt(b), 20 | "date": (a: string, b: string) => { 21 | const date1 = new Date(a); 22 | const date2 = new Date(b); 23 | return date1.getTime() - date2.getTime(); 24 | }, 25 | "size": (a: string, b: string) => { 26 | const suffix: Record = { 27 | "K": 1000, 28 | "M": Math.pow(1000, 2), 29 | "G": Math.pow(1000, 3), 30 | "T": Math.pow(1000, 4), 31 | "P": Math.pow(1000, 5), 32 | "E": Math.pow(1000, 6), 33 | "Ki": 1024, 34 | "Mi": Math.pow(1024, 2), 35 | "Gi": Math.pow(1024, 3), 36 | "Ti": Math.pow(1024, 4), 37 | "Pi": Math.pow(1024, 5), 38 | "Ei": Math.pow(1024, 6) 39 | } 40 | const regex = /(\d+)([KMGTPEi]*)B?/; 41 | const aSplit = regex.exec(a); 42 | const bSplit = regex.exec(b); 43 | if (aSplit && bSplit) { 44 | const aNum = parseInt(aSplit[1]); 45 | const bNum = parseInt(bSplit[1]); 46 | const aSuffix = aSplit[2]; 47 | const bSuffix = bSplit[2]; 48 | if (aSuffix in suffix && bSuffix in suffix) { 49 | return aNum * suffix[aSuffix] - bNum * suffix[bSuffix]; 50 | } 51 | } 52 | return a.localeCompare(b); 53 | }, 54 | "cpu": (a: string, b: string) 
=> { 55 | const normalize = (cpu: string) => { 56 | if (cpu.endsWith("m")) { 57 | return parseInt(cpu.slice(0, -1)); 58 | } else { 59 | return (parseFloat(cpu) * 1000); 60 | } 61 | }; 62 | return normalize(a) - normalize(b); 63 | } 64 | }; 65 | 66 | function detectSort(input: string[]) { 67 | const versionRegex = /(\d+\.)+\d+/; 68 | } -------------------------------------------------------------------------------- /scanners/flux_oci_repository.py: -------------------------------------------------------------------------------- 1 | import re 2 | from typing import Optional 3 | 4 | from info_model import InfoModel 5 | 6 | # name, tag, url, namespace 7 | 8 | class FluxOCIRepository(InfoModel): 9 | name: str 10 | tag: str 11 | url: str 12 | namespace: Optional[str] 13 | repo_name: str 14 | 15 | class FluxOCIRepositoryScanner: 16 | api_version = "source.toolkit.fluxcd.io" 17 | kind = "OCIRepository" 18 | def pre_check(self, stream) -> bool: 19 | try: 20 | contains_api_version = False 21 | contains_kind = False 22 | for line in stream: 23 | if line.strip().startswith("apiVersion: " + self.api_version): 24 | contains_api_version = True 25 | if line.strip() == "kind: " + self.kind: 26 | contains_kind = True 27 | if contains_api_version and contains_kind: 28 | return True 29 | except UnicodeDecodeError as e: 30 | print("unicode error", e) 31 | return False 32 | 33 | def check(self, walk) -> bool: 34 | return walk('apiVersion', lambda x: x.startswith(self.api_version)) and \ 35 | walk('kind', lambda x: x == self.kind) and \ 36 | walk('metadata.name', lambda x: re.match(r'^[^{}]+$', x) is not None) 37 | 38 | def parse(self, walk, rest: InfoModel) -> FluxOCIRepository: 39 | name = walk('metadata.name') 40 | tag = walk('spec.ref.tag') 41 | url = walk('spec.url') 42 | namespace = walk('metadata.namespace') 43 | 44 | return FluxOCIRepository.parse_obj(rest.dict() | { 45 | 'name': name, 46 | 'tag': tag, 47 | 'url': url, 48 | 'namespace': namespace, 49 | 'repo_name': 
rest.repo_name, 50 | }) 51 | 52 | def create_table(self, c1, c2): 53 | c1.execute('''DROP TABLE IF EXISTS flux_oci_repository''') 54 | c1.execute('''CREATE TABLE IF NOT EXISTS flux_oci_repository 55 | (name text NOT NULL, 56 | tag text NOT NULL, 57 | url text NOT NULL, 58 | namespace text NULL, 59 | repo_name text NOT NULL)''') 60 | 61 | def insert(self, c1, c2, data: FluxOCIRepository): 62 | c1.execute( 63 | "INSERT INTO flux_oci_repository VALUES (?, ?, ?, ?, ?)", 64 | ( 65 | data.name, 66 | data.tag, 67 | data.url, 68 | data.namespace, 69 | data.repo_name, 70 | )) 71 | 72 | def test(self, c1, c2) -> bool: 73 | c1.execute("SELECT count(*) FROM flux_oci_repository") 74 | c = c1.fetchone()[0] 75 | print("flux_oci_repository count", c) 76 | return c > 100 77 | -------------------------------------------------------------------------------- /web/src/utils.ts: -------------------------------------------------------------------------------- 1 | 2 | export function mode(array: K[]) { 3 | if (array.length == 0) 4 | return undefined; 5 | let modeMap: Record = {} as Record; 6 | let maxEl = array[0], maxCount = 1; 7 | for (let i = 0; i < array.length; i++) { 8 | let el = array[i]; 9 | if (modeMap[el] == null) 10 | modeMap[el] = 1; 11 | else 12 | modeMap[el]++; 13 | if (modeMap[el] > maxCount) { 14 | maxEl = el; 15 | maxCount = modeMap[el]; 16 | } 17 | } 18 | return maxEl; 19 | } 20 | 21 | export function modeCount(array: K[]) { 22 | if (array.length == 0) 23 | return [undefined, 0]; 24 | let modeMap: Record = {} as Record; 25 | let maxEl = array[0], maxCount = 1; 26 | for (let i = 0; i < array.length; i++) { 27 | let el = array[i]; 28 | if (modeMap[el] == null) 29 | modeMap[el] = 1; 30 | else 31 | modeMap[el]++; 32 | if (modeMap[el] > maxCount) { 33 | maxEl = el; 34 | maxCount = modeMap[el]; 35 | } 36 | } 37 | return [maxEl, maxCount]; 38 | } 39 | 40 | 41 | 42 | export function simplifyURL(url: string) { 43 | if (url.startsWith('http')) { 44 | // remove https 45 | let name 
= url.replace(/https?:\/\//, '').split('/')[0]; 46 | // remove tld 47 | name = name.split('.').slice(0, -1).join('.'); 48 | // remove charts. 49 | name = name.replace(/^charts\./, ''); 50 | // remove www 51 | name = name.replace(/^www\./, ''); 52 | // remove github 53 | name = name.replace(/\.github$/, ''); 54 | return name; 55 | } 56 | 57 | if ( 58 | url.startsWith('oci://ghcr.io/') || 59 | url.startsWith('oci://quay.io/') || 60 | url.startsWith('oci://tccr.io/') 61 | ) { 62 | // Split by "/" and get everything after the third "/" 63 | const parts = url.split('/'); 64 | // parts[0] = "oci:" 65 | // parts[1] = "" (empty, because of the double slash) 66 | // parts[2] = "ghcr.io" | "quay.io" | "tccr.io" 67 | // parts[3] = actual name 68 | return parts.slice(3).join('/'); 69 | } 70 | 71 | if (url.startsWith('oci')) { 72 | // remove oci:// 73 | let name = url.split('oci://')[1]; 74 | // get domain 75 | name = name.split('.').slice(0, -1).join('.'); 76 | return name; 77 | } 78 | return url; 79 | }; 80 | -------------------------------------------------------------------------------- /scanners/flux_helm_repo.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from info_model import InfoModel 4 | 5 | class FluxHelmRepo(InfoModel): 6 | helm_repo_name: str 7 | namespace: str 8 | helm_repo_url: str 9 | interval: Optional[str] 10 | 11 | class FluxHelmRepoScanner: 12 | api_version = "source.toolkit.fluxcd.io" 13 | kind = "HelmRepository" 14 | def pre_check(self, stream) -> bool: 15 | try: 16 | contains_api_version = False 17 | contains_kind = False 18 | for line in stream: 19 | if line.strip().startswith("apiVersion: " + self.api_version): 20 | contains_api_version = True 21 | if line.strip() == "kind: " + self.kind: 22 | contains_kind = True 23 | if contains_api_version and contains_kind: 24 | return True 25 | except UnicodeDecodeError as e: 26 | print("unicode error", e) 27 | return False 28 | 29 | def 
check(self, walk) -> bool: 30 | return walk('apiVersion', lambda x: x.startswith(self.api_version)) and \ 31 | walk('kind', lambda x: x == self.kind) and \ 32 | walk('spec.url', lambda x: x is not None) and \ 33 | walk('metadata.name', lambda x: x is not None) and \ 34 | walk('metadata.namespace', lambda x: x is not None) 35 | 36 | def parse(self, walk, rest: InfoModel) -> FluxHelmRepo: 37 | name = walk('metadata.name') 38 | namespace = walk('metadata.namespace') 39 | url = walk('spec.url') 40 | interval = walk('spec.interval') 41 | return FluxHelmRepo.parse_obj(rest.dict() | { 42 | 'helm_repo_name': name, 43 | 'namespace': namespace, 44 | 'helm_repo_url': url.rstrip('/') + '/', 45 | 'interval': interval, 46 | }) 47 | 48 | def create_table(self, c1, c2): 49 | c1.execute('''DROP TABLE IF EXISTS flux_helm_repo''') 50 | c1.execute('''CREATE TABLE IF NOT EXISTS flux_helm_repo 51 | (helm_repo_name text NOT NULL, 52 | namespace text NOT NULL, 53 | helm_repo_url text NOT NULL, 54 | interval text NULL, 55 | repo_name text NOT NULL, 56 | lines number NOT NULL, 57 | url text NOT NULL, 58 | timestamp text NOT NULL)''') 59 | 60 | 61 | def insert(self, c1, c2, data: FluxHelmRepo): 62 | c1.execute( 63 | "INSERT INTO flux_helm_repo VALUES (?, ?, ?, ?, ?, ?, ?, ?)", 64 | (data.helm_repo_name, data.namespace, data.helm_repo_url, data.interval, data.repo_name, data.amount_lines, data.url, data.timestamp)) 65 | 66 | 67 | def test(self, c1, c2) -> bool: 68 | c1.execute("SELECT count(*) FROM flux_helm_repo") 69 | return c1.fetchone()[0] > 1000 70 | -------------------------------------------------------------------------------- /web/src/index.css: -------------------------------------------------------------------------------- 1 | @tailwind base; 2 | @tailwind components; 3 | @tailwind utilities; 4 | a { 5 | @apply underline text-blue-600 hover:text-blue-800 cursor-pointer dark:text-zinc-300 dark:hover:text-zinc-100 6 | } 7 | h1 { 8 | @apply font-bold dark:text-gray-300 text-3xl 9 | } 
10 | h2 { 11 | @apply font-bold dark:text-gray-300 text-2xl 12 | } 13 | h3 { 14 | @apply font-bold dark:text-gray-300 text-xl 15 | } 16 | h4 { 17 | @apply font-bold dark:text-gray-300 text-lg 18 | } 19 | h5 { 20 | @apply font-bold dark:text-gray-300 text-base 21 | } 22 | h6 { 23 | @apply font-bold dark:text-gray-300 text-sm 24 | } 25 | @layer base { 26 | :root { 27 | --background: 0 0% 100%; 28 | --foreground: 224 71.4% 4.1%; 29 | --card: 0 0% 100%; 30 | --card-foreground: 224 71.4% 4.1%; 31 | --popover: 0 0% 100%; 32 | --popover-foreground: 224 71.4% 4.1%; 33 | --primary: 220.9 39.3% 11%; 34 | --primary-foreground: 210 20% 98%; 35 | --secondary: 220 14.3% 95.9%; 36 | --secondary-foreground: 220.9 39.3% 11%; 37 | --muted: 220 14.3% 95.9%; 38 | --muted-foreground: 220 8.9% 46.1%; 39 | --accent: 220 14.3% 95.9%; 40 | --accent-foreground: 220.9 39.3% 11%; 41 | --destructive: 0 84.2% 60.2%; 42 | --destructive-foreground: 210 20% 98%; 43 | --border: 220 13% 91%; 44 | --input: 220 13% 91%; 45 | --ring: 224 71.4% 4.1%; 46 | --chart-1: 12 76% 61%; 47 | --chart-2: 173 58% 39%; 48 | --chart-3: 197 37% 24%; 49 | --chart-4: 43 74% 66%; 50 | --chart-5: 27 87% 67%; 51 | --radius: 0.5rem 52 | } 53 | @media (prefers-color-scheme: dark) { 54 | :root { 55 | --background: 224 71.4% 4.1%; 56 | --foreground: 210 20% 98%; 57 | --card: 224 71.4% 4.1%; 58 | --card-foreground: 210 20% 98%; 59 | --popover: 224 71.4% 4.1%; 60 | --popover-foreground: 210 20% 98%; 61 | --primary: 210 20% 98%; 62 | --primary-foreground: 220.9 39.3% 11%; 63 | --secondary: 215 27.9% 16.9%; 64 | --secondary-foreground: 210 20% 98%; 65 | --muted: 215 27.9% 16.9%; 66 | --muted-foreground: 217.9 10.6% 64.9%; 67 | --accent: 215 27.9% 16.9%; 68 | --accent-foreground: 210 20% 98%; 69 | --destructive: 0 62.8% 30.6%; 70 | --destructive-foreground: 210 20% 98%; 71 | --border: 215 27.9% 16.9%; 72 | --input: 215 27.9% 16.9%; 73 | --ring: 216 12.2% 83.9%; 74 | --chart-1: 220 70% 50%; 75 | --chart-2: 160 60% 45%; 76 | 
--chart-3: 30 80% 55%; 77 | --chart-4: 280 65% 60%; 78 | --chart-5: 340 75% 55% 79 | } 80 | } 81 | } 82 | @layer base { 83 | * { 84 | @apply border-border; 85 | } 86 | html, body { 87 | @apply bg-background text-foreground; 88 | } 89 | } -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "devshell": { 4 | "inputs": { 5 | "nixpkgs": "nixpkgs" 6 | }, 7 | "locked": { 8 | "lastModified": 1735644329, 9 | "narHash": "sha256-tO3HrHriyLvipc4xr+Ewtdlo7wM1OjXNjlWRgmM7peY=", 10 | "owner": "numtide", 11 | "repo": "devshell", 12 | "rev": "f7795ede5b02664b57035b3b757876703e2c3eac", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "devshell", 18 | "type": "github" 19 | } 20 | }, 21 | "flake-parts": { 22 | "inputs": { 23 | "nixpkgs-lib": "nixpkgs-lib" 24 | }, 25 | "locked": { 26 | "lastModified": 1736143030, 27 | "narHash": "sha256-+hu54pAoLDEZT9pjHlqL9DNzWz0NbUn8NEAHP7PQPzU=", 28 | "owner": "hercules-ci", 29 | "repo": "flake-parts", 30 | "rev": "b905f6fc23a9051a6e1b741e1438dbfc0634c6de", 31 | "type": "github" 32 | }, 33 | "original": { 34 | "id": "flake-parts", 35 | "type": "indirect" 36 | } 37 | }, 38 | "nixpkgs": { 39 | "locked": { 40 | "lastModified": 1722073938, 41 | "narHash": "sha256-OpX0StkL8vpXyWOGUD6G+MA26wAXK6SpT94kLJXo6B4=", 42 | "owner": "NixOS", 43 | "repo": "nixpkgs", 44 | "rev": "e36e9f57337d0ff0cf77aceb58af4c805472bfae", 45 | "type": "github" 46 | }, 47 | "original": { 48 | "owner": "NixOS", 49 | "ref": "nixpkgs-unstable", 50 | "repo": "nixpkgs", 51 | "type": "github" 52 | } 53 | }, 54 | "nixpkgs-lib": { 55 | "locked": { 56 | "lastModified": 1735774519, 57 | "narHash": "sha256-CewEm1o2eVAnoqb6Ml+Qi9Gg/EfNAxbRx1lANGVyoLI=", 58 | "type": "tarball", 59 | "url": "https://github.com/NixOS/nixpkgs/archive/e9b51731911566bbf7e4895475a87fe06961de0b.tar.gz" 60 | }, 61 | "original": { 62 | 
"type": "tarball", 63 | "url": "https://github.com/NixOS/nixpkgs/archive/e9b51731911566bbf7e4895475a87fe06961de0b.tar.gz" 64 | } 65 | }, 66 | "nixpkgs_2": { 67 | "locked": { 68 | "lastModified": 1736867362, 69 | "narHash": "sha256-i/UJ5I7HoqmFMwZEH6vAvBxOrjjOJNU739lnZnhUln8=", 70 | "owner": "NixOS", 71 | "repo": "nixpkgs", 72 | "rev": "9c6b49aeac36e2ed73a8c472f1546f6d9cf1addc", 73 | "type": "github" 74 | }, 75 | "original": { 76 | "owner": "NixOS", 77 | "ref": "nixos-24.11", 78 | "repo": "nixpkgs", 79 | "type": "github" 80 | } 81 | }, 82 | "root": { 83 | "inputs": { 84 | "devshell": "devshell", 85 | "flake-parts": "flake-parts", 86 | "nixpkgs": "nixpkgs_2" 87 | } 88 | } 89 | }, 90 | "root": "root", 91 | "version": 7 92 | } 93 | -------------------------------------------------------------------------------- /web/src/pages/index.tsx: -------------------------------------------------------------------------------- 1 | import { useMemo, useState } from "react"; 2 | import Icon from "../components/icon" 3 | 4 | type Release = { 5 | key: string, 6 | chart: string, 7 | release: string, 8 | count: number, 9 | icon?: string, 10 | group?: string 11 | } 12 | 13 | const normalizeGroup = (group: string) => { 14 | group = group.toLocaleLowerCase(); 15 | group = group.replaceAll("home automation", "home"); 16 | if(group === "internal" || group === "external") { 17 | group = ""; 18 | } 19 | return group; 20 | } 21 | export default function Home(props: {releases: Release[]}) { 22 | const [selectedGroup, setSelectedGroup] = useState(undefined); 23 | const groupedReleases = useMemo(() => 24 | props.releases.filter(r => r.group) as (Release&{group: string})[] 25 | , [props.releases]); 26 | 27 | const groups = useMemo(() => 28 | new Set(groupedReleases.map(r => normalizeGroup(r.group))) 29 | , [groupedReleases]); 30 | const groupMap = useMemo(() => 31 | groupedReleases.reduce((map, r) => { 32 | const group = normalizeGroup(r.group); 33 | if (!map[group]) { 34 | map[group] = []; 35 | } 
36 | map[group].push(r); 37 | return map; 38 | }, {} as Record) 39 | , [groupedReleases]); 40 | 41 | return ( 42 | <> 43 |
44 |

Popular releases

45 |
46 | {[...groups].filter(Boolean) 47 | .filter(group => 48 | groupMap[group].reduce((a, b) => a + b.count, 0) > 5 && 49 | groupMap[group].length > 3 50 | ) 51 | .sort((a, b) => groupMap[b].length - groupMap[a].length) 52 | .map((group, i) => { 53 | return 63 | })} 64 |
65 |
66 | 67 | { 68 | (selectedGroup ? groupMap[selectedGroup] : props.releases) 69 | .sort((a, b) => b.count - a.count).filter(a => a.count > 5).map(({key, chart, group, icon, release}) => { 70 | return ( 71 | 79 | {icon && } 80 | 81 | {release} 82 | 83 | ) 84 | })} 85 | 86 | ) 87 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kubesearch.dev 2 | Search Flux HelmReleases through [awesome k8s-at-home](https://github.com/k8s-at-home/awesome-home-kubernetes) projects, check it out at https://kubesearch.dev/. We index Flux HelmReleases from Github and Gitlab repositories with the [k8s-at-home topic](https://github.com/topics/k8s-at-home) and [kubesearch topic](https://github.com/topics/kubesearch). To include your repository in this search it must be public and then add the topic `k8s-at-home` or `kubesearch` to your GitHub Repository topics. 3 | 4 | Thanks to Toboshii and [Hajimari](https://github.com/toboshii/hajimari) for regulating icons to helm charts. 5 | 6 | And also thanks to k8s@home community for great charts and configurations. 
7 | 8 | image 9 | 10 | image 11 | 12 | ## development 13 | Overview: 14 | ```mermaid 15 | graph LR 16 | I[interesting.py] 17 | I-->|repos.json|Init[init-db.py] 18 | Init-->|repos.db: repos|download[download.py] 19 | Init-->|repos.db: repos|search 20 | download-->|repos/ submodules|search[search.py] 21 | search-->|repos.db: repos,charts|frontend 22 | ``` 23 | 24 | **To build repos.db (optional for frontend, check step below)** 25 | 26 | Python requirements: `pip install -r requirements.txt` 27 | 28 | Updating `repos.json` (can be skipped, already included in source): 29 | ``` 30 | python3 interesting.py 31 | ``` 32 | 33 | Setting up `repos.db` repos table (requires `repos.json`): 34 | ``` 35 | python3 init-db.py 36 | ``` 37 | 38 | Download repos into `repos/` (requires repo.db): 39 | ``` 40 | python3 download.py 41 | ``` 42 | 43 | Setting up `repos.db` charts table: 44 | ``` 45 | python3 search.py 46 | ``` 47 | 48 | **Setting up the frontend** 49 | 50 | ``` 51 | wget https://github.com/Whazor/k8s-at-home-search/releases/latest/download/repos.db.zz -P frontend/public/ 52 | wget https://github.com/Whazor/k8s-at-home-search/releases/latest/download/repos-extended.db.zz -P frontend/public/ 53 | 54 | cd frontend/ 55 | yarn install 56 | yarn run dev 57 | ``` 58 | 59 | ### tables 60 | 61 | **repo** 62 | | **column name** | repo_name | url | branch | stars | 63 | |-----------------|------------------|------------------------------------|-----------------|---------| 64 | | **value** | text primary key | text | text | integer | 65 | | **example** | user-reponame | "https://github.com/user/reponame" | main/master/... 
| 42 | 66 | 67 | **flux_helm_release** 68 | | **column name** | chart_name | repo_name | url | hajimari_icon | timestamp | 69 | |-----------------|------------|---------------|---------------------------------------------------------------------|---------------|------------| 70 | | **value** | text | text | text | text null | integer | 71 | | **example** | plex | user-reponame | "https://github.com/user/reponame/.../../traefik/helm-release.yaml" | tv | 1644404532 | 72 | -------------------------------------------------------------------------------- /web/src/generators/helm-release/models.ts: -------------------------------------------------------------------------------- 1 | 2 | export type ReleaseInfoCompressed = [ 3 | string, // release 4 | string, // chart 5 | string, // name 6 | string, // key 7 | number, // chartsUrl 8 | number, // count, 9 | string|undefined, // icon 10 | string|undefined, // group 11 | ] 12 | 13 | export interface RepoAlsoHas { 14 | interestingIdToName: Record 15 | repoAlsoHasMap: Record 16 | } 17 | export interface AppData { 18 | chartURLs: string[]; 19 | releases: ReleaseInfoCompressed[], 20 | repos: string[], 21 | keyFileMap: Record, 22 | repoAlsoHas: RepoAlsoHas 23 | } 24 | export interface ReleaseInfo { 25 | release: string; 26 | chart: string; 27 | name: string; 28 | key: string; 29 | chartsUrl: string; 30 | count: number; 31 | icon?: string; 32 | } 33 | 34 | export interface RepoReleaseInfo { 35 | name: string, 36 | chart: string, 37 | url: string, 38 | icon?: string, 39 | version: string, 40 | timestamp: number, 41 | } 42 | 43 | export interface RepoPageData { 44 | name: string, 45 | url: string, 46 | releases: RepoReleaseInfo[], 47 | } 48 | 49 | export function denormalize( 50 | appData: AppData, 51 | ): { 52 | releases: ReleaseInfo[], 53 | } { 54 | return { 55 | releases: appData.releases.map( 56 | ([release, chart, name, key, chartsUrl, count, icon, group]) => ({ 57 | release, 58 | chart, 59 | name, 60 | key, 61 | chartsUrl: 
"""Compare the current repos.db against an older snapshot and print the
releases with the largest gain/loss in usage, as Markdown-wrapped tables.

Requires the DEVENV_ROOT and DATE environment variables; reads
web/repos.db and web/repos-<DATE>.db under DEVENV_ROOT.
"""
import sqlite3
import os
import sys


def get_tables(cur, db):
    """Return the names of all tables in the attached database *db*."""
    cur.execute("SELECT name FROM " + db + ".sqlite_master WHERE type='table'")
    tables = cur.fetchall()
    return [t[0] for t in tables]


def get_count(cur, db):
    """Map release_name -> number of HelmReleases in *db*, most common first."""
    cur.execute(
        "select release_name, count(*) "
        "from " + db + ".flux_helm_release "
        "group by release_name "
        "order by count(*) desc "
    )
    # convert to map
    return dict(cur.fetchall())


def render_table(rows, old, current, old_date):
    """Print one box-drawn comparison table for (name, diff) pairs in *rows*."""
    first_column = len(old_date)
    second_column = len('current')
    third_column = len('release name')
    extra = max(third_column, max(len(name) for name, _ in rows))

    l1 = '─' * (first_column + 2)
    l2 = '─' * (second_column + 2)
    l3 = '─' * (extra + 2)

    print("```")
    print(f"┌{l1}┬{l2}┬{l3}┐")
    print(f"| {old_date} | current | {'release name':<{extra}} |")
    print(f"├{l1}┼{l2}┼{l3}┤")
    for name, _ in rows:
        # NOTE: loop variables renamed so they no longer shadow the sqlite
        # cursor (`cur`) at the call site.
        before, now = old[name], current[name]
        print(f"| {before:<{first_column}} | {now:<{second_column}} | {name:<{extra}} |")
    print(f"└{l1}┴{l2}┴{l3}┘")
    print("```")


def main():
    # FIX: os.environ['DEVENV_ROOT'] raised KeyError before the "not set"
    # check could ever run; .get() makes the guard reachable.
    base_path = os.environ.get('DEVENV_ROOT')
    if not base_path:
        print("DEVENV_ROOT not set")
        sys.exit(1)

    old_date = os.environ.get("DATE")
    if not old_date:
        print("DATE not set")
        sys.exit(1)

    current_db_path = os.path.join(base_path, "web", "repos.db")
    old_db_path = os.path.join(base_path, "web", "repos-" + old_date + ".db")

    # check if files exist
    if not os.path.exists(current_db_path):
        print("CURRENT_DB_PATH does not exist")
        sys.exit(1)
    if not os.path.exists(old_db_path):
        print("OLD_DB_PATH does not exist")
        sys.exit(1)

    # in memory
    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute("ATTACH DATABASE ? AS repos", (current_db_path,))
    cur.execute("ATTACH DATABASE ? AS oldrepos", (old_db_path,))

    # check if has flux_helm_release
    if "flux_helm_release" not in get_tables(cur, "oldrepos"):
        print("flux_helm_release table not found in oldrepos", old_db_path)
        sys.exit(1)
    if "flux_helm_release" not in get_tables(cur, "repos"):
        print("flux_helm_release table not found in repos", current_db_path)
        sys.exit(1)

    current = get_count(cur, "repos")
    old = get_count(cur, "oldrepos")

    # calculate diff, only for releases present in both snapshots
    diff = {name: current[name] - old[name] for name in current if name in old}

    how_many = 15
    top = sorted(diff.items(), key=lambda x: x[1], reverse=True)[:how_many]
    bottom = sorted(diff.items(), key=lambda x: x[1])[:how_many]

    print("Top " + str(len(top)) + " added")
    render_table(top, old, current, old_date)

    print("")
    print("Most " + str(len(bottom)) + " deleted")
    render_table(bottom, old, current, old_date)


if __name__ == "__main__":
    main()
useState(30); 36 | const selectRef = useRef(null); 37 | const inputRef = useRef(null); 38 | let location = useLocation(); 39 | useEffect(() => { 40 | if (location.pathname !== "/" && location.pathname !== "/" && search.length > 0) { 41 | setSearch(mode + ' '); 42 | } 43 | }, [location.pathname]) 44 | useEffect(() => { 45 | for (const m of searchModes) { 46 | if ((search.startsWith(m + ' ') 47 | ) && mode !== m) { 48 | setMode(m); 49 | } else if (search.startsWith(mode + ' ' + m + ' ') && mode !== m) { 50 | setMode(m); 51 | setSearch(m + ' ' + search.slice((mode + ' ' + m + ' ').length)); 52 | } 53 | } 54 | }, [search]); 55 | 56 | const keyDown = useCallback((e: React.KeyboardEvent) => { 57 | if (e.key === "Escape") { 58 | setSearch(""); 59 | } 60 | if (e.key === "Enter") { 61 | props.onEnter() 62 | } 63 | }, [setSearch]); 64 | useEffect(() => { 65 | if (!selectRef.current) return; 66 | setSelectWidth(getTextWidth(selectRef.current.value, selectRef.current) + 35); 67 | }, [selectRef, mode]) 68 | 69 | const message = { 70 | "hr": "Search for a helm release...", 71 | "image": "Search for an image...", 72 | "grep": "Search for a grep pattern..." 73 | }[mode || "hr"] 74 | return ; 100 | } 101 | -------------------------------------------------------------------------------- /web/playwright.config.ts: -------------------------------------------------------------------------------- 1 | import type { PlaywrightTestConfig } from '@playwright/test'; 2 | import { devices } from '@playwright/test'; 3 | 4 | import glob from 'glob'; 5 | 6 | /** 7 | * Read environment variables from file. 
8 | * https://github.com/motdotla/dotenv 9 | */ 10 | // require('dotenv').config(); 11 | 12 | // const webServer = { 13 | // command: 'yarn run dev', 14 | // url: 'http://localhost:5173/', 15 | // timeout: 60 * 1000, 16 | // reuseExistingServer: true 17 | // }; 18 | 19 | const webServer = { 20 | command: 'yarn run serve', 21 | url: 'http://localhost:4173/', 22 | timeout: 10 * 1000, 23 | reuseExistingServer: true 24 | }; 25 | 26 | function findBrowserPath(browserName: string) { 27 | // "PLAYWRIGHT_BROWSERS_PATH" in process.env ? process.env["PLAYWRIGHT_BROWSERS_PATH"] + "/chromium-1028/chrome-linux/chrome" : undefined, 28 | if ("PLAYWRIGHT_BROWSERS_PATH" in process.env) { 29 | // glob browser 30 | if (browserName === "chromium") { 31 | const browserPath = glob.sync(process.env["PLAYWRIGHT_BROWSERS_PATH"] + "/chromium-*/chrome-linux/chrome")[0]; 32 | console.log("browserPath: " + browserPath); 33 | return browserPath; 34 | } 35 | // no other browsers are there 36 | 37 | // return process.env["PLAYWRIGHT_BROWSERS_PATH"] + "/chromium-1028/chrome-linux/chrome"; 38 | } 39 | return undefined; 40 | } 41 | 42 | 43 | 44 | /** 45 | * See https://playwright.dev/docs/test-configuration. 46 | */ 47 | const config: PlaywrightTestConfig = { 48 | testDir: './e2e', 49 | /* Maximum time one test can run for. */ 50 | timeout: 30 * 1000, 51 | expect: { 52 | /** 53 | * Maximum time expect() should wait for the condition to be met. 54 | * For example in `await expect(locator).toHaveText();` 55 | */ 56 | timeout: 5000 57 | }, 58 | /* Run tests in files in parallel */ 59 | fullyParallel: true, 60 | /* Fail the build on CI if you accidentally left test.only in the source code. */ 61 | forbidOnly: !!process.env.CI, 62 | /* Retry on CI only */ 63 | retries: process.env.CI ? 2 : 0, 64 | /* Opt out of parallel tests on CI. */ 65 | workers: process.env.CI ? 1 : undefined, 66 | /* Reporter to use. 
See https://playwright.dev/docs/test-reporters */ 67 | reporter: 'html', 68 | /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ 69 | use: { 70 | /* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */ 71 | actionTimeout: 0, 72 | /* Base URL to use in actions like `await page.goto('/')`. */ 73 | baseURL: webServer.url, 74 | 75 | /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */ 76 | trace: 'retain-on-failure', 77 | }, 78 | 79 | /* Configure projects for major browsers */ 80 | projects: [ 81 | { 82 | name: 'chromium', 83 | use: { 84 | ...devices['Desktop Chrome'], 85 | launchOptions: { 86 | executablePath: findBrowserPath("chromium"), 87 | }, 88 | }, 89 | 90 | }, 91 | 92 | // { 93 | // name: 'firefox', 94 | // use: { 95 | // ...devices['Desktop Firefox'], 96 | // launchOptions: { 97 | // executablePath: process.env["PLAYWRIGHT_BROWSERS_PATH"] + "/firefox-1357/firefox/firefox", 98 | // }, 99 | // }, 100 | // }, 101 | 102 | // { 103 | // name: 'webkit', 104 | // use: { 105 | // ...devices['Desktop Safari'], 106 | // }, 107 | // }, 108 | 109 | /* Test against mobile viewports. */ 110 | // { 111 | // name: 'Mobile Chrome', 112 | // use: { 113 | // ...devices['Pixel 5'], 114 | // }, 115 | // }, 116 | // { 117 | // name: 'Mobile Safari', 118 | // use: { 119 | // ...devices['iPhone 12'], 120 | // }, 121 | // }, 122 | ], 123 | 124 | /* Folder for test artifacts such as screenshots, videos, traces, etc. 
*/ 125 | // outputDir: 'test-results/', 126 | 127 | /* Run your local dev server before starting the tests */ 128 | 129 | webServer 130 | }; 131 | 132 | export default config; 133 | -------------------------------------------------------------------------------- /web/src/pages/grep.tsx: -------------------------------------------------------------------------------- 1 | import { useState } from "react" 2 | import type { GrepData } from "../generators/helm-release/models" 3 | 4 | function Highlight(props: {text: string, keyword: string}) { 5 | const { text, keyword } = props; 6 | const parts = text.split(new RegExp(`(${keyword})`, 'gi')); 7 | return <>{parts.map((part, i) => 8 | 14 | {part} 15 | 16 | )}; 17 | } 18 | 19 | 20 | export default function (props : GrepData & { search : string }) { 21 | const { valueMap, list, urlMap } = props.values; 22 | const search = props.search.replace(/^grep /, "") 23 | const results = search.length > 0 ? list.filter((item) => 24 | item.name.toLowerCase().includes(search.toLowerCase()) || 25 | Object.entries(valueMap[item.name]).some(([url, values]) => 26 | values.some((v) => v.toLowerCase().includes(search.toLowerCase())) 27 | ) 28 | ).slice(0, 30) : []; 29 | 30 | const [expanded, setExpanded] = useState(new Set()) 31 | 32 | const toggleExpanded = (name: string) => { 33 | if (expanded.has(name)) { 34 | expanded.delete(name) 35 | } else { 36 | expanded.add(name) 37 | } 38 | setExpanded(new Set(expanded)) 39 | } 40 | 41 | return
42 | {search.length > 0 &&
43 |

Results

44 |
    45 | {results.map((item) => { 46 | return
  • 47 |

    48 | 49 | {/* small expand button */} 50 | 58 |

    59 | {!expanded.has(item.name) && 60 | {Object.entries(valueMap[item.name]).map(([url, values], i) => { 61 | const vals = values.filter((v) => v.toLowerCase().includes(search.toLowerCase())) 62 | return
      63 | {vals.map((value, j) => { 64 | return
    • 65 | 66 | 67 | 68 |
    • 69 | })} 70 |
    71 | })} 72 |
    } 73 | {expanded.has(item.name) &&
    74 |
      75 | {Object.entries(valueMap[item.name]).map(([url, values], i) => { 76 | return <> 77 | {values.map((value, j) => { 78 | return
    • 79 | 80 | 81 | 82 |
    • 83 | })} 84 | 85 | })} 86 |
    87 |
    } 88 | 89 |
  • 90 | })} 91 |
92 |
} 93 | 94 |
95 | } -------------------------------------------------------------------------------- /web/src/pages/image.tsx: -------------------------------------------------------------------------------- 1 | import type { ImagePageData } from "../generators/helm-release/models" 2 | 3 | function Highlight(props: {text: string, keyword: string}) { 4 | const { text, keyword } = props; 5 | const parts = text.split(new RegExp(`(${keyword})`, 'gi')); 6 | return <>{parts.map((part, i) => 7 | 13 | {part} 14 | 15 | )}; 16 | } 17 | 18 | function Copy({id, text}: {id: string, text: string}) { 19 | return 37 | } 38 | 39 | // we sort by version, but we need to ensure non version parts are sorted correctly 40 | function TagSorter(a: string, b: string) { 41 | const aParts = a.split("."); 42 | const bParts = b.split("."); 43 | 44 | // if the first part is not a number, sort by string 45 | if (isNaN(parseInt(aParts[0])) || isNaN(parseInt(bParts[0]))) { 46 | return a.localeCompare(b); 47 | } 48 | 49 | // if the first part is a number, sort by number 50 | for (let i = 0; i < aParts.length; i++) { 51 | const aPart = parseInt(aParts[i]); 52 | const bPart = parseInt(bParts[i]); 53 | // check if not number 54 | if (isNaN(aPart) || isNaN(bPart)) { 55 | return a.localeCompare(b); 56 | } 57 | 58 | if (aPart > bPart) { 59 | return -1; 60 | } else if (aPart < bPart) { 61 | return 1; 62 | } 63 | } 64 | 65 | return 0; 66 | } 67 | 68 | export default function (props : ImagePageData & { search : string }) { 69 | // repository -> tag -> url[] 70 | const images: Record> = props.images; 71 | const search = props.search.replace(/^image /, "") 72 | 73 | const results = search.length > 0 ? 74 | Object.keys(images).filter((repo) => repo.includes(search)) : []; 75 | 76 | const urlName = (url: string) => { 77 | // https://github.com/solidDoWant/infra-mk2/blob/main/cluster/apps/media/sonarr/helm-release.yaml 78 | // extract solidDoWant 79 | const parts = url.split("/"); 80 | return parts[3]; 81 | } 82 | 83 | return
84 | {search.length > 0 &&
85 |

Results

86 |
    87 | {/* {results.map((item, i) => { 88 | return
  • 89 | 90 |
  • 91 | })} */} 92 | {results.map((repo, i) => { 93 | return
  • 94 | 95 |
      96 | {Object.keys(images[repo]).sort( 97 | // (a, b) => images[repo][b].length - images[repo][a].length 98 | TagSorter 99 | ).map((tag, j) => { 100 | return
    • 101 | {tag} ({images[repo][tag].length}) 102 |
      103 | {images[repo][tag].map((url, i) => { 104 | return 105 | {urlName(url)} 106 | 107 | })} 108 |
    • 109 | })} 110 |
    111 |
  • 112 | })} 113 | {results.length === 0 &&
  • No results
  • } 114 |
115 |
} 116 | 117 |
118 | } -------------------------------------------------------------------------------- /web/src/components/search/hr.tsx: -------------------------------------------------------------------------------- 1 | import { Link, redirect } from "react-router-dom"; 2 | import { MINIMUM_COUNT, ReleaseInfo } from "../..//generators/helm-release/models"; 3 | import { simplifyURL } from "../../utils"; 4 | import Icon from "../icon"; 5 | import { forwardRef, useImperativeHandle, useMemo, useRef } from "react"; 6 | 7 | interface P { 8 | search: string, 9 | releases: ReleaseInfo[] 10 | } 11 | 12 | export interface SearchInterface { 13 | onEnter: () => void 14 | } 15 | 16 | const SEARCH_WEIGHTS = { 17 | fullMatch: 10, 18 | length: 1, 19 | count: 5, 20 | } 21 | 22 | const HRSearchResults = forwardRef(function HRSearchResults({search, releases}, ref) { 23 | 24 | const fullHeight = "max-h-128"; 25 | const peerFullHeight = "peer-focus:max-h-128"; 26 | const prevSearch = useRef(""); 27 | const prevResults = useRef(releases); 28 | 29 | const [availableSearches, unavailableSearches] = useMemo(() => { 30 | if (search.length < 2) { 31 | return [[], []]; 32 | } 33 | if(!search.toLowerCase().startsWith(prevSearch.current.toLowerCase())) { 34 | prevResults.current = releases; 35 | } 36 | prevSearch.current = search; 37 | let searches; 38 | searches = prevResults.current = prevResults.current.filter(({ chart, release, chartsUrl }) => { 39 | return chart.toLowerCase().includes(search.toLowerCase()) || release.toLowerCase().includes(search.toLowerCase()) || simplifyURL(chartsUrl).toLowerCase().includes(search.toLowerCase()) 40 | }).sort((a, b) => { 41 | // Calculate full match score 42 | const fullMatchScoreA = a.name.toLowerCase() === search ? SEARCH_WEIGHTS.fullMatch : 0; 43 | const fullMatchScoreB = b.name.toLowerCase() === search ? 
SEARCH_WEIGHTS.fullMatch : 0; 44 | 45 | // Calculate length score based on how much longer the name is compared to the query 46 | const lengthScoreA = (a.name.length - search.length) * SEARCH_WEIGHTS.length; 47 | const lengthScoreB = (b.name.length - search.length) * SEARCH_WEIGHTS.length; 48 | 49 | // Calculate count score 50 | const countScoreA = a.count * SEARCH_WEIGHTS.count; 51 | const countScoreB = b.count * SEARCH_WEIGHTS.count; 52 | 53 | // Total score for each item 54 | const totalScoreA = fullMatchScoreA - lengthScoreA + countScoreA; 55 | const totalScoreB = fullMatchScoreB - lengthScoreB + countScoreB; 56 | 57 | // Compare the total scores 58 | return totalScoreB - totalScoreA; // Sort in descending order of score 59 | }); 60 | const availableSearches = searches.filter(({ count }) => count >= MINIMUM_COUNT); 61 | const unavailableSearches = searches.filter(({ count }) => count < MINIMUM_COUNT); 62 | return [availableSearches, unavailableSearches]; 63 | }, [releases, search]); 64 | 65 | useImperativeHandle( 66 | ref, 67 | () => ({ 68 | onEnter() { 69 | if(availableSearches.length >= 1) { 70 | window.location.href = `/hr/${availableSearches[0].key}`; 71 | } else if(unavailableSearches.length >= 1) { 72 | // react router go to link 73 | redirect(`/hr/${unavailableSearches[0].key}`); 74 | } 75 | } 76 | }), 77 | ) 78 | 79 | return
80 | {search !== "" && search.length > 1 && 81 |
plexbjw-s/app-template35plexbjw-s/app-template35
82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | {availableSearches.map(({ key, icon, chart, release, chartsUrl, count }) => { 91 | return 92 | 93 | 94 | 95 | 96 | })} 97 | {unavailableSearches.map(({ key, icon, chart, release, chartsUrl, count }) => { 98 | return 99 | 100 | 101 | 102 | 103 | })} 104 | 105 |
ReleaseChartCount
{release}{simplifyURL(chartsUrl) + '/' + chart}{count}
{release}{simplifyURL(chartsUrl) + '/' + chart}{count}
106 | } 107 |
108 | }); 109 | export default HRSearchResults; -------------------------------------------------------------------------------- /search.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | import os 3 | import sqlite3 4 | import sys 5 | from typing import Any, List 6 | from ruamel.yaml import YAML 7 | from ruamel.yaml.error import YAMLError 8 | import sqlite3 9 | from subprocess import check_output 10 | import warnings 11 | from urllib.parse import urlparse 12 | import shlex 13 | 14 | from ruamel.yaml.error import ReusedAnchorWarning 15 | from info_model import InfoModel 16 | from pydantic import ValidationError 17 | 18 | from scanners.flux_helm_release import FluxHelmReleaseScanner 19 | from scanners.argo_helm_application import ArgoHelmApplicationScanner 20 | from scanners.flux_helm_repo import FluxHelmRepoScanner 21 | from scanners.flux_oci_repository import FluxOCIRepositoryScanner 22 | warnings.simplefilter("ignore", ReusedAnchorWarning) 23 | 24 | # create sqlite db 25 | conn1 = sqlite3.connect('repos.db') 26 | conn2 = sqlite3.connect('repos-extended.db') 27 | c1 = conn1.cursor() 28 | c2 = conn2.cursor() 29 | 30 | c1.execute("SELECT replace(repo_name, '/', '-'), url FROM repo") 31 | # to map 32 | repos = dict(c1.fetchall()) 33 | c1.execute("SELECT replace(repo_name, '/', '-'), branch FROM repo") 34 | branches = dict(c1.fetchall()) 35 | 36 | yaml=YAML(typ="safe") 37 | yaml.composer.return_alias = lambda s: deepcopy(s) 38 | 39 | scanners = [ 40 | FluxHelmReleaseScanner(), 41 | FluxHelmRepoScanner(), 42 | ArgoHelmApplicationScanner(), 43 | FluxOCIRepositoryScanner() 44 | ] 45 | 46 | for scanner in scanners: 47 | scanner.create_table(c1, c2) 48 | 49 | for root, dirs, files in os.walk("repos/"): 50 | for file in files: 51 | file_path = os.path.join(root, file) 52 | if file.endswith(".yaml") and "/.archive/" not in file_path and "/archive/" not in file_path: 53 | repo_dir_name = 
file_path.split('/')[1] 54 | if repo_dir_name not in repos: 55 | print("repo", repo_dir_name, "not found in repos") 56 | continue 57 | 58 | with open(file_path, "r") as stream: 59 | found_scanners = [] 60 | try: 61 | for s in scanners: 62 | stream.seek(0) 63 | if s.pre_check(stream): 64 | found_scanners.append(s) 65 | except UnicodeDecodeError as e: 66 | print("unicode error", e) 67 | continue 68 | if len(found_scanners) > 0: 69 | stream.seek(0) 70 | try: 71 | amount_lines = len(stream.readlines()) 72 | except UnicodeDecodeError as e: 73 | print("unicode error", e) 74 | continue 75 | stream.seek(0) 76 | try: 77 | docs: List[Any] = yaml.load_all(stream) 78 | def prepare_walk(doc: Any): 79 | def walk(path, check=lambda x: x): 80 | cur = doc 81 | keys = [key.replace('@', '.') for key in path.replace('\\.', '@').split('.')] 82 | for key in keys: 83 | if not isinstance(cur, dict) or key not in cur or cur[key] is None: 84 | return None 85 | cur = cur[key] 86 | return check(cur) 87 | return walk 88 | for doc in docs: 89 | current_scanners = [ 90 | s for s in found_scanners 91 | if s.check(prepare_walk(doc))] 92 | if len(current_scanners) > 0: 93 | rel_file_path = os.path.relpath(file_path, "repos/"+repo_dir_name+"/") 94 | safe_file_path = shlex.quote(rel_file_path) 95 | cmd = f"git log -1 --format=%ct -- {safe_file_path}" 96 | timestamp = check_output( 97 | cmd, 98 | shell=True, 99 | cwd="repos/" + repo_dir_name, 100 | ) 101 | url = repos[repo_dir_name] 102 | 103 | branch = branches[repo_dir_name] 104 | full_url = f"{url}/blob/{branch}/{os.path.relpath(file_path, 'repos/' + repo_dir_name + '/')}" 105 | repo_name = urlparse(url).path[1:] 106 | 107 | rest = InfoModel( 108 | repo_name=repo_name, 109 | timestamp=timestamp.decode("utf-8").strip(), 110 | url=full_url, 111 | amount_lines=amount_lines, 112 | ) 113 | for s in current_scanners: 114 | try: 115 | result = s.parse(prepare_walk(doc), rest) 116 | s.insert(c1, c2, result) 117 | except ValidationError as e: 118 | 
import json
import re
from typing import Optional

from info_model import InfoModel


class FluxHelmRelease(InfoModel):
    """One Flux HelmRelease occurrence, flattened for the sqlite tables."""
    release_name: str
    chart_name: str
    chart_version: Optional[str]
    namespace: Optional[str]
    hajimari_icon: Optional[str]
    hajimari_group: Optional[str]
    chart_ref_kind: Optional[str]
    helm_repo_name: str
    helm_repo_namespace: Optional[str]
    # JSON-encoded spec.values (the string "null" when no values block)
    values: Optional[str]


class FluxHelmReleaseScanner:
    """Detects and extracts Flux HelmRelease documents from repo YAML files."""

    api_version = "helm.toolkit.fluxcd.io"
    kind = "HelmRelease"

    def pre_check(self, stream) -> bool:
        """Cheap line scan: True if the file plausibly holds a HelmRelease.

        Avoids a full YAML parse on most files; only requires that both the
        apiVersion prefix and the exact kind line appear somewhere.
        """
        try:
            contains_api_version = False
            contains_kind = False
            for line in stream:
                if line.strip().startswith("apiVersion: " + self.api_version):
                    contains_api_version = True
                if line.strip() == "kind: " + self.kind:
                    contains_kind = True
                if contains_api_version and contains_kind:
                    return True
        except UnicodeDecodeError as e:
            # binary or mis-encoded file: not a HelmRelease
            print("unicode error", e)
        return False

    def check(self, walk) -> bool:
        """True if the parsed document is a HelmRelease with a usable chart ref.

        `walk` is the dotted-path accessor built in search.py.  The metadata
        name must not contain '{' or '}' (filters out templated manifests).
        """
        return walk('apiVersion', lambda x: x.startswith(self.api_version)) and \
            walk('kind', lambda x: x == self.kind) and \
            (self._helm_or_git_repo(walk) or self._oci_repo(walk)) and \
            walk('metadata.name', lambda x: re.match(r'^[^{}]+$', x) is not None)

    def _helm_or_git_repo(self, walk):
        # classic .spec.chart form pointing at a Helm or Git repository
        return walk('spec.chart.spec.chart', lambda x: x is not None) and \
            walk('spec.chart.spec.sourceRef.kind', lambda x: x == "HelmRepository" or x == "GitRepository")

    def _oci_repo(self, walk):
        # newer .spec.chartRef form pointing at an OCIRepository
        return walk('spec.chartRef.kind', lambda x: x == "OCIRepository")

    def parse(self, walk, rest: InfoModel) -> FluxHelmRelease:
        """Extract the fields of interest from one HelmRelease document."""
        release_name = walk('metadata.name')
        namespace = walk('metadata.namespace')
        # chartRef (OCI) releases carry the chart name on the ref itself
        chart_name = walk('spec.chart.spec.chart') or walk('spec.chartRef.name')
        chart_version = walk('spec.chart.spec.version')
        helm_repo_name = walk('spec.chart.spec.sourceRef.name') or walk('spec.chartRef.name')
        helm_repo_namespace = walk('spec.chart.spec.sourceRef.namespace') or walk('spec.chartRef.namespace')
        chart_ref_kind = walk('spec.chart.spec.sourceRef.kind') or walk('spec.chartRef.kind')
        values = walk('spec.values')

        # raw strings: '\.' is walk()'s escape for a literal dot in a key,
        # not a Python escape (the non-raw form is an invalid escape sequence
        # and warns on Python 3.12+)
        hajimari_icon = walk(
            r'spec.values.ingress.main.annotations.hajimari\.io/icon',
            lambda x: x.strip()) or None
        hajimari_group = walk(
            r'spec.values.ingress.main.annotations.hajimari\.io/group',
            lambda x: x.strip()) or None

        return FluxHelmRelease.parse_obj(rest.dict() | {
            'chart_name': chart_name,
            'chart_version': chart_version,
            'release_name': release_name,
            'hajimari_icon': hajimari_icon,
            'hajimari_group': hajimari_group,
            'namespace': namespace,
            'helm_repo_name': helm_repo_name,
            'helm_repo_namespace': helm_repo_namespace,
            'chart_ref_kind': chart_ref_kind,
            # NOTE(review): dumps(None) stores the string "null" here, while
            # the Argo scanner stores SQL NULL — kept as-is since downstream
            # consumers may rely on it; consider unifying.
            'values': json.dumps(values, default=str)
        })

    def create_table(self, c1, c2):
        """(Re)create the release table in db1 and the values table in both DBs."""
        c1.execute('''DROP TABLE IF EXISTS flux_helm_release''')
        c1.execute('''CREATE TABLE IF NOT EXISTS flux_helm_release
            (release_name text NOT NULL,
            chart_name text NOT NULL,
            chart_version text NULL,
            namespace text NULL,
            repo_name text NOT NULL,
            hajimari_icon text NULL,
            hajimari_group text NULL,
            chart_ref_kind text NULL,
            lines number NOT NULL,
            url text NOT NULL,
            timestamp text NOT NULL,
            helm_repo_name text NOT NULL,
            helm_repo_namespace text NULL)''')
        # in second DB, store the val longtext. create table in first db for later copying
        for c in [c1, c2]:
            c.execute('''DROP TABLE IF EXISTS flux_helm_release_values''')
            c.execute('''CREATE TABLE IF NOT EXISTS flux_helm_release_values
                (url text NOT NULL, val longtext null)''')

    def insert(self, c1, c2, data: FluxHelmRelease):
        """Insert one parsed release into db1 and its values blob into db2."""
        c1.execute(
            "INSERT INTO flux_helm_release VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (
                data.release_name,
                data.chart_name,
                data.chart_version,
                data.namespace,
                data.repo_name,
                data.hajimari_icon,
                data.hajimari_group,
                data.chart_ref_kind,
                data.amount_lines,
                data.url,
                data.timestamp,
                data.helm_repo_name,
                data.helm_repo_namespace,
            ))
        c2.execute("INSERT INTO flux_helm_release_values VALUES (?, ?)",
            (
                data.url,
                data.values
            ))

    def test(self, c1, c2) -> bool:
        """Post-scan sanity check: a full run should yield well over 100 rows."""
        c1.execute("SELECT count(*) FROM flux_helm_release")
        c = c1.fetchone()[0]
        print("flux_helm_release count", c)
        return c > 100
import json

from ruamel.yaml import YAML
from ruamel.yaml.error import YAMLError


class ArgoHelmApplication(InfoModel):
    """One Argo CD Helm Application occurrence, flattened for the sqlite tables."""
    release_name: str
    chart_name: str
    chart_version: Optional[str]
    namespace: Optional[str]
    hajimari_icon: Optional[str]
    hajimari_group: Optional[str]
    # JSON-encoded helm values (valuesObject, or parsed `values` string); NULL if absent
    values: Optional[str]
    helm_repo_url: str


class ArgoHelmApplicationScanner:
    """Detects and extracts Argo CD Application documents that deploy Helm charts."""

    api_version = "argoproj.io"
    kind = "Application"

    def pre_check(self, stream) -> bool:
        """Cheap line scan: True if the file plausibly holds a Helm Application.

        Requires the apiVersion prefix, the exact kind line, and a `helm:`
        key so non-Helm Applications are filtered before YAML parsing.
        """
        try:
            contains_api_version = False
            contains_kind = False
            contains_helm = False
            for line in stream:
                if line.strip().startswith("apiVersion: " + self.api_version):
                    contains_api_version = True
                if line.strip() == "kind: " + self.kind:
                    contains_kind = True
                if line.strip() == "helm:":
                    contains_helm = True
                if contains_api_version and contains_kind and contains_helm:
                    return True
        except UnicodeDecodeError as e:
            # binary or mis-encoded file: not an Application
            print("unicode error", e)
        return False

    def check(self, walk) -> bool:
        """True if the parsed document is an Application with a chart source."""
        return walk('apiVersion', lambda x: x.startswith(self.api_version)) and \
            walk('kind', lambda x: x == self.kind) and \
            walk('spec.source.repoURL', lambda x: x is not None) and \
            walk('spec.source.chart', lambda x: x is not None)

    def parse(self, walk, rest: InfoModel) -> ArgoHelmApplication:
        """Extract the fields of interest from one Application document."""
        chart_name = walk('spec.source.chart')
        chart_version = walk('spec.source.targetRevision')
        release_name = walk('metadata.name')
        namespace = walk('spec.destination.namespace')
        helm_repo_url = walk('spec.source.repoURL')
        valuesObject = walk('spec.source.helm.valuesObject')
        valuesString = walk('spec.source.helm.values')
        values = valuesObject or valuesString
        # `helm.values` is an embedded YAML string; normalize it to a dict
        if values and isinstance(values, str):
            try:
                yaml = YAML(typ="safe", pure=True)
                values = yaml.load(values)
            except YAMLError as exc:
                print("yaml err")
                print(exc)

        # raw strings: '\.' is walk()'s escape for a literal dot in a key,
        # not a Python escape (the non-raw form is an invalid escape sequence
        # and warns on Python 3.12+)
        hajimari_icon = walk(
            r'spec.source.helm.valuesObject.ingress.main.annotations.hajimari\.io/icon',
            lambda x: x.strip()) or None
        hajimari_group = walk(
            r'spec.source.helm.valuesObject.ingress.main.annotations.hajimari\.io/group',
            lambda x: x.strip()) or None
        return ArgoHelmApplication.parse_obj(rest.dict() | {
            'chart_name': chart_name,
            'chart_version': chart_version,
            'release_name': release_name,
            'hajimari_icon': hajimari_icon,
            'hajimari_group': hajimari_group,
            'namespace': namespace,
            'helm_repo_url': helm_repo_url,
            'values': json.dumps(values, default=str) if values else None
        })

    def create_table(self, c1, c2):
        """(Re)create the application table in db1 and the values table in both DBs."""
        c1.execute('''DROP TABLE IF EXISTS argo_helm_application''')
        c1.execute('''CREATE TABLE IF NOT EXISTS argo_helm_application(
            release_name text NOT NULL,
            chart_name text NOT NULL,
            chart_version text NULL,
            namespace text NULL,
            repo_name text NOT NULL,
            hajimari_icon text NULL,
            hajimari_group text NULL,
            lines number NOT NULL,
            url text NOT NULL,
            timestamp text NOT NULL,
            helm_repo_url text NOT NULL)''')
        # in second DB, store the val longtext. create table in first db for later copying
        for c in [c1, c2]:
            c.execute('''DROP TABLE IF EXISTS argo_helm_application_values''')
            c.execute('''CREATE TABLE IF NOT EXISTS argo_helm_application_values
                (url text NOT NULL, val longtext null)''')

    def insert(self, c1, c2, data: ArgoHelmApplication):
        """Insert one parsed application into db1 and its values blob into db2."""
        c1.execute(
            "INSERT INTO argo_helm_application VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            (
                data.release_name,
                data.chart_name,
                data.chart_version,
                data.namespace,
                data.repo_name,
                data.hajimari_icon,
                data.hajimari_group,
                data.amount_lines,
                data.url,
                data.timestamp,
                data.helm_repo_url))
        c2.execute("INSERT INTO argo_helm_application_values VALUES (?, ?)",
            (
                data.url,
                data.values
            ))

    def test(self, c1, c2) -> bool:
        """Post-scan sanity check: a full run should yield more than 10 rows."""
        c1.execute("SELECT count(*) FROM argo_helm_application")
        c = c1.fetchone()[0]
        print("argo_helm_application count", c)
        return c > 10
# GraphQL pass over the same topics: the REST search API above is unreliable,
# so this collects each topic's repositories again via the GraphQL API.
def run_graphql_query(topic, variables):
    """Fetch one page of repositories for *topic* from the GitHub GraphQL API.

    `variables` must contain {"cursor": <str|None>} for pagination.  Returns
    the decoded JSON response.
    """
    query = """
    query ($cursor: String) {
      topic(name: "%s") {
        repositories(first: 100, after: $cursor) {
          pageInfo {
            endCursor
            hasNextPage
          }
          nodes {
            id
            nameWithOwner
            stargazerCount
            url
            defaultBranchRef {
              name
            }
            pushedAt
          }
        }
      }
    }""" % topic
    payload = {"query": query, "variables": variables}
    response = requests.post("https://api.github.com/graphql", json=payload, headers=github_header)
    return response.json()

for topic in topics:
    if isDry:
        break
    cursor = None
    has_next_page = True
    while has_next_page:
        result = run_graphql_query(topic, {"cursor": cursor})
        repositories = result["data"]["topic"]["repositories"]
        for repo in repositories["nodes"]:
            pushed_at = datetime.strptime(repo["pushedAt"], "%Y-%m-%dT%H:%M:%SZ")
            # filter out unmaintained repos
            if (datetime.now() - pushed_at).days > 90:
                continue
            repo_name = repo["nameWithOwner"]
            key = repo_name.lower()
            stars = repo["stargazerCount"]
            url = repo["url"]
            # NOTE(review): defaultBranchRef is null for empty repos and would
            # raise TypeError here — confirm whether empty repos can appear.
            branch = repo["defaultBranchRef"]["name"]
            if key in results and stars < results[key][3]:
                continue
            # BUGFIX: index by the lower-cased key like every other collector;
            # indexing by repo_name created case-duplicate entries that the
            # star-comparison dedup above never saw.
            results[key] = (repo_name, url, branch, stars)
        # BUGFIX: terminate on pageInfo.hasNextPage (now requested in the
        # query) instead of reusing endCursor as the loop flag, which cost an
        # extra empty request per topic.
        has_next_page = repositories["pageInfo"]["hasNextPage"]
        cursor = repositories["pageInfo"]["endCursor"]

# GitLab pass: same topics via the public projects API (single page).
for topic in topics:
    if isDry:
        break
    url = "https://gitlab.com/api/v4/projects?topic=" + topic
    items = requests.get(url).json()
    for repo_info in items:
        pushed_at = datetime.strptime(repo_info["last_activity_at"], "%Y-%m-%dT%H:%M:%S.%fZ")
        # filter out unmaintained repos
        if (datetime.now() - pushed_at).days > 90:
            continue
        repo_name = repo_info["path_with_namespace"]
        key = repo_name.lower()
        stars = repo_info["star_count"]
        url = repo_info["web_url"]
        branch = repo_info["default_branch"]
        if key in results and stars < results[key][3]:
            continue
        results[key] = (repo_name, url, branch, stars)


# Well-known repos that must always be present; a miss indicates a broken fetch.
must_have = {
    "onedr0p/home-ops",
    "billimek/k8s-gitops",
    "bjw-s-labs/home-ops",
    "xunholy/k8s-gitops",
    "bjw-s/home-ops",
    "toboshii/home-ops",
    "carpenike/k8s-gitops",
    "wrmilling/k3s-gitops",
    "brettinternet/homeops",
    "buroa/k8s-gitops",
    "szinn/k8s-homelab",
    "auricom/home-ops",
    "ahinko/home-ops",
    "budimanjojo/home-cluster",
    "angelnu/k8s-gitops",
    "joryirving/home-ops",
    "truxnell/home-cluster",
    "haraldkoch/kochhaus-home",
    "ishioni/homelab-ops",
    "samip5/k8s-cluster",
    "kashalls/home-cluster",
    "anthr76/infra"
}
# repos whose (lower-cased) key contains any of these substrings are dropped
filter_out = [
    "true_charts",
    "truecharts"
]

results = {k: v for k, v in results.items() if not any(filter_text in k for filter_text in filter_out)}

# fail fast (exit 1) if any known-good repo went missing — a strong signal
# that one of the fetch steps above broke
for repo in must_have:
    if repo not in results:
        print(f"Missing {repo}")
        exit(1)

# sanity threshold: a healthy run yields well over 200 repos
if len(results) < 200:
    print("Not enough repos, error fetching topic github repos")
    exit(1)



# sort results on repo_name
results = sorted(results.values(), key=lambda x: x[0])

j = json.dumps(results, indent=2)
with open("repos.json", "w") as f:
    f.write(j)
-------------------------------------------------------------------------------- /web/renderer.ts: --------------------------------------------------------------------------------
import sqlite3 from 'sqlite3';
import { Database, open } from 'sqlite';
import {
  collector as hrCollector,
  appDataGenerator as hrAppDataGenerator,
  pageGenerator as hrPageGenerator,
  generateTopReposPageData,
  generateRepoPagesData,
  generateGrepPageData,
  generateImagePageData,
} from './src/generators/helm-release/generator';
import type { RenderFunction } from './src/entry-server';
import pako from 'pako';

// use class, to avoid variables going back and forth
export class Renderer {
  // NOTE(review): `Record` below appears with its type parameters stripped in
  // this copy of the file (likely Record<string, ...>) — confirm against the
  // original source before relying on the types.
  public db?: Database;
  public dbExtended?: Database;
  private appData: any;
  // we split the pageData, most popular pages get html files, the rest get bundled into a json file
  private htmlPageData: {
    [key: string]: any
  } = {};
  public jsonFilesData: Record = {};
  public jsonFilesKeyMap: Record = {};

  // methods
  // Opens both sqlite databases and builds all page/app data for rendering.
  async prepareData() {
    this.db = await open({
      filename: 'repos.db',
      driver: sqlite3.Database
    });
    this.dbExtended = await open({
      filename: 'repos-extended.db',
      driver: sqlite3.Database
    });

    const hrPageData = await
hrCollector(this.db, this.dbExtended);
    // pre-rendered HTML pages: one per helm release, plus /top, /repo/*,
    // /grep and /image
    for (const [key, pageData] of Object.entries(hrPageGenerator(hrPageData))) {
      this.htmlPageData[key] = pageData;
    }
    this.htmlPageData['/top'] = generateTopReposPageData(hrPageData);
    for (const [key, pageData] of Object.entries(generateRepoPagesData(hrPageData))) {
      this.htmlPageData['/repo/'+key] = pageData;
    }

    this.htmlPageData['/grep'] = generateGrepPageData(hrPageData);

    this.htmlPageData['/image'] = generateImagePageData(hrPageData);

    // less-popular pages are generated again (second argument false) and
    // bundled into chunked JSON files instead of standalone HTML
    const jsonPageData: Record = {};
    for (const [key, pageData] of Object.entries(hrPageGenerator(hrPageData, false))) {
      jsonPageData[key] = pageData;
    }

    const { fileData, keyFileMap } = this.getJsonPageData(jsonPageData);
    this.jsonFilesData = fileData;
    this.jsonFilesKeyMap = keyFileMap;

    // app-wide data shipped to every page, including the key->file index so
    // the client knows which JSON chunk holds a given page
    this.appData = {
      ...hrAppDataGenerator(hrPageData),
      keyFileMap
    }

  }

  // Splits the page-data map into serialized JSON chunks of bounded size and
  // returns the chunks plus a key -> chunk-index map.
  public getJsonPageData(jsonPageData: Record) {
    // split jsonPageData into multiple files
    const jsonPageDataKeys = Object.keys(jsonPageData);
    // max size 300kb
    const jsonMaxSize = (300 - 5) * 1024;

    const keyFileMap: Record = {};

    let currentFile = 0;
    let currentSize = 0;

    // greedy first-fit in key order: start a new chunk whenever adding the
    // next entry would exceed the size budget
    for (const key of jsonPageDataKeys) {
      const jsonPageDataString = JSON.stringify(jsonPageData[key]);
      if (currentSize + jsonPageDataString.length > jsonMaxSize) {
        currentFile++;
        currentSize = 0;
      }
      keyFileMap[key] = currentFile;
      currentSize += jsonPageDataString.length;
    }
    // [0, 1, 2, ..., currentFile]
    const fileData = Array.from(Array(currentFile + 1).keys()).map(i => {
      // serialize only the entries assigned to chunk i
      const data = JSON.stringify(jsonPageDataKeys.reduce((acc, key) => {
        if (keyFileMap[key] === i) {
          acc[key] = jsonPageData[key];
        }
        return acc;
      }, {} as typeof jsonPageData));
      return data;
    });
    return {
      fileData,
      keyFileMap,
    }
  }

  // All URLs that get a standalone pre-rendered HTML file.
  getPages() {
    return Object.keys(this.htmlPageData);
  }

  // Renders one URL to a full HTML document: looks up its page data, runs the
  // SSR render function, and splices title/description/app html plus the
  // gzip+base64 encoded data payloads into the template.
  async generatePage(render: RenderFunction, url: string, template: string) {
    const pageData = (
      () => {
        // the site may be served under a /k8s-at-home-search prefix; strip it
        let strippedUrl = url;
        if (url.startsWith("/k8s-at-home-search")) {
          strippedUrl = url.replace("/k8s-at-home-search", "");
        }
        if (strippedUrl in this.htmlPageData) {
          return this.htmlPageData[strippedUrl];
        }
        // also accept the ".html"-suffixed form of the same page
        strippedUrl = url.replace(/\.html$/, '');
        if (strippedUrl in this.htmlPageData) {
          return this.htmlPageData[strippedUrl];
        }
        return undefined;
      }
    )();

    // gzip the JSON payload and base64-encode it for embedding in a script tag
    function b64EncodeUnicode(data: string | undefined) {
      return Buffer.from(pako.gzip(Buffer.from(data || "null", 'utf8'))).toString('base64');
    }
    console.log("rendering", url);

    const appHtml = await render(url, this.appData, pageData)

    const title = pageData && "title" in pageData ? pageData.title + ' - ' : "";

    // Generate description for helm release pages
    let description = "Search through community Kubernetes repositories for Helm chart examples and configurations - explore how others deploy their applications";
    if (pageData && url.startsWith("/hr/")) {
      const name = pageData.title || "";
      const chartName = pageData.chartName || "";
      description = `Find real-world examples and configurations of the ${name} Helm chart.
See how the community deploys ${chartName} in their Kubernetes clusters.`;
    }

    const pageDataJS = `window.__PAGE_DATA__ = "${b64EncodeUnicode(JSON.stringify(pageData))}";`
    const appDataJS = `window.__APP_DATA__ = "${b64EncodeUnicode(JSON.stringify(this.appData))}";`

    // NOTE(review): the placeholder markers in the first three .replace()
    // calls appear to have been stripped from this copy of the file (they are
    // empty template literals here) — restore them from the original source.
    const html = template
      .replace(``, title)
      .replace(``, description)
      .replace(``, appHtml)
      .replace(`/**--app-data--**/`, appDataJS)
      .replace(`/**--page-data--**/`, pageDataJS);

    return html;
  }

  // Builds a sitemap listing every pre-rendered page plus the site root,
  // stamped with the build time.
  public generateSitemap() {
    const pages = this.getPages();
    // NOTE(review): the XML tags of this sitemap template appear to have been
    // stripped from this copy of the file — restore from the original source.
    const sitemap = `
${pages.map(page => `
https://kubesearch.dev${page}
${new Date().toISOString()}
`).join('')}
https://kubesearch.dev/
${new Date().toISOString()}
`;
    return sitemap;
  }
}
-------------------------------------------------------------------------------- /web/src/App.tsx: --------------------------------------------------------------------------------
import { BrowserRouter, HashRouter, Route, RouterProvider, Routes, createHashRouter } from 'react-router-dom'
import HelmRelease from './pages/helm-release';
import Home from './pages/index';
import styles from "./index.css?inline"
import { SearchBar, SearchMode } from './components/search';
import { AppData as HRAppData, denormalize } from './generators/helm-release/models';
import GitHubButton from 'react-github-btn';
import { Top } from './pages/top';
import { Repo } from './pages/repo';
import Grep from './pages/grep';
import Image from './pages/image';
import { useEffect, useRef, useState } from 'react';
import HRSearchResults, { SearchInterface } from './components/search/hr';

export type AppData = HRAppData;


// Root component: wires the search bar, URL-hash synchronisation and the
// hr/grep/image search modes together.
export default function App(props: AppData & { pageData: any }) {
  const { pageData, repoAlsoHas,
} = props;
  const releases = denormalize(props).releases;
  const [search, setSearch2] = useState("");
  const childRef = useRef(null);

  // NOTE(review): type parameters of useState/useRef appear stripped in this
  // copy (likely useState<SearchMode | undefined>) — confirm against original.
  const [mode, setMode] = useState(undefined);
  // Client-only effect: keep the search box and window.location.hash in sync.
  // The hook sits behind an SSR guard; import.meta.env.SSR is constant for a
  // given bundle, so the hook order is stable in practice despite the
  // conditional call.
  if (!import.meta.env.SSR) {
    useEffect(() => {
      let handler: any;
      // Returns true when the hash changed the search state (legacy #/top,
      // #/repo: and #/chart: forms are redirected/normalised here).
      function checkHash() {
        let hash = window.location.hash;
        if (hash === "#/top") {
          window.location.href = "/top"
        } else if (hash.startsWith("#/repo:")) {
          window.location.href = "/repo/" + hash.slice("#/repo:".length)
        } else if (hash.startsWith("#/")) {
          hash = hash.slice(2);
          if (hash.startsWith("chart:")) {
            hash = hash.slice("chart:".length)
          }
          // decode
          hash = decodeURIComponent(hash);
          if (search !== hash) {
            setSearch2(hash);
            return true;
          }
        } else if (hash.length > 1) {
          // plain "#query" form
          hash = hash.slice(1);
          hash = decodeURIComponent(hash);
          if (search !== hash) {
            setSearch2(hash);
            return true;
          }
        }
        return false;
      }
      if (!checkHash()) {
        if (window.location.hash !== search) {
          // debounce writing the current search back into the hash
          handler = setTimeout(() => {
            if (search) {
              history.replaceState(undefined, "", "#" + search);
            } else if (!search && window.location.hash) {
              history.replaceState(undefined, "", window.location.pathname + window.location.search);
            }
          }, 100);
        }
      }
      window.addEventListener("popstate", checkHash);
      return () => {
        if (handler) {
          clearTimeout(handler);
        }
        window.removeEventListener("popstate", checkHash);
      };
    }, []);
  }
  // Update search state and mirror it into the hash immediately.
  const setSearch = (s: string) => {
    if (s.length === 0) {
      history.replaceState(undefined, "", window.location.pathname + window.location.search);
    } else {
      history.replaceState(undefined, "", "#" + s)
    }
    setSearch2(s);
  };
  // Keep the current mode and the pathname (/grep, /image, /) consistent,
  // redirecting with the hash preserved when they disagree.
  useEffect(() => {
    // on grep mode, redirect to /grep and keep location hash
    const isOnGrepPage = window.location.pathname === "/grep";
    const isOnImagePage = window.location.pathname === "/image";
    if (mode === "grep" && !isOnGrepPage) {
      // history.replaceState(undefined, "", "/grep" + window.location.hash);
      const s = search === "grep" ? "grep " : search;
      window.location.href = "/grep#" + encodeURI(s);
    }
    else if (mode === "image" && !isOnImagePage) {
      window.location.href = "/image#" + encodeURI(search);
    }
    // if not grep mode, but on grep page, redirect to / and keep location hash
    else if (isOnGrepPage) {
      if (mode && (mode !== "grep")) {
        window.location.href = "/" + encodeURI(window.location.hash);
      } else {
        setMode("grep");
      }
    } else if (isOnImagePage) {
      if (mode && (mode !== "image")) {
        window.location.href = "/" + encodeURI(window.location.hash);
      } else {
        setMode("image");
      }
    } else if (!mode) {
      setMode("hr");
    }
  }, [mode, search]);
  // NOTE(review): the JSX of this return appears stripped in this copy of the
  // file (the component continues beyond this excerpt).
  return (