├── .formatter.exs ├── .gitignore ├── .tool-versions ├── Dockerfile ├── Makefile ├── README.md ├── config ├── config.exs ├── dev.exs ├── prod.exs ├── runtime.exs └── test.exs ├── data ├── backup │ └── .gitkeep └── khepri │ └── .gitkeep ├── docker-compose-metrics.yml ├── docker-compose.yml ├── docs ├── README.md ├── backup-download.md └── backup-restore.md ├── grafana └── provisioning │ └── datasources │ └── victoriametrics.yml ├── lib ├── hydra_srt.ex ├── hydra_srt │ ├── api.ex │ ├── api │ │ ├── destination.ex │ │ └── route.ex │ ├── application.ex │ ├── db.ex │ ├── erl_sys_mon.ex │ ├── helpers.ex │ ├── metrics.ex │ ├── metrics │ │ └── connection.ex │ ├── monitoring │ │ └── os_mon.ex │ ├── process_monitor.ex │ ├── release.ex │ ├── repo.ex │ ├── route_handler.ex │ ├── routes_supervisor.ex │ ├── signal_handler.ex │ └── unix_sock_handler.ex ├── hydra_srt_web.ex ├── hydra_srt_web │ ├── controllers │ │ ├── auth_controller.ex │ │ ├── backup_controller.ex │ │ ├── changeset_json.ex │ │ ├── destination_controller.ex │ │ ├── destination_json.ex │ │ ├── error_json.ex │ │ ├── fallback_controller.ex │ │ ├── health_controller.ex │ │ ├── node_controller.ex │ │ ├── page_controller.ex │ │ ├── route_controller.ex │ │ ├── route_json.ex │ │ └── system_controller.ex │ ├── endpoint.ex │ ├── router.ex │ └── telemetry.ex └── mix │ └── tasks │ └── compile_c_app.ex ├── mix.exs ├── mix.lock ├── native ├── .clang-format ├── .gitignore ├── Makefile ├── README.md ├── build │ └── .gitkeep ├── include │ ├── gst_pipeline.h │ └── unix_socket.h ├── src │ ├── gst_pipeline.c │ ├── main.c │ └── unix_socket.c └── tests │ ├── test_gst_pipeline.c │ └── test_unix_socket.c ├── priv ├── repo │ ├── migrations │ │ ├── .formatter.exs │ │ ├── 20250219145148_create_routes.exs │ │ └── 20250220162451_create_destinations.exs │ └── seeds.exs └── static │ ├── favicon.ico │ └── robots.txt ├── rel ├── env.bat.eex ├── env.sh.eex ├── overlays │ └── bin │ │ ├── migrate │ │ ├── migrate.bat │ │ ├── server │ │ └── server.bat 
└── vm.args.eex ├── run.sh ├── test ├── hydra_srt │ ├── api_test.exs │ ├── monitoring_test.exs │ ├── route_handler_test.exs │ └── unix_sock_handler_test.exs ├── hydra_srt_web │ └── controllers │ │ ├── destination_controller_test.exs │ │ ├── error_json_test.exs │ │ └── route_controller_test.exs ├── support │ ├── conn_case.ex │ ├── data_case.ex │ └── fixtures │ │ └── api_fixtures.ex └── test_helper.exs └── web_app ├── .gitignore ├── README.md ├── eslint.config.js ├── index.html ├── package-lock.json ├── package.json ├── public ├── logo.webp ├── logo2.webp └── vite.svg ├── src ├── App.css ├── App.jsx ├── assets │ └── react.svg ├── components │ └── MainLayout.jsx ├── index.css ├── main.jsx ├── pages │ ├── Dashboard.jsx │ ├── Login.jsx │ ├── Settings.jsx │ ├── routes │ │ ├── RouteDestEdit.jsx │ │ ├── RouteItem.jsx │ │ ├── RouteSourceEdit.jsx │ │ └── Routes.jsx │ └── system │ │ ├── SystemNodes.jsx │ │ └── SystemPipelines.jsx └── utils │ ├── api.js │ ├── auth.js │ └── constants.js ├── vite.config.js └── yarn.lock /.formatter.exs: -------------------------------------------------------------------------------- 1 | [ 2 | import_deps: [:ecto, :ecto_sql, :phoenix], 3 | subdirectories: ["priv/*/migrations"], 4 | inputs: ["*.{ex,exs}", "{config,lib,test}/**/*.{ex,exs}", "priv/*/seeds.exs"] 5 | ] 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where 3rd-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 
17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Temporary files, for example, from tests. 23 | /tmp/ 24 | 25 | # Ignore package tarball (built via "mix hex.build"). 26 | hydra_srt-*.tar 27 | 28 | # Database files 29 | *.db 30 | *.db-* 31 | 32 | /khepri#* 33 | 34 | /data/backup/* 35 | !/data/backup/.gitkeep 36 | /data/khepri/* 37 | !/data/khepri/.gitkeep 38 | -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | elixir 1.17.1-otp-27 2 | erlang 27.0 3 | nodejs 18.13.0 -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG ELIXIR_VERSION=1.17.3 2 | ARG OTP_VERSION=27.1.2 3 | ARG DEBIAN_VERSION=bookworm-20241111-slim 4 | ARG BUILDER_IMAGE="hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}" 5 | ARG RUNNER_IMAGE="debian:${DEBIAN_VERSION}" 6 | 7 | FROM ${BUILDER_IMAGE} as builder 8 | 9 | ENV MIX_ENV="prod" 10 | 11 | # Install build dependencies 12 | RUN apt-get update -y \ 13 | && apt-get install -y build-essential git curl ca-certificates gnupg \ 14 | && apt-get clean 15 | 16 | # Install Node.js 18.x 17 | RUN mkdir -p /etc/apt/keyrings \ 18 | && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key \ 19 | | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ 20 | && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_18.x nodistro main" \ 21 | > /etc/apt/sources.list.d/nodesource.list \ 22 | && apt-get update -y \ 23 | && apt-get install -y nodejs \ 24 | && apt-get clean 25 | 26 | # Install GStreamer and related libraries for the C application 27 | RUN apt-get update -y \ 28 | && apt-get install -y \ 29 | libgstreamer1.0-dev \ 30 | libgstreamer-plugins-base1.0-dev \ 31 | 
gstreamer1.0-plugins-good \ 32 | gstreamer1.0-plugins-bad \ 33 | libcjson-dev \ 34 | libsrt-openssl-dev \ 35 | libcmocka-dev \ 36 | libglib2.0-dev \ 37 | pkg-config \ 38 | && apt-get clean 39 | 40 | # Prepare build directory 41 | WORKDIR /app 42 | 43 | # Install hex + rebar 44 | RUN mix local.hex --force && \ 45 | mix local.rebar --force 46 | 47 | # Install mix dependencies 48 | COPY mix.exs mix.lock ./ 49 | RUN mix deps.get --only $MIX_ENV 50 | RUN mkdir config 51 | 52 | # Copy compile-time config files before we compile dependencies 53 | # to ensure any relevant config change will trigger the dependencies 54 | # to be re-compiled. 55 | COPY config/config.exs config/${MIX_ENV}.exs config/ 56 | RUN mix deps.compile 57 | 58 | # Copy the rest of the application code 59 | COPY priv priv 60 | COPY lib lib 61 | COPY native native 62 | COPY web_app web_app 63 | COPY rel rel 64 | 65 | # Build the C application - ensure we clean first to force a rebuild for Linux 66 | RUN cd native && make clean && make 67 | 68 | # Build the web application 69 | RUN cd web_app \ 70 | && npm install \ 71 | && npm run build 72 | 73 | # Compile the Elixir application 74 | RUN mix compile 75 | 76 | # Changes to config/runtime.exs don't require recompiling the code 77 | COPY config/runtime.exs config/ 78 | RUN mix release 79 | 80 | # Start a new build stage so that the final image will only contain 81 | # the compiled release and other runtime necessities 82 | FROM ${RUNNER_IMAGE} 83 | 84 | ENV LANG en_US.UTF-8 85 | ENV LANGUAGE en_US:en 86 | ENV LC_ALL en_US.UTF-8 87 | ENV MIX_ENV="prod" 88 | ENV ECTO_IPV6 false 89 | # Use IPv4 instead of IPv6 for Erlang distribution 90 | ENV ERL_AFLAGS "-proto_dist inet_tcp" 91 | # Set the DATABASE_DATA_DIR environment variable to point to the mounted volume 92 | ENV DATABASE_DATA_DIR="/app/khepri" 93 | 94 | # Install runtime dependencies 95 | RUN apt-get update -y && \ 96 | apt-get install -y \ 97 | libstdc++6 \ 98 | openssl \ 99 | libncurses5 \ 100 | 
locales \ 101 | iptables \ 102 | sudo \ 103 | tini \ 104 | curl \ 105 | gstreamer1.0-plugins-good \ 106 | gstreamer1.0-plugins-bad \ 107 | libcjson1 \ 108 | libsrt1.5-openssl \ 109 | libgstreamer1.0-0 \ 110 | libgstreamer-plugins-base1.0-0 \ 111 | && apt-get clean && rm -f /var/lib/apt/lists/*_* 112 | 113 | # Set the locale 114 | RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen 115 | 116 | WORKDIR "/app" 117 | 118 | # Create directory structure for mounted volumes 119 | # These directories will be overridden by the volumes 120 | RUN mkdir -p /app/khepri /app/backup && \ 121 | chmod -R 777 /app/khepri /app/backup 122 | 123 | # Copy the release from the builder stage 124 | COPY --from=builder /app/_build/prod/rel/hydra_srt ./ 125 | 126 | COPY run.sh run.sh 127 | RUN chmod +x run.sh 128 | 129 | # Set the entrypoint 130 | ENTRYPOINT ["/usr/bin/tini", "-s", "-g", "--", "/app/run.sh"] 131 | CMD ["/app/bin/server"] -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | help: 2 | @make -qpRr | egrep -e '^[a-z].*:$$' | sed -e 's~:~~g' | sort 3 | 4 | .PHONY: dev 5 | dev: 6 | MIX_ENV=dev \ 7 | VAULT_ENC_KEY="12345678901234567890123456789012" \ 8 | API_JWT_SECRET=dev \ 9 | METRICS_JWT_SECRET=dev \ 10 | VICTORIOMETRICS_HOST=localhost \ 11 | VICTORIOMETRICS_PORT=8428 \ 12 | API_AUTH_USERNAME=admin \ 13 | API_AUTH_PASSWORD=password123 \ 14 | ERL_AFLAGS="-kernel shell_history enabled +zdbbl 2097151" \ 15 | iex --name hydra@127.0.0.1 --cookie cookie -S mix phx.server --no-halt 16 | 17 | clean: 18 | rm -rf _build && rm -rf deps 19 | 20 | dev_udp0: 21 | ffmpeg -f lavfi -re -i smptebars=duration=6000:size=1280x720:rate=25 -f lavfi -re -i sine=frequency=1000:duration=6000:sample_rate=44100 \ 22 | -pix_fmt yuv420p -c:v libx264 -b:v 1000k -g 25 -keyint_min 100 -profile:v baseline -preset veryfast \ 23 | -f mpegts "udp://224.0.0.3:1234?pkt_size=1316" 24 
| 25 | dev_udp: 26 | ffmpeg -f lavfi -re -i smptebars=duration=6000:size=1280x720:rate=25 -f lavfi -re -i sine=frequency=1000:duration=6000:sample_rate=44100 \ 27 | -pix_fmt yuv420p -c:v libx264 -b:v 1000k -g 25 -keyint_min 100 -profile:v baseline -preset veryfast \ 28 | -f mpegts "srt://127.0.0.1:4201?mode=listener" 29 | 30 | dev_play: 31 | ffplay udp://224.0.0.3:1234 32 | 33 | dev_play1: 34 | srt-live-transmit "srt://127.0.0.1:4201?mode=listener" udp://:1234 -v -statspf default -stats 1000 35 | 36 | dev_udp1: 37 | ffmpeg -i "srt://127.0.0.1:4201?mode=caller" -f mpegts udp://239.0.0.1:1234?pkt_size=1316 38 | 39 | docker_restart: 40 | docker-compose down && docker-compose up -d 41 | 42 | docker_ssh: 43 | docker compose exec hydra_srt bash 44 | 45 | docker_logs: 46 | docker compose logs -f 47 | 48 | docker_stop: 49 | docker compose down 50 | 51 | docker_start: 52 | docker compose up -d 53 | 54 | docker_clean: 55 | docker compose down && docker compose rm -f hydra_srt 56 | -------------------------------------------------------------------------------- /config/config.exs: -------------------------------------------------------------------------------- 1 | # This file is responsible for configuring your application 2 | # and its dependencies with the aid of the Config module. 3 | # 4 | # This configuration file is loaded before any dependency and 5 | # is restricted to this project. 
6 | 7 | # General application configuration 8 | import Config 9 | 10 | config :hydra_srt, 11 | ecto_repos: [HydraSrt.Repo], 12 | generators: [timestamp_type: :utc_datetime, binary_id: true] 13 | 14 | # Configures the endpoint 15 | config :hydra_srt, HydraSrtWeb.Endpoint, 16 | url: [host: "localhost"], 17 | adapter: Phoenix.Endpoint.Cowboy2Adapter, 18 | render_errors: [ 19 | formats: [json: HydraSrtWeb.ErrorJSON], 20 | layout: false 21 | ], 22 | pubsub_server: HydraSrt.PubSub, 23 | live_view: [signing_salt: "+CT93K1p"], 24 | server: true 25 | 26 | # Configures Elixir's Logger 27 | config :logger, :console, 28 | format: "$time $metadata[$level] $message\n", 29 | metadata: [:request_id] 30 | 31 | # Use Jason for JSON parsing in Phoenix 32 | config :phoenix, :json_library, Jason 33 | 34 | # Import environment specific config. This must remain at the bottom 35 | # of this file so it overrides the configuration defined above. 36 | import_config "#{config_env()}.exs" 37 | -------------------------------------------------------------------------------- /config/dev.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # Configure your database 4 | config :hydra_srt, HydraSrt.Repo, 5 | database: Path.expand("../hydra_srt_dev.db", __DIR__), 6 | pool_size: 5, 7 | stacktrace: true, 8 | show_sensitive_data_on_connection_error: true 9 | 10 | # For development, we disable any cache and enable 11 | # debugging and code reloading. 12 | # 13 | # The watchers configuration can be used to run external 14 | # watchers to your application. For example, we can use it 15 | # to bundle .js and .css sources. 16 | config :hydra_srt, HydraSrtWeb.Endpoint, 17 | # Binding to loopback ipv4 address prevents access from other machines. 18 | # Change to `ip: {0, 0, 0, 0}` to allow access from other machines. 
19 | http: [ip: {127, 0, 0, 1}, port: 4000], 20 | check_origin: false, 21 | code_reloader: true, 22 | debug_errors: true, 23 | secret_key_base: "9re8gLwrcmLnNcUbxe8xgKSCNfm8gIpgoBBiCXhV0dVfJMB8DVFB3QQJwOye0iIo", 24 | watchers: [] 25 | 26 | # ## SSL Support 27 | # 28 | # In order to use HTTPS in development, a self-signed 29 | # certificate can be generated by running the following 30 | # Mix task: 31 | # 32 | # mix phx.gen.cert 33 | # 34 | # Run `mix help phx.gen.cert` for more information. 35 | # 36 | # The `http:` config above can be replaced with: 37 | # 38 | # https: [ 39 | # port: 4001, 40 | # cipher_suite: :strong, 41 | # keyfile: "priv/cert/selfsigned_key.pem", 42 | # certfile: "priv/cert/selfsigned.pem" 43 | # ], 44 | # 45 | # If desired, both `http:` and `https:` keys can be 46 | # configured to run both http and https servers on 47 | # different ports. 48 | 49 | # Enable dev routes for dashboard and mailbox 50 | config :hydra_srt, dev_routes: true 51 | 52 | # Do not include metadata nor timestamps in development logs 53 | config :logger, :console, 54 | format: "$time [$level] $message $metadata\n", 55 | level: :debug, 56 | # level: :notice, 57 | metadata: [ 58 | :error_code, 59 | :file, 60 | :line, 61 | :pid 62 | ] 63 | 64 | # Set a higher stacktrace during development. Avoid configuring such 65 | # in production as building large stacktraces may be expensive. 66 | config :phoenix, :stacktrace_depth, 20 67 | 68 | # Initialize plugs at runtime for faster development compilation 69 | config :phoenix, :plug_init_mode, :runtime 70 | -------------------------------------------------------------------------------- /config/prod.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # Do not print debug messages in production 4 | config :logger, level: :info 5 | 6 | # Runtime production configuration, including reading 7 | # of environment variables, is done on config/runtime.exs. 
8 | -------------------------------------------------------------------------------- /config/runtime.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # config/runtime.exs is executed for all environments, including 4 | # during releases. It is executed after compilation and before the 5 | # system starts, so it is typically used to load production configuration 6 | # and secrets from environment variables or elsewhere. Do not define 7 | # any compile-time configuration in here, as it won't be applied. 8 | # The block below contains prod specific runtime configuration. 9 | 10 | # Environment variables: 11 | # - PHX_SERVER: Set to true to enable the server 12 | # - API_AUTH_USERNAME: Username for API authentication 13 | # - API_AUTH_PASSWORD: Password for API authentication 14 | # - DATABASE_DATA_DIR: Directory for Khepri database storage 15 | # - VICTORIAMETRICS_HOST: Host for VictoriaMetrics metrics export 16 | # - VICTORIAMETRICS_PORT: Port for VictoriaMetrics metrics export 17 | # - PORT: HTTP port for the API server 18 | # - PHX_HOST: Host for the Phoenix endpoint 19 | 20 | # ## Using releases 21 | # 22 | # If you use `mix release`, you need to explicitly enable the server 23 | # by passing the PHX_SERVER=true when you start it: 24 | # 25 | # PHX_SERVER=true bin/hydra_srt start 26 | # 27 | # Alternatively, you can use `mix phx.gen.release` to generate a `bin/server` 28 | # script that automatically sets the env var above. 29 | if System.get_env("PHX_SERVER") do 30 | config :hydra_srt, HydraSrtWeb.Endpoint, server: true 31 | end 32 | 33 | if config_env() != :test do 34 | export_metrics? 
= 35 | !!(System.get_env("VICTORIOMETRICS_HOST") && System.get_env("VICTORIOMETRICS_PORT")) 36 | 37 | config :hydra_srt, 38 | export_metrics?: export_metrics?, 39 | api_auth_username: 40 | System.get_env("API_AUTH_USERNAME") || raise("API_AUTH_USERNAME is not set"), 41 | api_auth_password: 42 | System.get_env("API_AUTH_PASSWORD") || raise("API_AUTH_PASSWORD is not set") 43 | 44 | # database_path = 45 | # System.get_env("DATABASE_PATH") || 46 | # raise """ 47 | # environment variable DATABASE_PATH is missing. 48 | # For example: /etc/hydra_srt/hydra_srt.db 49 | # """ 50 | 51 | # config :hydra_srt, HydraSrt.Repo, 52 | # database: database_path, 53 | # pool_size: String.to_integer(System.get_env("POOL_SIZE") || "5") 54 | 55 | # The secret key base is used to sign/encrypt cookies and other secrets. 56 | # A default value is used in config/dev.exs and config/test.exs but you 57 | # want to use a different value for prod and you most likely don't want 58 | # to check this value into version control, so we use an environment 59 | # variable instead. 60 | # secret_key_base = 61 | # System.get_env("SECRET_KEY_BASE") || 62 | # raise """ 63 | # environment variable SECRET_KEY_BASE is missing. 64 | # You can generate one by calling: mix phx.gen.secret 65 | # """ 66 | 67 | host = System.get_env("PHX_HOST") || "example.com" 68 | port = String.to_integer(System.get_env("PORT") || "4000") 69 | 70 | # config :hydra_srt, :dns_cluster_query, System.get_env("DNS_CLUSTER_QUERY") 71 | 72 | config :hydra_srt, HydraSrtWeb.Endpoint, 73 | url: [host: host, port: port, scheme: "http"], 74 | http: [ 75 | # Always bind to all IPv4 interfaces (0.0.0.0) in Docker 76 | ip: {0, 0, 0, 0}, 77 | port: port 78 | ], 79 | secret_key_base: nil 80 | 81 | if export_metrics? 
do 82 | config :hydra_srt, HydraSrt.Metrics.Connection, 83 | host: System.get_env("VICTORIOMETRICS_HOST"), 84 | port: System.get_env("VICTORIOMETRICS_PORT"), 85 | version: :v2 86 | end 87 | end 88 | -------------------------------------------------------------------------------- /config/test.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # Configure your database 4 | # 5 | # The MIX_TEST_PARTITION environment variable can be used 6 | # to provide built-in test partitioning in CI environment. 7 | # Run `mix help test` for more information. 8 | config :hydra_srt, HydraSrt.Repo, 9 | database: Path.expand("../hydra_srt_test.db", __DIR__), 10 | pool_size: 5, 11 | pool: Ecto.Adapters.SQL.Sandbox 12 | 13 | # We don't run a server during test. If one is required, 14 | # you can enable the server option below. 15 | config :hydra_srt, HydraSrtWeb.Endpoint, 16 | http: [ip: {127, 0, 0, 1}, port: 4002], 17 | secret_key_base: "o4JBd+wOK5JJIHHOZ/WMk00xrG9dN0//FF1MIBkDPzM+nRTN+5+L9hvMVX+805L0", 18 | server: false 19 | 20 | # Print only warnings and errors during test 21 | config :logger, level: :warning 22 | 23 | # Initialize plugs at runtime for faster test compilation 24 | config :phoenix, :plug_init_mode, :runtime 25 | -------------------------------------------------------------------------------- /data/backup/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abc3/hydra-srt/5b5b32527dd24a91e7778ac8c7f8c8fadc8dcc8d/data/backup/.gitkeep -------------------------------------------------------------------------------- /data/khepri/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abc3/hydra-srt/5b5b32527dd24a91e7778ac8c7f8c8fadc8dcc8d/data/khepri/.gitkeep -------------------------------------------------------------------------------- /docker-compose-metrics.yml: 
-------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | victoriametrics: 5 | image: victoriametrics/victoria-metrics:latest 6 | container_name: victoriametrics 7 | ports: 8 | - "8428:8428" 9 | volumes: 10 | - vm-data:/victoria-metrics-data 11 | command: 12 | - "--storageDataPath=/victoria-metrics-data" 13 | - "--httpListenAddr=:8428" 14 | restart: unless-stopped 15 | networks: 16 | - monitoring-network 17 | 18 | grafana: 19 | image: grafana/grafana:latest 20 | container_name: grafana 21 | ports: 22 | - "3000:3000" 23 | environment: 24 | - GF_SECURITY_ADMIN_USER=admin 25 | - GF_SECURITY_ADMIN_PASSWORD=admin 26 | - GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource 27 | volumes: 28 | - grafana-data:/var/lib/grafana 29 | - ./grafana/provisioning:/etc/grafana/provisioning 30 | depends_on: 31 | - victoriametrics 32 | restart: unless-stopped 33 | networks: 34 | - monitoring-network 35 | 36 | victorialogs: 37 | image: victoriametrics/victoria-logs:latest 38 | container_name: victorialogs 39 | ports: 40 | - "9428:9428" 41 | volumes: 42 | - vl-data:/victoria-logs-data 43 | command: 44 | - "--httpListenAddr=:9428" 45 | - "--storageDataPath=/victoria-logs-data" 46 | restart: unless-stopped 47 | networks: 48 | - monitoring-network 49 | 50 | volumes: 51 | vm-data: 52 | grafana-data: 53 | vl-data: 54 | 55 | networks: 56 | monitoring-network: 57 | driver: bridge -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | hydra_srt: 5 | network_mode: "host" 6 | build: 7 | context: . 
8 | dockerfile: Dockerfile 9 | restart: unless-stopped 10 | environment: 11 | - NODE_IP=127.0.0.1 12 | - RELEASE_COOKIE="Vt8gXnEI2zRH4l3eqUEBrcGQ22mHSj73CgkYm02u" 13 | - PHX_SERVER=true 14 | - PORT=4000 15 | - PHX_HOST=0.0.0.0 16 | - API_AUTH_USERNAME=admin 17 | - API_AUTH_PASSWORD=password123 18 | - DATABASE_DATA_DIR=/app/khepri 19 | - RLIMIT_NOFILE=65536 20 | volumes: 21 | - ./data/khepri:/app/khepri 22 | - ./data/backup:/app/backup 23 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Hydra SRT Documentation 2 | 3 | This directory contains documentation for the Hydra SRT system. 4 | 5 | ## Contents 6 | 7 | - [Backup Download Feature](backup-download.md) - Documentation for the routes backup download feature 8 | - [Backup and Restore System](backup-restore.md) - Documentation for the complete system backup and restore functionality 9 | 10 | ## Documentation Structure 11 | 12 | The documentation is organized as follows: 13 | 14 | - **Feature Documentation**: Detailed explanations of specific features 15 | - **API Documentation**: Information about API endpoints and usage 16 | - **User Guides**: Instructions for end users 17 | - **Developer Guides**: Information for developers working on the system 18 | 19 | ## Contributing to Documentation 20 | 21 | When adding new documentation: 22 | 23 | 1. Create a new Markdown file with a descriptive name 24 | 2. Add a link to the new file in this README 25 | 3. Follow the existing documentation style and format 26 | 4. 
Include sections for: 27 | - Overview 28 | - How it works 29 | - Implementation details 30 | - Examples (if applicable) 31 | - Future enhancements (if applicable) 32 | 33 | ## Documentation Standards 34 | 35 | - Use Markdown format for all documentation 36 | - Use descriptive headings and subheadings 37 | - Include code examples where appropriate 38 | - Keep documentation up to date with code changes 39 | -------------------------------------------------------------------------------- /docs/backup-download.md: -------------------------------------------------------------------------------- 1 | # Backup Download Feature 2 | 3 | ## Overview 4 | 5 | The Backup Download feature allows users to download a JSON backup of all routes and their destinations from the Hydra SRT system. This document explains how the feature works, its security considerations, and implementation details. 6 | 7 | ## How It Works 8 | 9 | The backup download process uses a time-limited, one-time-use link to ensure that only authenticated users can download backups while avoiding authentication issues with direct file downloads. 10 | 11 | ### Process Flow 12 | 13 | 1. **User Initiates Download**: 14 | 15 | - User navigates to the Settings page and selects the "Routes" tab 16 | - User clicks the "Export Routes as JSON" button 17 | - Frontend makes an authenticated API request to get a download link 18 | 19 | 2. **Backend Creates Link**: 20 | 21 | - Backend generates a unique session ID (UUID) 22 | - Session ID is stored in cache with a 5-minute expiration 23 | - Backend returns a download URL containing the session ID 24 | 25 | 3. **Frontend Opens Download Link**: 26 | 27 | - Frontend receives the URL and opens it in a new browser tab/window 28 | 29 | 4. 
**Backend Processes Download Request**: 30 | 31 | - Backend verifies the session ID is valid by checking the cache 32 | - If valid, the backend generates the backup file (JSON of all routes with destinations) 33 | - The session ID is deleted from cache to prevent reuse (one-time use) 34 | - The backup file is sent to the browser as a download with a timestamped filename 35 | 36 | 5. **Security Fallbacks**: 37 | - If the session ID is invalid or expired, a 403 Forbidden error is returned 38 | - The download link expires after 5 minutes if not used 39 | 40 | ## Security Considerations 41 | 42 | This approach addresses several security concerns: 43 | 44 | 1. **Authentication**: Only authenticated users can request a download link 45 | 2. **Link Security**: The download link contains a random UUID that is nearly impossible to guess 46 | 3. **Time Limitation**: Links expire after 5 minutes 47 | 4. **One-Time Use**: Each link can only be used once 48 | 5. **No Authentication Leakage**: The download process doesn't expose authentication tokens in URLs 49 | 50 | ## Implementation Details 51 | 52 | ### Backend Components 53 | 54 | 1. **Controller**: `HydraSrtWeb.BackupController` 55 | 56 | - `create_download_link/2`: Generates and caches a session ID, returns a download link 57 | - `download/2`: Verifies the session ID and serves the backup file with a timestamped filename 58 | 59 | 2. **Router Configuration**: 60 | 61 | - `/api/backup/create-download-link`: Authenticated endpoint to get a download link 62 | - `/backup/:session_id/download`: Public endpoint that serves the file after verifying the session ID 63 | 64 | 3. **Caching**: 65 | - Uses `Cachex` to store session IDs with a TTL of 5 minutes 66 | - Cache key format: `"backup_session:#{session_id}"` 67 | 68 | ### Frontend Components 69 | 70 | 1. 
**API Utility**: `backupApi` in `api.js` 71 | 72 | - `getDownloadLink()`: Makes an authenticated request to get a secure download link 73 | - `download()`: Gets a secure link and opens it in a new tab/window 74 | 75 | 2. **UI Component**: "Export Routes as JSON" button in `Settings.jsx` 76 | - Located in the "Routes" tab of the Settings page 77 | - Calls `backupApi.download()` when clicked 78 | - Shows success/error messages to the user 79 | 80 | ## Data Format 81 | 82 | The downloaded backup file is a JSON file containing all routes with their destinations. The file is named `hydra-routes-MM-DD-YY-HH:MM:SS.json` (with a timestamp) and has the following structure: 83 | 84 | ```json 85 | [ 86 | { 87 | "id": "route-uuid", 88 | "name": "Route Name", 89 | "enabled": true, 90 | "status": "started", 91 | "created_at": "2023-01-01T00:00:00Z", 92 | "updated_at": "2023-01-02T00:00:00Z", 93 | "destinations": [ 94 | { 95 | "id": "destination-uuid", 96 | "route_id": "route-uuid", 97 | "name": "Destination Name", 98 | "enabled": true, 99 | "created_at": "2023-01-01T00:00:00Z", 100 | "updated_at": "2023-01-02T00:00:00Z" 101 | } 102 | ] 103 | } 104 | ] 105 | ``` 106 | 107 | ## Future Enhancements 108 | 109 | Potential future enhancements for the backup feature: 110 | 111 | 1. **Scheduled Backups**: Automatically create backups on a schedule 112 | 2. **Backup History**: Keep a history of backups with the ability to restore from any point 113 | 3. **Selective Backup**: Allow users to select specific routes to back up 114 | 4. **Different Formats**: Support additional formats like YAML or CSV 115 | -------------------------------------------------------------------------------- /docs/backup-restore.md: -------------------------------------------------------------------------------- 1 | # Backup and Restore System 2 | 3 | ## Overview 4 | 5 | The Backup and Restore system allows users to create complete system backups and restore them when needed. 
This feature ensures data safety and provides disaster recovery capabilities for the Hydra SRT system. 6 | 7 | ## Features 8 | 9 | The system provides two main functions: 10 | 11 | 1. **System Backup**: Create a complete binary backup of the entire system configuration 12 | 2. **System Restore**: Restore the system from a previously created backup file 13 | 14 | ## How It Works 15 | 16 | ### Backup Process 17 | 18 | 1. **User Initiates Backup**: 19 | 20 | - User navigates to the Settings page and clicks the "Download Backup" button 21 | - Frontend makes an authenticated API request to get a secure download link 22 | 23 | 2. **Backend Creates Link**: 24 | 25 | - Backend generates a unique session ID (UUID) 26 | - Session ID is stored in cache with a 5-minute expiration 27 | - Backend returns a download URL containing the session ID 28 | 29 | 3. **Frontend Opens Download Link**: 30 | 31 | - Frontend receives the URL and opens it in a new browser tab/window 32 | 33 | 4. **Backend Processes Download Request**: 34 | - Backend verifies the session ID is valid by checking the cache 35 | - If valid, the backend generates a binary backup file containing all system data 36 | - The session ID is deleted from cache to prevent reuse (one-time use) 37 | - The backup file is sent to the browser as a download with a timestamped filename 38 | 39 | ### Restore Process 40 | 41 | 1. **User Initiates Restore**: 42 | 43 | - User navigates to the Settings page and clicks the "Select Backup File" button 44 | - User selects a backup file (.backup extension) from their local system 45 | - A confirmation dialog appears, warning about data replacement 46 | 47 | 2. **User Confirms Restore**: 48 | 49 | - After confirmation, the frontend sends the backup file to the backend 50 | - A loading notification appears during the process 51 | 52 | 3. 
**Backend Processes Restore**: 53 | 54 | - Backend receives the binary data and deserializes it 55 | - The system data is completely replaced with the data from the backup 56 | - Backend returns a success or error message 57 | 58 | 4. **Frontend Shows Result**: 59 | - Frontend displays a success or error notification based on the backend response 60 | 61 | ## Security Considerations 62 | 63 | 1. **Authentication**: Only authenticated users can perform backup and restore operations 64 | 2. **Data Integrity**: Backup files contain serialized Erlang terms that maintain data integrity 65 | 3. **Secure Download**: Backup downloads use secure, time-limited, one-time-use links 66 | 4. **Confirmation**: Restore operations require explicit user confirmation to prevent accidental data loss 67 | 68 | ## Implementation Details 69 | 70 | ### Backend Components 71 | 72 | 1. **Controller**: `HydraSrtWeb.BackupController` 73 | 74 | - `create_backup_download_link/2`: Generates and caches a session ID for binary backup 75 | - `download_backup/2`: Verifies the session ID and serves the binary backup file 76 | - `restore/2`: Processes the uploaded backup file and restores the system 77 | 78 | 2. **Database Module**: `HydraSrt.Db` 79 | 80 | - `backup/0`: Creates a binary backup of all system data 81 | - `restore_backup/1`: Restores the system from a binary backup 82 | 83 | 3. **Router Configuration**: 84 | - `/api/backup/create-backup-download-link`: Authenticated endpoint to get a download link 85 | - `/backup/:session_id/download_backup`: Public endpoint that serves the backup file 86 | - `/api/restore`: Special endpoint for handling binary data uploads 87 | 88 | ### Frontend Components 89 | 90 | 1. 
**API Utility**: `backupApi` in `api.js` 91 | 92 | - `getBackupDownloadLink()`: Makes an authenticated request to get a secure download link 93 | - `downloadBackup()`: Gets a secure link and opens it in a new tab/window 94 | - `restore(file)`: Uploads a backup file to the backend for restoration 95 | 96 | 2. **UI Component**: Settings page in `Settings.jsx` 97 | - "Download Backup" button for creating and downloading backups 98 | - "Select Backup File" button for initiating the restore process 99 | - Confirmation dialog before performing restore 100 | - Notifications for operation status (loading, success, error) 101 | 102 | ## Backup File Format 103 | 104 | The backup file is a binary file with the `.backup` extension containing serialized Erlang terms. The file represents the complete state of the Khepri database, including all routes, destinations, and system configuration. 105 | 106 | The filename format is: `hydra-srt-MM-DD-YY-HH:MM:SS.backup` where the timestamp represents the creation time. 107 | 108 | ## Future Enhancements 109 | 110 | Potential future enhancements for the backup and restore system: 111 | 112 | 1. **Scheduled Backups**: Automatically create backups on a schedule 113 | 2. **Backup History**: Keep a history of backups with the ability to restore from any point 114 | 3. **Selective Restore**: Allow users to select specific components to restore 115 | 4. **Cloud Storage**: Integrate with cloud storage providers for backup storage 116 | 5. 
**Backup Encryption**: Add encryption to backup files for additional security 117 | -------------------------------------------------------------------------------- /grafana/provisioning/datasources/victoriametrics.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: VictoriaMetrics 5 | type: prometheus 6 | access: proxy 7 | url: http://victoriametrics:8428 8 | isDefault: true 9 | editable: true 10 | jsonData: 11 | timeInterval: 15s 12 | queryTimeout: 120s 13 | httpMethod: POST 14 | -------------------------------------------------------------------------------- /lib/hydra_srt.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt do 2 | @moduledoc false 3 | require Logger 4 | alias HydraSrt.Db 5 | 6 | @spec start_route(String.t()) :: {:ok, pid()} | {:error, term()} 7 | def start_route(id) do 8 | DynamicSupervisor.start_child( 9 | {:via, PartitionSupervisor, {HydraSrt.DynamicSupervisor, id}}, 10 | {HydraSrt.RoutesSupervisor, %{id: id}} 11 | ) 12 | end 13 | 14 | @spec get_route(String.t()) :: {:ok, pid()} | {:error, term()} 15 | def get_route(id) do 16 | case :syn.lookup(:routes, id) do 17 | {pid, _} when is_pid(pid) -> {:ok, pid} 18 | :undefined -> {:error, :not_found} 19 | end 20 | end 21 | 22 | @spec stop_route(String.t()) :: :ok | {:error, term()} 23 | def stop_route(id) do 24 | case get_route(id) do 25 | {:ok, pid} -> 26 | Supervisor.stop(pid, :normal) 27 | 28 | other -> 29 | HydraSrt.set_route_status(id, "stopped") 30 | other 31 | end 32 | end 33 | 34 | @spec restart_route(String.t()) :: {:ok, term()} | {:error, term()} 35 | def restart_route(id) do 36 | case stop_route(id) do 37 | {:error, reason} -> 38 | Logger.warning("Attempt to restart route #{id}, but: #{inspect(reason)}") 39 | 40 | _ -> 41 | nil 42 | end 43 | 44 | with {:ok, _pid} <- start_route(id) do 45 | {:ok, :restarted} 46 | end 47 | end 48 | 49 | @spec 
set_route_status(String.t(), String.t()) :: {:ok, map()} | {:error, term()} 50 | def set_route_status(id, status) do 51 | with {:ok, route} <- Db.update_route(id, %{"status" => status}) do 52 | {:ok, route} 53 | end 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /lib/hydra_srt/api.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Api do 2 | @moduledoc """ 3 | The Api context. 4 | """ 5 | 6 | import Ecto.Query, warn: false 7 | alias HydraSrt.Repo 8 | 9 | alias HydraSrt.Api.Route 10 | 11 | @doc """ 12 | Returns the list of routes. 13 | 14 | ## Examples 15 | 16 | iex> list_routes() 17 | [%Route{}, ...] 18 | 19 | """ 20 | def list_routes do 21 | Repo.all(Route) 22 | end 23 | 24 | @doc """ 25 | Gets a single route. 26 | 27 | Raises `Ecto.NoResultsError` if the Route does not exist. 28 | 29 | ## Examples 30 | 31 | iex> get_route!(123) 32 | %Route{} 33 | 34 | iex> get_route!(456) 35 | ** (Ecto.NoResultsError) 36 | 37 | """ 38 | def get_route!(id), do: Repo.get!(Route, id) 39 | 40 | @doc """ 41 | Creates a route. 42 | 43 | ## Examples 44 | 45 | iex> create_route(%{field: value}) 46 | {:ok, %Route{}} 47 | 48 | iex> create_route(%{field: bad_value}) 49 | {:error, %Ecto.Changeset{}} 50 | 51 | """ 52 | def create_route(attrs \\ %{}) do 53 | %Route{} 54 | |> Route.changeset(attrs) 55 | |> Repo.insert() 56 | end 57 | 58 | @doc """ 59 | Updates a route. 60 | 61 | ## Examples 62 | 63 | iex> update_route(route, %{field: new_value}) 64 | {:ok, %Route{}} 65 | 66 | iex> update_route(route, %{field: bad_value}) 67 | {:error, %Ecto.Changeset{}} 68 | 69 | """ 70 | def update_route(%Route{} = route, attrs) do 71 | route 72 | |> Route.changeset(attrs) 73 | |> Repo.update() 74 | end 75 | 76 | @doc """ 77 | Deletes a route. 
78 | 79 | ## Examples 80 | 81 | iex> delete_route(route) 82 | {:ok, %Route{}} 83 | 84 | iex> delete_route(route) 85 | {:error, %Ecto.Changeset{}} 86 | 87 | """ 88 | def delete_route(%Route{} = route) do 89 | Repo.delete(route) 90 | end 91 | 92 | @doc """ 93 | Returns an `%Ecto.Changeset{}` for tracking route changes. 94 | 95 | ## Examples 96 | 97 | iex> change_route(route) 98 | %Ecto.Changeset{data: %Route{}} 99 | 100 | """ 101 | def change_route(%Route{} = route, attrs \\ %{}) do 102 | Route.changeset(route, attrs) 103 | end 104 | 105 | alias HydraSrt.Api.Destination 106 | 107 | @doc """ 108 | Returns the list of destinations. 109 | 110 | ## Examples 111 | 112 | iex> list_destinations() 113 | [%Destination{}, ...] 114 | 115 | """ 116 | def list_destinations do 117 | Repo.all(Destination) 118 | end 119 | 120 | @doc """ 121 | Gets a single destination. 122 | 123 | Raises `Ecto.NoResultsError` if the Destination does not exist. 124 | 125 | ## Examples 126 | 127 | iex> get_destination!(123) 128 | %Destination{} 129 | 130 | iex> get_destination!(456) 131 | ** (Ecto.NoResultsError) 132 | 133 | """ 134 | def get_destination!(id), do: Repo.get!(Destination, id) 135 | 136 | @doc """ 137 | Creates a destination. 138 | 139 | ## Examples 140 | 141 | iex> create_destination(%{field: value}) 142 | {:ok, %Destination{}} 143 | 144 | iex> create_destination(%{field: bad_value}) 145 | {:error, %Ecto.Changeset{}} 146 | 147 | """ 148 | def create_destination(attrs \\ %{}) do 149 | %Destination{} 150 | |> Destination.changeset(attrs) 151 | |> Repo.insert() 152 | end 153 | 154 | @doc """ 155 | Updates a destination. 
156 | 157 | ## Examples 158 | 159 | iex> update_destination(destination, %{field: new_value}) 160 | {:ok, %Destination{}} 161 | 162 | iex> update_destination(destination, %{field: bad_value}) 163 | {:error, %Ecto.Changeset{}} 164 | 165 | """ 166 | def update_destination(%Destination{} = destination, attrs) do 167 | destination 168 | |> Destination.changeset(attrs) 169 | |> Repo.update() 170 | end 171 | 172 | @doc """ 173 | Deletes a destination. 174 | 175 | ## Examples 176 | 177 | iex> delete_destination(destination) 178 | {:ok, %Destination{}} 179 | 180 | iex> delete_destination(destination) 181 | {:error, %Ecto.Changeset{}} 182 | 183 | """ 184 | def delete_destination(%Destination{} = destination) do 185 | Repo.delete(destination) 186 | end 187 | 188 | @doc """ 189 | Returns an `%Ecto.Changeset{}` for tracking destination changes. 190 | 191 | ## Examples 192 | 193 | iex> change_destination(destination) 194 | %Ecto.Changeset{data: %Destination{}} 195 | 196 | """ 197 | def change_destination(%Destination{} = destination, attrs \\ %{}) do 198 | Destination.changeset(destination, attrs) 199 | end 200 | end 201 | -------------------------------------------------------------------------------- /lib/hydra_srt/api/destination.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Api.Destination do 2 | use Ecto.Schema 3 | import Ecto.Changeset 4 | 5 | @primary_key {:id, :binary_id, autogenerate: true} 6 | @foreign_key_type :binary_id 7 | schema "destinations" do 8 | field :alias, :string 9 | field :enabled, :boolean, default: false 10 | field :name, :string 11 | field :status, :string 12 | field :started_at, :utc_datetime 13 | field :stopped_at, :utc_datetime 14 | 15 | timestamps(type: :utc_datetime) 16 | end 17 | 18 | @doc false 19 | def changeset(destination, attrs) do 20 | destination 21 | |> cast(attrs, [:enabled, :name, :alias, :status, :started_at, :stopped_at]) 22 | |> validate_required([:enabled, :name, :alias, 
:status, :started_at, :stopped_at]) 23 | end 24 | end 25 | -------------------------------------------------------------------------------- /lib/hydra_srt/api/route.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Api.Route do 2 | use Ecto.Schema 3 | import Ecto.Changeset 4 | 5 | @primary_key {:id, :binary_id, autogenerate: true} 6 | @foreign_key_type :binary_id 7 | schema "routes" do 8 | field :alias, :string 9 | field :enabled, :boolean, default: false 10 | field :name, :string 11 | field :status, :string 12 | field :started_at, :utc_datetime 13 | field :source, :map 14 | field :destinations, :map 15 | field :stopped_at, :utc_datetime 16 | 17 | timestamps(type: :utc_datetime) 18 | end 19 | 20 | @doc false 21 | def changeset(route, attrs) do 22 | route 23 | |> cast(attrs, [ 24 | :enabled, 25 | :name, 26 | :alias, 27 | :status, 28 | :source, 29 | :destinations, 30 | :started_at, 31 | :stopped_at 32 | ]) 33 | |> validate_required([:enabled, :name, :alias, :status, :started_at, :stopped_at]) 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /lib/hydra_srt/application.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Application do 2 | @moduledoc false 3 | use Application 4 | require Logger 5 | 6 | @impl true 7 | def start(_type, _args) do 8 | :ok = 9 | :gen_event.swap_sup_handler( 10 | :erl_signal_server, 11 | {:erl_signal_handler, []}, 12 | {HydraSrt.SignalHandler, []} 13 | ) 14 | 15 | khepri_data_dir = System.get_env("DATABASE_DATA_DIR", "#{File.cwd!()}/khepri##{node()}") 16 | Logger.notice("Database directory: #{khepri_data_dir}") 17 | Logger.notice("Starting database: #{inspect(:khepri.start(khepri_data_dir))}") 18 | 19 | :syn.add_node_to_scopes([:routes]) 20 | runtime_schedulers = System.schedulers_online() 21 | Logger.info("Runtime schedulers: #{runtime_schedulers}") 22 | 23 | {:ok, 
ranch_listener} = 24 | :ranch.start_listener( 25 | :hydra_unix_sock, 26 | :ranch_tcp, 27 | %{ 28 | max_connections: String.to_integer(System.get_env("MAX_CONNECTIONS") || "75000"), 29 | num_acceptors: String.to_integer(System.get_env("NUM_ACCEPTORS") || "100"), 30 | socket_opts: [ 31 | ip: {:local, "/tmp/hydra_unix_sock"}, 32 | port: 0, 33 | keepalive: true 34 | ] 35 | }, 36 | HydraSrt.UnixSockHandler, 37 | %{} 38 | ) 39 | 40 | Logger.info("Ranch listener: #{inspect(ranch_listener)}") 41 | 42 | children = [ 43 | HydraSrt.ErlSysMon, 44 | {PartitionSupervisor, 45 | child_spec: DynamicSupervisor, strategy: :one_for_one, name: HydraSrt.DynamicSupervisor}, 46 | {Registry, 47 | keys: :unique, name: HydraSrt.Registry.MsgHandlers, partitions: runtime_schedulers}, 48 | HydraSrtWeb.Telemetry, 49 | # HydraSrt.Repo, 50 | # {Ecto.Migrator, 51 | # repos: Application.fetch_env!(:hydra_srt, :ecto_repos), skip: skip_migrations?()}, 52 | {Phoenix.PubSub, name: HydraSrt.PubSub, partitions: runtime_schedulers}, 53 | HydraSrtWeb.Endpoint, 54 | HydraSrt.Metrics.Connection 55 | ] 56 | 57 | # start Cachex only if the node uses names, this is necessary for test setup 58 | children = 59 | if node() != :nonode@nohost do 60 | [{Cachex, name: HydraSrt.Cache} | children] 61 | else 62 | children 63 | end 64 | 65 | opts = [strategy: :one_for_one, name: HydraSrt.Supervisor] 66 | Supervisor.start_link(children, opts) 67 | end 68 | 69 | @impl true 70 | def config_change(changed, _new, removed) do 71 | HydraSrtWeb.Endpoint.config_change(changed, removed) 72 | :ok 73 | end 74 | 75 | @impl true 76 | def stop(_state) do 77 | Logger.info("Stopping application") 78 | end 79 | end 80 | -------------------------------------------------------------------------------- /lib/hydra_srt/db.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Db do 2 | @moduledoc false 3 | require Logger 4 | 5 | @spec create_route(map, binary | nil) :: {:ok, map} | {:error, 
any} 6 | def create_route(data, id \\ nil) when is_map(data) do 7 | id = if id, do: id, else: UUID.uuid1() 8 | 9 | update = %{ 10 | "id" => id, 11 | "created_at" => now(), 12 | "updated_at" => now() 13 | } 14 | 15 | with :ok <- :khepri.put(["routes", id], Map.merge(data, update)), 16 | {:ok, result} <- get_route(id) do 17 | {:ok, result} 18 | else 19 | other -> 20 | Logger.error("Failed to create route: #{inspect(other)}") 21 | {:error, other} 22 | end 23 | end 24 | 25 | @spec get_route(String.t(), boolean) :: {:ok, map} | {:error, any} 26 | def get_route(id, include_dest? \\ false) when is_binary(id) do 27 | route = :khepri.get!(["routes", id]) 28 | 29 | route = 30 | if include_dest? do 31 | destinations_list = 32 | :khepri.get_many!("routes/#{id}/destinations/*") 33 | |> Enum.reduce([], fn 34 | {["routes", _, "destinations", _dest_id], dest}, acc when is_map(dest) -> 35 | [dest | acc] 36 | 37 | _, acc -> 38 | acc 39 | end) 40 | 41 | Map.put(route, "destinations", destinations_list) 42 | else 43 | route 44 | end 45 | 46 | {:ok, route} 47 | end 48 | 49 | @spec update_route(String.t(), map) :: {:ok, map} | {:error, any} 50 | def update_route(id, data) do 51 | path = ["routes", id] 52 | now = now() 53 | 54 | :khepri.transaction(fn -> 55 | case :khepri_tx.get(path) do 56 | {:ok, route} -> 57 | new_route = Map.merge(route, Map.put(data, "updated_at", now)) 58 | 59 | :ok = :khepri_tx.put(path, new_route) 60 | :khepri_tx.get(path) 61 | 62 | _ -> 63 | {:error, :route_not_found} 64 | end 65 | end) 66 | |> case do 67 | {:ok, result} -> result 68 | other -> {:error, inspect(other)} 69 | end 70 | end 71 | 72 | @spec delete_route(String.t()) :: [:ok] | [{:error, any}] 73 | def delete_route(id) when is_binary(id) do 74 | [:khepri.delete(["routes", id]), :khepri.delete_many("routes/#{id}/destinations/*")] 75 | end 76 | 77 | @spec create_destination(String.t(), map, binary | nil) :: {:ok, map} | {:error, any} 78 | def create_destination(route_id, data, id \\ nil) do 79 | id = if 
id, do: id, else: UUID.uuid1() 80 | 81 | data = 82 | Map.merge(data, %{ 83 | "id" => id, 84 | "route_id" => route_id, 85 | "created_at" => now(), 86 | "updated_at" => now() 87 | }) 88 | 89 | with :ok <- :khepri.put(["routes", route_id, "destinations", id], data), 90 | {:ok, result} <- get_destination(route_id, id) do 91 | {:ok, result} 92 | else 93 | other -> 94 | Logger.error("Failed to create route: #{inspect(other)}") 95 | {:error, other} 96 | end 97 | end 98 | 99 | @spec get_destination(String.t(), String.t()) :: {:ok, map} | {:error, any} 100 | def get_destination(route_id, id) when is_binary(route_id) and is_binary(id) do 101 | :khepri.get(["routes", route_id, "destinations", id]) 102 | end 103 | 104 | @spec update_destination(String.t(), String.t(), map) :: {:ok, map} | {:error, any} 105 | def update_destination(route_id, id, data) do 106 | path = ["routes", route_id, "destinations", id] 107 | now = now() 108 | 109 | :khepri.transaction(fn -> 110 | case :khepri_tx.get(path) do 111 | {:ok, destination} -> 112 | new_destination = Map.merge(destination, Map.put(data, "updated_at", now)) 113 | 114 | :ok = :khepri_tx.put(path, new_destination) 115 | :khepri_tx.get(path) 116 | 117 | _ -> 118 | {:error, :destination_not_found} 119 | end 120 | end) 121 | |> case do 122 | {:ok, result} -> result 123 | other -> {:error, inspect(other)} 124 | end 125 | end 126 | 127 | def del_destination(route_id, id) when is_binary(route_id) and is_binary(id) do 128 | :khepri.delete(["routes", route_id, "destinations", id]) 129 | end 130 | 131 | @spec get_all_routes(boolean, binary) :: {:ok, list(map)} | {:error, any} 132 | def get_all_routes(with_destinations \\ false, sort_by \\ "created_at") do 133 | case :khepri.get_many("routes/*") do 134 | {:ok, routes} -> 135 | routes = 136 | routes 137 | |> Enum.map(fn {_path, route} -> route end) 138 | |> Enum.filter(&is_map/1) 139 | |> Enum.map(fn route -> 140 | if with_destinations do 141 | route_id = route["id"] 142 | 143 | 
destinations_list = 144 | :khepri.get_many!("routes/#{route_id}/destinations/*") 145 | |> Enum.reduce([], fn 146 | {["routes", _, "destinations", _dest_id], dest}, acc when is_map(dest) -> 147 | [dest | acc] 148 | 149 | _, acc -> 150 | acc 151 | end) 152 | 153 | Map.put(route, "destinations", destinations_list) 154 | else 155 | route 156 | end 157 | end) 158 | |> Enum.sort_by(fn route -> route[sort_by] end, DateTime) 159 | |> Enum.reverse() 160 | 161 | {:ok, routes} 162 | 163 | other -> 164 | Logger.error("Failed to get all routes: #{inspect(other)}") 165 | other 166 | end 167 | end 168 | 169 | def get_all_destinations(route_id) when is_binary(route_id) do 170 | :khepri.get_many("routes/#{route_id}/destinations/*") 171 | end 172 | 173 | @spec backup() :: {:ok, binary} | {:error, any} 174 | def backup() do 175 | case :khepri.get_many("**") do 176 | {:ok, data} -> 177 | binary_data = :erlang.term_to_binary(data) 178 | {:ok, binary_data} 179 | 180 | error -> 181 | Logger.error("Failed to create backup: #{inspect(error)}") 182 | {:error, error} 183 | end 184 | end 185 | 186 | @spec restore_backup(binary) :: :ok | {:error, any} 187 | def restore_backup(binary_data) when is_binary(binary_data) do 188 | Logger.warning("Restoring backup") 189 | 190 | try do 191 | data = :erlang.binary_to_term(binary_data) 192 | 193 | :khepri.delete_many("**") 194 | 195 | Enum.each(data, fn {path, value} -> 196 | :khepri.put(path, value) 197 | end) 198 | 199 | :ok 200 | rescue 201 | e -> 202 | Logger.error("Failed to restore backup: #{inspect(e)}") 203 | {:error, e} 204 | end 205 | end 206 | 207 | defp now, do: DateTime.utc_now() 208 | end 209 | -------------------------------------------------------------------------------- /lib/hydra_srt/erl_sys_mon.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.ErlSysMon do 2 | @moduledoc false 3 | 4 | use GenServer 5 | require Logger 6 | 7 | def start_link(args) do 8 | 
GenServer.start_link(__MODULE__, args, name: __MODULE__) 9 | end 10 | 11 | def init(_args) do 12 | Logger.info("Starting #{__MODULE__}") 13 | 14 | :erlang.system_monitor(self(), [ 15 | :busy_dist_port, 16 | :busy_port, 17 | {:long_gc, 250}, 18 | {:long_schedule, 100} 19 | ]) 20 | 21 | {:ok, []} 22 | end 23 | 24 | def handle_info(msg, state) do 25 | Logger.warning("#{__MODULE__} message: #{inspect(msg, pretty: true)}") 26 | 27 | {:noreply, state} 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /lib/hydra_srt/helpers.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Helpers do 2 | @moduledoc false 3 | 4 | @doc """ 5 | Sets the maximum heap size for the current process. The `max_heap_size` parameter is in megabytes. 6 | 7 | ## Parameters 8 | 9 | - `max_heap_size`: The maximum heap size in megabytes. 10 | """ 11 | @spec set_max_heap_size(pos_integer()) :: map() 12 | def set_max_heap_size(max_heap_size) do 13 | max_heap_words = div(max_heap_size * 1024 * 1024, :erlang.system_info(:wordsize)) 14 | Process.flag(:max_heap_size, %{size: max_heap_words}) 15 | end 16 | 17 | def sys_kill(process_id) do 18 | System.cmd("kill", ["-9", "#{process_id}"]) 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /lib/hydra_srt/metrics.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Metrics do 2 | @moduledoc """ 3 | Helper functions for working with metrics. 
4 | """ 5 | 6 | require Logger 7 | 8 | alias HydraSrt.Metrics.Connection 9 | 10 | def event(k, v, tags \\ %{}, ts \\ System.system_time()) do 11 | # Logger.debug("Event: #{k} #{inspect(v)}") 12 | 13 | Connection.write(%{ 14 | measurement: "hydra_srt_routes_stats", 15 | fields: %{k => v}, 16 | tags: tags, 17 | timestamp: ts 18 | }) 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /lib/hydra_srt/metrics/connection.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Metrics.Connection do 2 | use Instream.Connection, otp_app: :hydra_srt 3 | end 4 | -------------------------------------------------------------------------------- /lib/hydra_srt/monitoring/os_mon.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Monitoring.OsMon do 2 | @moduledoc false 3 | 4 | require Logger 5 | 6 | @spec ram_usage() :: float() 7 | def ram_usage do 8 | mem = :memsup.get_system_memory_data() 9 | 100 - mem[:free_memory] / mem[:total_memory] * 100 10 | end 11 | 12 | @spec cpu_la() :: %{avg1: float(), avg5: float(), avg15: float()} 13 | def cpu_la do 14 | %{ 15 | avg1: :cpu_sup.avg1() / 256, 16 | avg5: :cpu_sup.avg5() / 256, 17 | avg15: :cpu_sup.avg15() / 256 18 | } 19 | end 20 | 21 | @spec cpu_util() :: float() | {:error, term()} 22 | def cpu_util do 23 | :cpu_sup.util() 24 | end 25 | 26 | @spec swap_usage() :: float() | nil 27 | def swap_usage do 28 | mem = :memsup.get_system_memory_data() 29 | 30 | with total_swap when is_integer(total_swap) <- Keyword.get(mem, :total_swap), 31 | free_swap when is_integer(free_swap) <- Keyword.get(mem, :free_swap) do 32 | 100 - free_swap / total_swap * 100 33 | else 34 | _ -> nil 35 | end 36 | end 37 | 38 | @doc """ 39 | Get all system stats in a single call 40 | """ 41 | @spec get_all_stats() :: %{ 42 | cpu: float() | {:error, term()}, 43 | ram: float(), 44 | swap: float() | nil, 45 | 
cpu_la: %{avg1: float(), avg5: float(), avg15: float()} 46 | } 47 | def get_all_stats do 48 | %{ 49 | cpu: cpu_util(), 50 | ram: ram_usage(), 51 | swap: swap_usage(), 52 | cpu_la: cpu_la() 53 | } 54 | end 55 | end 56 | -------------------------------------------------------------------------------- /lib/hydra_srt/process_monitor.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.ProcessMonitor do 2 | @moduledoc false 3 | 4 | def list_pipeline_processes do 5 | case :os.type() do 6 | {:unix, :darwin} -> list_pipeline_processes_darwin() 7 | {:unix, :linux} -> list_pipeline_processes_linux() 8 | _ -> {:error, "Unsupported operating system"} 9 | end 10 | end 11 | 12 | def list_pipeline_processes_detailed do 13 | case :os.type() do 14 | {:unix, :darwin} -> list_pipeline_processes_detailed_darwin() 15 | {:unix, :linux} -> list_pipeline_processes_detailed_linux() 16 | _ -> {:error, "Unsupported operating system"} 17 | end 18 | end 19 | 20 | defp list_pipeline_processes_darwin do 21 | {output, 0} = System.cmd("ps", ["-eo", "pid,%cpu,%mem,vsz,rss,user,lstart,command", "-ww"]) 22 | 23 | output 24 | |> String.split("\n", trim: true) 25 | |> Enum.drop(1) 26 | |> Enum.filter(&String.contains?(&1, "hydra_srt_pipeline")) 27 | |> Enum.map(&parse_process_darwin/1) 28 | end 29 | 30 | defp list_pipeline_processes_detailed_darwin do 31 | {output, 0} = 32 | System.cmd("ps", [ 33 | "-eo", 34 | "pid,%cpu,%mem,vsz,rss,time,state,ppid,user,lstart,command", 35 | "-ww" 36 | ]) 37 | 38 | output 39 | |> String.split("\n", trim: true) 40 | |> Enum.drop(1) 41 | |> Enum.filter(&String.contains?(&1, "hydra_srt_pipeline")) 42 | |> Enum.map(&parse_process_detailed_darwin/1) 43 | end 44 | 45 | defp parse_process_darwin(line) do 46 | parts = line |> String.split(" ", trim: true) 47 | 48 | pid = Enum.at(parts, 0) |> String.to_integer() 49 | cpu = Enum.at(parts, 1) <> "%" 50 | memory_percent = Enum.at(parts, 2) <> "%" 51 | vsz = Enum.at(parts, 3) 
|> String.to_integer() 52 | rss = Enum.at(parts, 4) |> String.to_integer() 53 | user = Enum.at(parts, 5) 54 | 55 | memory_bytes = rss * 1024 56 | swap_bytes = max(0, (vsz - rss) * 1024) 57 | 58 | swap_percent = 59 | if vsz > 0, do: "#{Float.round(swap_bytes / (1024 * 1024 * 1024) * 100, 1)}%", else: "0.0%" 60 | 61 | start_time_parts = Enum.slice(parts, 6..11) 62 | start_time = Enum.join(start_time_parts, " ") 63 | 64 | command_parts = Enum.slice(parts, 12..(length(parts) - 1)) 65 | command = Enum.join(command_parts, " ") 66 | 67 | %{ 68 | pid: pid, 69 | cpu: cpu, 70 | memory: format_memory(memory_bytes), 71 | memory_percent: memory_percent, 72 | memory_bytes: memory_bytes, 73 | swap_percent: swap_percent, 74 | swap_bytes: swap_bytes, 75 | user: user, 76 | start_time: start_time, 77 | command: command 78 | } 79 | end 80 | 81 | defp parse_process_detailed_darwin(line) do 82 | parts = line |> String.split(" ", trim: true) 83 | 84 | pid = Enum.at(parts, 0) |> String.to_integer() 85 | cpu = Enum.at(parts, 1) <> "%" 86 | memory_percent = Enum.at(parts, 2) <> "%" 87 | vsz = Enum.at(parts, 3) |> String.to_integer() 88 | rss = Enum.at(parts, 4) |> String.to_integer() 89 | 90 | memory_bytes = rss * 1024 91 | swap_bytes = max(0, (vsz - rss) * 1024) 92 | 93 | swap_percent = 94 | if vsz > 0, do: "#{Float.round(swap_bytes / (1024 * 1024 * 1024) * 100, 1)}%", else: "0.0%" 95 | 96 | virtual_memory = format_memory(vsz * 1024) 97 | resident_memory = format_memory(memory_bytes) 98 | 99 | cpu_time = Enum.at(parts, 5) 100 | state = Enum.at(parts, 6) 101 | ppid = Enum.at(parts, 7) |> String.to_integer() 102 | user = Enum.at(parts, 8) 103 | 104 | start_time_parts = Enum.slice(parts, 9..14) 105 | start_time = Enum.join(start_time_parts, " ") 106 | 107 | command_parts = Enum.slice(parts, 15..(length(parts) - 1)) 108 | command = Enum.join(command_parts, " ") 109 | 110 | %{ 111 | pid: pid, 112 | cpu: cpu, 113 | memory_percent: memory_percent, 114 | memory_bytes: memory_bytes, 115 | 
virtual_memory: virtual_memory, 116 | resident_memory: resident_memory, 117 | swap_percent: swap_percent, 118 | swap_bytes: swap_bytes, 119 | cpu_time: cpu_time, 120 | state: state, 121 | ppid: ppid, 122 | user: user, 123 | start_time: start_time, 124 | command: command 125 | } 126 | end 127 | 128 | defp list_pipeline_processes_linux do 129 | {output, 0} = 130 | System.cmd("ps", ["-eo", "pid,%cpu,%mem,vsz,rss,user,lstart,cmd", "--sort=-%cpu"]) 131 | 132 | output 133 | |> String.split("\n", trim: true) 134 | |> Enum.drop(1) 135 | |> Enum.filter(&String.contains?(&1, "hydra_srt_pipeline")) 136 | |> Enum.map(&parse_process_linux/1) 137 | end 138 | 139 | defp list_pipeline_processes_detailed_linux do 140 | {output, 0} = 141 | System.cmd("ps", [ 142 | "-eo", 143 | "pid,%cpu,%mem,vsz,rss,time,s,ppid,user,lstart,cmd", 144 | "--sort=-%cpu" 145 | ]) 146 | 147 | output 148 | |> String.split("\n", trim: true) 149 | |> Enum.drop(1) 150 | |> Enum.filter(&String.contains?(&1, "hydra_srt_pipeline")) 151 | |> Enum.map(&parse_process_detailed_linux/1) 152 | end 153 | 154 | defp parse_process_linux(line) do 155 | parts = line |> String.split(" ", trim: true) 156 | 157 | pid = Enum.at(parts, 0) |> String.to_integer() 158 | cpu = Enum.at(parts, 1) <> "%" 159 | memory_percent = Enum.at(parts, 2) <> "%" 160 | vsz = Enum.at(parts, 3) |> String.to_integer() 161 | rss = Enum.at(parts, 4) |> String.to_integer() 162 | user = Enum.at(parts, 5) 163 | 164 | memory_bytes = rss * 1024 165 | swap_bytes = max(0, (vsz - rss) * 1024) 166 | 167 | swap_percent = 168 | if vsz > 0, do: "#{Float.round(swap_bytes / (1024 * 1024 * 1024) * 100, 1)}%", else: "0.0%" 169 | 170 | start_time_parts = Enum.slice(parts, 6..11) 171 | start_time = Enum.join(start_time_parts, " ") 172 | 173 | command_parts = Enum.slice(parts, 12..(length(parts) - 1)) 174 | command = Enum.join(command_parts, " ") 175 | 176 | %{ 177 | pid: pid, 178 | cpu: cpu, 179 | memory: format_memory(memory_bytes), 180 | memory_percent: 
memory_percent, 181 | memory_bytes: memory_bytes, 182 | swap_percent: swap_percent, 183 | swap_bytes: swap_bytes, 184 | user: user, 185 | start_time: start_time, 186 | command: command 187 | } 188 | end 189 | 190 | defp parse_process_detailed_linux(line) do 191 | parts = line |> String.split(" ", trim: true) 192 | 193 | pid = Enum.at(parts, 0) |> String.to_integer() 194 | cpu = Enum.at(parts, 1) <> "%" 195 | memory_percent = Enum.at(parts, 2) <> "%" 196 | vsz = Enum.at(parts, 3) |> String.to_integer() 197 | rss = Enum.at(parts, 4) |> String.to_integer() 198 | 199 | memory_bytes = rss * 1024 200 | swap_bytes = max(0, (vsz - rss) * 1024) 201 | 202 | swap_percent = 203 | if vsz > 0, do: "#{Float.round(swap_bytes / (1024 * 1024 * 1024) * 100, 1)}%", else: "0.0%" 204 | 205 | virtual_memory = format_memory(vsz * 1024) 206 | resident_memory = format_memory(memory_bytes) 207 | 208 | cpu_time = Enum.at(parts, 5) 209 | state = Enum.at(parts, 6) 210 | ppid = Enum.at(parts, 7) |> String.to_integer() 211 | user = Enum.at(parts, 8) 212 | 213 | start_time_parts = Enum.slice(parts, 9..14) 214 | start_time = Enum.join(start_time_parts, " ") 215 | 216 | command_parts = Enum.slice(parts, 15..(length(parts) - 1)) 217 | command = Enum.join(command_parts, " ") 218 | 219 | %{ 220 | pid: pid, 221 | cpu: cpu, 222 | memory_percent: memory_percent, 223 | memory_bytes: memory_bytes, 224 | virtual_memory: virtual_memory, 225 | resident_memory: resident_memory, 226 | swap_percent: swap_percent, 227 | swap_bytes: swap_bytes, 228 | cpu_time: cpu_time, 229 | state: state, 230 | ppid: ppid, 231 | user: user, 232 | start_time: start_time, 233 | command: command 234 | } 235 | end 236 | 237 | defp format_memory(bytes) when is_integer(bytes) do 238 | cond do 239 | bytes > 1_073_741_824 -> "#{Float.round(bytes / 1_073_741_824, 2)} GB" 240 | bytes > 1_048_576 -> "#{Float.round(bytes / 1_048_576, 2)} MB" 241 | bytes > 1_024 -> "#{Float.round(bytes / 1_024, 2)} KB" 242 | true -> "#{bytes} B" 243 | end 244 
end
end

--------------------------------------------------------------------------------
/lib/hydra_srt/release.ex:
--------------------------------------------------------------------------------
defmodule HydraSrt.Release do
  @moduledoc """
  Used for executing DB release tasks when run in production without Mix
  installed.
  """
  @app :hydra_srt

  # Runs all pending Ecto migrations for every configured repo.
  def migrate do
    load_app()

    for repo <- repos() do
      {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
    end
  end

  # Rolls the given repo back down to `version`.
  def rollback(repo, version) do
    load_app()
    {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version))
  end

  defp repos do
    Application.fetch_env!(@app, :ecto_repos)
  end

  defp load_app do
    Application.load(@app)
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt/repo.ex:
--------------------------------------------------------------------------------
defmodule HydraSrt.Repo do
  use Ecto.Repo,
    otp_app: :hydra_srt,
    adapter: Ecto.Adapters.SQLite3
end

--------------------------------------------------------------------------------
/lib/hydra_srt/routes_supervisor.ex:
--------------------------------------------------------------------------------
defmodule HydraSrt.RoutesSupervisor do
  @moduledoc false
  use Supervisor

  require Logger
  alias HydraSrt.RouteHandler

  # Registers the supervisor via :syn so a route's supervisor can be
  # looked up by route id from anywhere in the cluster.
  def start_link(args) do
    name = {:via, :syn, {:routes, args.id}}
    Supervisor.start_link(__MODULE__, args, name: name)
  end

  @impl true
  def init(args) do
    children = [
      %{
        id: {:route_handler, args.id},
        start: {RouteHandler, :start_link, [args]},
        restart: :transient,
        type: :worker
      }
    ]

    Supervisor.init(children,
      strategy: :one_for_all,
      max_restarts: 10,
      max_seconds: 60
    )
  end

  # Transient: the supervisor is not restarted after a normal shutdown.
  def child_spec(args) do
    %{
      id: args.id,
      start: {__MODULE__, :start_link, [args]},
      restart: :transient
    }
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt/signal_handler.ex:
--------------------------------------------------------------------------------
defmodule HydraSrt.SignalHandler do
  @moduledoc false

  @behaviour :gen_event
  require Logger

  @impl true
  def init(_) do
    Logger.info("#{__MODULE__} is being initialized...")
    {:ok, %{}}
  end

  # Logs every OS signal, then delegates to the default Erlang handler
  # so standard signal semantics (e.g. SIGTERM shutdown) are preserved.
  @impl true
  def handle_event(signal, state) do
    Logger.warning("#{__MODULE__}: #{inspect(signal)} received")

    :erl_signal_handler.handle_event(signal, state)
  end

  @impl true
  defdelegate handle_info(info, state), to: :erl_signal_handler

  @impl true
  defdelegate handle_call(request, state), to: :erl_signal_handler
end

--------------------------------------------------------------------------------
/lib/hydra_srt/unix_sock_handler.ex:
--------------------------------------------------------------------------------
defmodule HydraSrt.UnixSockHandler do
  @moduledoc false

  require Logger

  @behaviour :ranch_protocol
  @behaviour :gen_statem

  alias HydraSrt.Helpers
  alias HydraSrt.Metrics
  alias HydraSrt.Db

  @impl true
  def start_link(ref, transport, opts) do
    Logger.debug(
      "Starting UnixSockHandler with ref: #{inspect(ref)}, transport: #{inspect(transport)}, opts: #{inspect(opts)}"
    )

    pid = :proc_lib.spawn_link(__MODULE__, :init, [ref, transport, opts])
    {:ok, pid}
  end

  @impl true
  def callback_mode, do: [:handle_event_function]

  # Required :gen_statem callback; never used because the process is
  # entered via :gen_statem.enter_loop/4 from init/3 below.
  @impl true
  def init(_), do: :ignore

  def init(ref, trans, _opts) do
    Process.flag(:trap_exit, true)
    Helpers.set_max_heap_size(90)

    {:ok, sock} = :ranch.handshake(ref)

    :ok =
      trans.setopts(sock,
        # mode: :binary,
        # packet: :raw,
        # recbuf: 8192,
        # sndbuf: 8192,
        # # backlog: 2048,
        # send_timeout: 120,
        # keepalive: true,
        # nodelay: true,
        # nopush: true,
        active: true
      )

    data = %{
      sock: sock,
      trans: trans,
      source_stream_id: nil,
      route_id: nil,
      route_record: nil
    }

    :gen_statem.enter_loop(__MODULE__, [hibernate_after: 5_000], :exchange, data)
  end

  # First message from the pipeline identifies which route this socket
  # belongs to; the route record is cached in the statem data.
  @impl true
  def handle_event(:info, {:tcp, _port, "route_id:" <> route_id}, _state, data) do
    Logger.info("route_id: #{route_id}")

    route_record =
      case Db.get_route(route_id, true) do
        {:ok, record} ->
          Logger.info("route_record: #{inspect(record, pretty: true)}")
          record

        other ->
          Logger.error("Error getting route record: #{inspect(other)}")
          nil
      end

    {:keep_state, %{data | route_id: route_id, route_record: route_record}}
  end

  # JSON stats payloads are only processed when the route opted in via
  # "exportStats"; otherwise the clause below silently drops them.
  def handle_event(
        :info,
        {:tcp, _port, "{" <> _ = json},
        _,
        %{route_record: %{"exportStats" => true}} = data
      ) do
    case Jason.decode(json) do
      {:ok, stats} ->
        try do
          stats_to_metrics(stats, data)
        rescue
          error ->
            Logger.error("Error processing stats: #{inspect(error)} #{inspect(json)}")
        end

      # BUG FIX: was `{error, _}`, which bound `error` to the atom :error
      # and discarded the actual %Jason.DecodeError{} from the log line.
      {:error, error} ->
        Logger.error("Error decoding stats: #{inspect(error)} #{inspect(json)}")
    end

    :keep_state_and_data
  end

  def handle_event(:info, {:tcp, _port, "{" <> _}, _, _) do
    # ignore stats
    :keep_state_and_data
  end

  def handle_event(:info, {:tcp, _port, "stats_source_stream_id:" <> stream_id}, _state, data) do
    Logger.info("stats_source_stream_id: #{stream_id}")
    {:keep_state, %{data | source_stream_id: stream_id}}
  end

  # Catch-all: log anything unexpected rather than crashing the handler.
  def handle_event(type, content, state, data) do
    msg = [
      {"type", type},
      {"content", content},
      {"state", state},
      {"data", data}
    ]

    Logger.error("SocketHandler: Undefined msg: #{inspect(msg, pretty: true)}")

    :keep_state_and_data
  end

  @impl true
  def terminate(reason, _state, _data) do
    Logger.debug("SocketHandler: socket closed with reason #{inspect(reason)}")
    :ok
  end

  # Recursively walks a decoded stats document: lists and nested maps are
  # descended into; every scalar leaf is emitted as a metric event tagged
  # with the route/stream identity captured in `data`.
  def stats_to_metrics(stats, data) do
    stats
    |> Map.keys()
    |> Enum.map(fn key ->
      cond do
        is_list(stats[key]) ->
          Enum.each(stats[key], fn item ->
            stats_to_metrics(item, data)
          end)

        is_map(stats[key]) ->
          stats_to_metrics(stats[key], data)

        true ->
          tags = %{
            type: "source",
            route_id: data.route_id,
            route_name: data.route_record["name"],
            source_stream_id: data.source_stream_id
          }

          Metrics.event(norm_names(key), stats[key], tags)
      end
    end)
  end

  # Normalizes SRT stat names (e.g. "bytes-sent") into metric-safe
  # lower_snake_case.
  def norm_names(name) do
    name
    |> String.replace("-", "_")
    |> String.downcase()
  end

  ## Internal functions
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb do
  @moduledoc """
  The entrypoint for defining your web interface, such
  as controllers, components, channels, and so on.

  This can be used in your application as:

      use HydraSrtWeb, :controller
      use HydraSrtWeb, :html

  The definitions below will be executed for every controller,
  component, etc, so keep them short and clean, focused
  on imports, uses and aliases.

  Do NOT define functions inside the quoted expressions
  below. Instead, define additional modules and import
  those modules here.
  """

  def static_paths,
    do: ~w(assets fonts images js css favicon.ico robots.txt index.html logo.webp logo2.webp)

  def router do
    quote do
      use Phoenix.Router, helpers: false

      # Import common connection and controller functions to use in pipelines
      import Plug.Conn
      import Phoenix.Controller
    end
  end

  def channel do
    quote do
      use Phoenix.Channel
    end
  end

  def controller do
    quote do
      use Phoenix.Controller,
        formats: [:html, :json],
        layouts: [html: HydraSrtWeb.Layouts]

      import Plug.Conn

      unquote(verified_routes())
    end
  end

  def verified_routes do
    quote do
      use Phoenix.VerifiedRoutes,
        endpoint: HydraSrtWeb.Endpoint,
        router: HydraSrtWeb.Router,
        statics: HydraSrtWeb.static_paths()
    end
  end

  @doc """
  When used, dispatch to the appropriate controller/live_view/etc.
  """
  defmacro __using__(which) when is_atom(which) do
    apply(__MODULE__, which, [])
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/auth_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.AuthController do
  use HydraSrtWeb, :controller

  def login(conn, %{"login" => %{"user" => user, "password" => password}}) do
    # TODO: Implement a proper authentication mechanism
    # NOTE(review): `==` is not a constant-time comparison; consider
    # Plug.Crypto.secure_compare/2 for credential checks — confirm threat model.
    if user == Application.get_env(:hydra_srt, :api_auth_username) &&
         password == Application.get_env(:hydra_srt, :api_auth_password) do
      token = generate_token()

      # Session token is valid for 14 days.
      Cachex.put(HydraSrt.Cache, "auth_session:#{token}", user, ttl: :timer.hours(24 * 14))

      conn
      |> put_status(:ok)
      |> json(%{token: token, user: user})
    else
      conn
      |> put_status(:unauthorized)
      |> json(%{error: "Invalid username or password"})
    end
  end

  def login(conn, _params) do
    conn
    |> put_status(:bad_request)
    |> json(%{error: "Invalid request format"})
  end

  # 30 random bytes, URL-safe base64 without padding.
  defp generate_token do
    :crypto.strong_rand_bytes(30)
    |> Base.url_encode64(padding: false)
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/backup_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.BackupController do
  use HydraSrtWeb, :controller

  alias HydraSrt.Db

  # Streams the full routes table inline as JSON (shown in the browser).
  def export(conn, _params) do
    with {:ok, routes} <- Db.get_all_routes(true) do
      conn
      |> put_resp_content_type("application/json")
      |> put_resp_header(
        "content-disposition",
        "inline; filename=\"hydra_srt_routes_backup.json\""
      )
      |> json(%{data: routes})
    else
      error ->
        conn
        |> put_status(:internal_server_error)
        |> json(%{error: "Failed to export routes: #{inspect(error)}"})
    end
  end

  # Issues a one-time, 5-minute download link for the JSON routes export.
  def create_download_link(conn, _params) do
    session_id = UUID.uuid4()

    Cachex.put(HydraSrt.Cache, "backup_session:#{session_id}", true, ttl: :timer.minutes(5))

    conn
    |> put_status(:ok)
    |> json(%{download_link: "/backup/#{session_id}/download"})
  end

  # Issues a one-time, 5-minute download link for the binary DB backup.
  def create_backup_download_link(conn, _params) do
    session_id = UUID.uuid4()

    Cachex.put(HydraSrt.Cache, "backup_binary_session:#{session_id}", true,
      ttl: :timer.minutes(5)
    )

    conn
    |> put_status(:ok)
    |> json(%{download_link: "/backup/#{session_id}/download_backup"})
  end

  def download(conn, %{"session_id" => session_id}) do
    case Cachex.get(HydraSrt.Cache, "backup_session:#{session_id}") do
      {:ok, true} ->
        with {:ok, routes} <- Db.get_all_routes(true) do
          # One-shot link: invalidate the session before responding.
          Cachex.del(HydraSrt.Cache, "backup_session:#{session_id}")

          json_data = Jason.encode!(routes, pretty: true)

          now = DateTime.utc_now()
          formatted_time = Calendar.strftime(now, "%m-%d-%y-%H:%M:%S")
          filename = "hydra-routes-#{formatted_time}.json"

          conn
          |> put_resp_content_type("application/json")
          |> put_resp_header(
            "content-disposition",
            # BUG FIX: header previously contained a garbled literal instead
            # of interpolating the computed filename.
            "attachment; filename=\"#{filename}\""
          )
          |> send_resp(200, json_data)
        else
          error ->
            conn
            |> put_status(:internal_server_error)
            |> json(%{error: "Failed to download routes backup: #{inspect(error)}"})
        end

      _ ->
        conn
        |> put_status(:forbidden)
        |> json(%{error: "Invalid or expired download link"})
    end
  end

  def download_backup(conn, %{"session_id" => session_id}) do
    case Cachex.get(HydraSrt.Cache, "backup_binary_session:#{session_id}") do
      {:ok, true} ->
        with {:ok, binary_data} <- Db.backup() do
          # One-shot link: invalidate the session before responding.
          Cachex.del(HydraSrt.Cache, "backup_binary_session:#{session_id}")

          now = DateTime.utc_now()
          formatted_time = Calendar.strftime(now, "%m-%d-%y-%H:%M:%S")
          filename = "hydra-srt-#{formatted_time}.backup"

          conn
          |> put_resp_content_type("application/octet-stream")
          |> put_resp_header(
            "content-disposition",
            # BUG FIX: header previously contained a garbled literal instead
            # of interpolating the computed filename.
            "attachment; filename=\"#{filename}\""
          )
          |> send_resp(200, binary_data)
        else
          error ->
            conn
            |> put_status(:internal_server_error)
            |> json(%{error: "Failed to download backup: #{inspect(error)}"})
        end

      _ ->
        conn
        |> put_status(:forbidden)
        |> json(%{error: "Invalid or expired download link"})
    end
  end

  # Restores a binary backup uploaded as the raw request body (route is in
  # the :api_no_parse pipeline, so the body is not consumed by a parser).
  def restore(conn, _params) do
    try do
      {:ok, binary_data, _conn} = Plug.Conn.read_body(conn)
      IO.puts("Received binary data of size: #{byte_size(binary_data)} bytes")

      case Db.restore_backup(binary_data) do
        :ok ->
          conn
          |> put_status(:ok)
          |> json(%{message: "Backup restored successfully"})

        {:error, reason} ->
          conn
          |> put_status(:internal_server_error)
          |> json(%{error: "Failed to restore backup: #{inspect(reason)}"})
      end
    rescue
      e ->
        IO.puts("Error processing backup: #{inspect(e)}")

        conn
        |> put_status(:internal_server_error)
        |> json(%{error: "Failed to process backup: #{inspect(e)}"})
    end
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/changeset_json.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.ChangesetJSON do
  @doc """
  Renders changeset errors.
  """
  def error(%{changeset: changeset}) do
    # When encoded, the changeset returns its errors
    # as a JSON object. So we just pass it forward.
    %{errors: Ecto.Changeset.traverse_errors(changeset, &translate_error/1)}
  end

  defp translate_error({msg, opts}) do
    # You can make use of gettext to translate error messages by
    # uncommenting and adjusting the following code:

    # if count = opts[:count] do
    #   Gettext.dngettext(HydraSrtWeb.Gettext, "errors", msg, msg, count, opts)
    # else
    #   Gettext.dgettext(HydraSrtWeb.Gettext, "errors", msg, opts)
    # end

    Enum.reduce(opts, msg, fn {key, value}, acc ->
      String.replace(acc, "%{#{key}}", fn _ -> to_string(value) end)
    end)
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/destination_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.DestinationController do
  use HydraSrtWeb, :controller

  alias HydraSrt.Db

  action_fallback HydraSrtWeb.FallbackController

  def index(conn, %{"route_id" => route_id}) do
    {:ok, destinations} = Db.get_all_destinations(route_id)

    # Db returns {["destinations", id], record} pairs; flatten the key into
    # the record as its "id" field.
    destinations =
      Enum.reduce(destinations, [], fn {["destinations", id], route}, acc ->
        [Map.put(route, "id", id) | acc]
      end)

    data(conn, destinations)
  end

  def create(conn, %{"destination" => dest_params, "route_id" => route_id}) do
    with {:ok, route} <- Db.create_destination(route_id, dest_params) do
      conn
      |> put_status(:created)
      |> data(route)
    end
  end

  def show(conn, %{"dest_id" => id, "route_id" => route_id}) do
    {:ok, route} = Db.get_destination(route_id, id)
    data(conn, route)
  end

  def update(conn, %{"dest_id" => id, "route_id" => route_id, "destination" => dest_params}) do
    with {:ok, route} <- Db.update_destination(route_id, id, dest_params) do
      data(conn, route)
    end
  end

  def delete(conn, %{"dest_id" => id, "route_id" => route_id}) do
    with :ok <- Db.del_destination(route_id, id) do
      send_resp(conn, :no_content, "")
    end
  end

  # Uniform JSON envelope used by all actions in this controller.
  defp data(conn, data), do: json(conn, %{data: data})
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/destination_json.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.DestinationJSON do
  alias HydraSrt.Api.Destination

  @doc """
  Renders a list of destinations.
  """
  def index(%{destinations: destinations}) do
    %{data: for(destination <- destinations, do: data(destination))}
  end

  @doc """
  Renders a single destination.
  """
  def show(%{destination: destination}) do
    %{data: data(destination)}
  end

  defp data(%Destination{} = destination) do
    %{
      id: destination.id,
      enabled: destination.enabled,
      name: destination.name,
      alias: destination.alias,
      status: destination.status,
      started_at: destination.started_at,
      stopped_at: destination.stopped_at
    }
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/error_json.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.ErrorJSON do
  @moduledoc """
  This module is invoked by your endpoint in case of errors on JSON requests.

  See config/config.exs.
  """

  # If you want to customize a particular status code,
  # you may add your own clauses, such as:
  #
  # def render("500.json", _assigns) do
  #   %{errors: %{detail: "Internal Server Error"}}
  # end

  # Custom error handler for node errors
  def render("error.json", %{error: message}) do
    %{error: message}
  end

  # By default, Phoenix returns the status message from
  # the template name. For example, "404.json" becomes
  # "Not Found".
  def render(template, _assigns) do
    %{errors: %{detail: Phoenix.Controller.status_message_from_template(template)}}
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/fallback_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.FallbackController do
  @moduledoc """
  Translates controller action results into valid `Plug.Conn` responses.

  See `Phoenix.Controller.action_fallback/1` for more details.
  """
  use HydraSrtWeb, :controller

  # This clause handles errors returned by Ecto's insert/update/delete.
  def call(conn, {:error, %Ecto.Changeset{} = changeset}) do
    conn
    |> put_status(:unprocessable_entity)
    |> put_view(json: HydraSrtWeb.ChangesetJSON)
    |> render(:error, changeset: changeset)
  end

  # This clause is an example of how to handle resources that cannot be found.
  def call(conn, {:error, :not_found}) do
    conn
    |> put_status(:not_found)
    |> put_view(html: HydraSrtWeb.ErrorHTML, json: HydraSrtWeb.ErrorJSON)
    |> render(:"404")
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/health_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.HealthController do
  use HydraSrtWeb, :controller

  # Liveness probe: always 200 with an empty body.
  def index(conn, _params) do
    conn
    |> send_resp(200, "")
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/node_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.NodeController do
  use HydraSrtWeb, :controller

  alias HydraSrt.Monitoring.OsMon

  # Collects CPU/RAM/swap/load-average stats from every connected node via
  # RPC; a node that fails the call (or raises) is reported as "down".
  def index(conn, _params) do
    nodes = [node() | Node.list()]

    node_stats =
      Enum.map(nodes, fn node_name ->
        try do
          stats = :rpc.call(node_name, OsMon, :get_all_stats, [])

          status =
            if is_map(stats) and (is_number(stats.cpu) or is_number(stats.ram)),
              do: "up",
              else: "down"

          status = if node_name == node(), do: "self", else: status

          la_string =
            if is_map(stats) and is_map(stats.cpu_la) do
              "#{format_float(stats.cpu_la.avg1)} / #{format_float(stats.cpu_la.avg5)} / #{format_float(stats.cpu_la.avg15)}"
            else
              "N/A / N/A / N/A"
            end

          %{
            host: node_name,
            cpu: if(is_map(stats), do: stats.cpu, else: nil),
            ram: if(is_map(stats), do: stats.ram, else: nil),
            swap: if(is_map(stats), do: stats.swap, else: nil),
            la: la_string,
            status: status
          }
        rescue
          _ ->
            %{
              host: node_name,
              cpu: nil,
              ram: nil,
              swap: nil,
              la: "N/A / N/A / N/A",
              status: "down"
            }
        catch
          _, _ ->
            %{
              host: node_name,
              cpu: nil,
              ram: nil,
              swap: nil,
              la: "N/A / N/A / N/A",
              status: "down"
            }
        end
      end)

    json(conn, node_stats)
  end

  def show(conn, %{"id" => node_name}) do
    # NOTE(review): String.to_atom/1 on request input can exhaust the atom
    # table under abuse; route is auth-gated, but consider
    # String.to_existing_atom/1 — confirm callers always pass known nodes.
    node_atom = String.to_atom(node_name)

    try do
      stats = :rpc.call(node_atom, OsMon, :get_all_stats, [])

      status =
        if is_map(stats) and (is_number(stats.cpu) or is_number(stats.ram)),
          do: "up",
          else: "down"

      status = if node_atom == node(), do: "self", else: status

      la_string =
        if is_map(stats) and is_map(stats.cpu_la) do
          "#{format_float(stats.cpu_la.avg1)} / #{format_float(stats.cpu_la.avg5)} / #{format_float(stats.cpu_la.avg15)}"
        else
          "N/A / N/A / N/A"
        end

      node_data = %{
        host: node_atom,
        cpu: if(is_map(stats), do: stats.cpu, else: nil),
        ram: if(is_map(stats), do: stats.ram, else: nil),
        swap: if(is_map(stats), do: stats.swap, else: nil),
        la: la_string,
        status: status
      }

      json(conn, node_data)
    rescue
      e ->
        conn
        |> put_status(:not_found)
        |> json(%{error: "Node not available: #{inspect(e)}"})
    catch
      _, reason ->
        conn
        |> put_status(:not_found)
        |> json(%{error: "Node not available: #{inspect(reason)}"})
    end
  end

  defp format_float(nil), do: "N/A"
  defp format_float(value) when is_float(value), do: :erlang.float_to_binary(value, decimals: 1)
  defp format_float(_), do: "N/A"
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/page_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.PageController do
  use HydraSrtWeb, :controller

  # Canonicalize direct hits on /index.html back to the root path.
  def index(conn, %{"path" => ["index.html" | _rest]}) do
    conn
    |> redirect(to: "/")
    |> halt()
  end

  # Any other client-side route falls through to the SPA shell.
  def index(conn, %{"path" => _path}) do
    serve_index_html(conn)
  end

  def index(conn, _params) do
    serve_index_html(conn)
  end

  # Serves the compiled SPA entry point from priv/static.
  defp serve_index_html(conn) do
    conn
    |> put_resp_header("content-type", "text/html; charset=utf-8")
    |> Plug.Conn.send_file(200, Application.app_dir(:hydra_srt, "priv/static/index.html"))
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/route_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.RouteController do
  use HydraSrtWeb, :controller

  alias HydraSrt.Db

  action_fallback HydraSrtWeb.FallbackController

  def index(conn, _params) do
    with {:ok, routes} <- Db.get_all_routes() do
      data(conn, routes)
    else
      error ->
        conn
        |> put_status(:internal_server_error)
        |> json(%{error: "Failed to fetch routes: #{inspect(error)}"})
    end
  end

  def create(conn, %{"route" => route_params}) do
    with {:ok, route} <- Db.create_route(route_params) do
      conn
      |> put_status(:created)
      |> data(route)
    end
  end

  def show(conn, %{"id" => id}) do
    # :timer.sleep(1500)
    {:ok, route} = Db.get_route(id, true)
    data(conn, route)
  end

  def update(conn, %{"id" => id, "route" => route_params}) do
    with {:ok, route} <- Db.update_route(id, route_params) do
      data(conn, route)
    end
  end

  # Db.delete_route/1 reports one :ok per record class it removed.
  def delete(conn, %{"id" => id}) do
    with [:ok, :ok] <- Db.delete_route(id) do
      send_resp(conn, :no_content, "")
    end
  end

  # Lifecycle actions delegate to the HydraSrt application facade and map
  # {:error, reason} to 422 with the inspected reason.
  def start(conn, %{"route_id" => route_id}) do
    case HydraSrt.start_route(route_id) do
      {:ok, _pid} ->
        conn
        |> put_status(:ok)
        |> data(%{status: "started", route_id: route_id})

      {:error, reason} ->
        conn
        |> put_status(:unprocessable_entity)
        |> json(%{error: inspect(reason)})
    end
  end

  def stop(conn, %{"route_id" => route_id}) do
    case HydraSrt.stop_route(route_id) do
      :ok ->
        conn
        |> put_status(:ok)
        |> data(%{status: "stopped", route_id: route_id})

      {:error, reason} ->
        conn
        |> put_status(:unprocessable_entity)
        |> json(%{error: inspect(reason)})
    end
  end

  def restart(conn, %{"route_id" => route_id}) do
    case HydraSrt.restart_route(route_id) do
      {:ok, _pid} ->
        conn
        |> put_status(:ok)
        |> data(%{status: "restarted", route_id: route_id})

      {:error, reason} ->
        conn
        |> put_status(:unprocessable_entity)
        |> json(%{error: inspect(reason)})
    end
  end

  # Uniform JSON envelope used by all actions in this controller.
  defp data(conn, data), do: json(conn, %{data: data})
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/route_json.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.RouteJSON do
  alias HydraSrt.Api.Route

  @doc """
  Renders a list of routes.
  """
  def index(%{routes: routes}) do
    %{data: for(route <- routes, do: data(route))}
  end

  @doc """
  Renders a single route.
  """
  def show(%{route: route}) do
    %{data: data(route)}
  end

  defp data(%Route{} = route) do
    %{
      id: route.id,
      enabled: route.enabled,
      name: route.name,
      alias: route.alias,
      status: route.status,
      source: route.source,
      destinations: route.destinations,
      started_at: route.started_at,
      stopped_at: route.stopped_at
    }
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/controllers/system_controller.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.SystemController do
  use HydraSrtWeb, :controller

  alias HydraSrt.ProcessMonitor
  alias HydraSrt.Helpers

  def list_pipelines(conn, _params) do
    pipelines = ProcessMonitor.list_pipeline_processes()
    json(conn, pipelines)
  end

  def list_pipelines_detailed(conn, _params) do
    pipelines = ProcessMonitor.list_pipeline_processes_detailed()
    json(conn, pipelines)
  end

  # Integer.parse/1 is used purely to validate the PID string; the kill is
  # issued with the original string via Helpers.sys_kill/1.
  def kill_pipeline(conn, %{"pid" => pid_str}) do
    with {pid, _} <- Integer.parse(pid_str),
         {_, 0} <- Helpers.sys_kill(pid_str) do
      json(conn, %{success: true, message: "Process #{pid} killed successfully"})
    else
      :error ->
        conn
        |> put_status(400)
        |> json(%{error: "Invalid PID format"})

      {error, _} ->
        conn
        |> put_status(500)
        |> json(%{error: "Failed to kill process: #{inspect(error)}"})
    end
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/endpoint.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.Endpoint do
  use Phoenix.Endpoint, otp_app: :hydra_srt

  # The session will be stored in the cookie and signed,
  # this means its contents can be read but not tampered with.
  # Set :encryption_salt if you would also like to encrypt it.
  @session_options [
    store: :cookie,
    key: "_hydra_srt_key",
    signing_salt: "J9DwMByU",
    same_site: "Lax"
  ]

  # socket "/live", Phoenix.LiveView.Socket,
  #   websocket: [connect_info: [session: @session_options]],
  #   longpoll: [connect_info: [session: @session_options]]

  # Serve at "/" the static files from "priv/static" directory.
  #
  # You should set gzip to true if you are running phx.digest
  # when deploying your static files in production.
  plug Plug.Static,
    at: "/",
    from: :hydra_srt,
    gzip: false,
    only: HydraSrtWeb.static_paths(),
    index: "index.html"

  # Code reloading can be explicitly enabled under the
  # :code_reloader configuration of your endpoint.
  if code_reloading? do
    plug Phoenix.CodeReloader
    plug Phoenix.Ecto.CheckRepoStatus, otp_app: :hydra_srt
  end

  plug Plug.RequestId
  plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint]

  plug Plug.Parsers,
    parsers: [:urlencoded, :multipart, :json],
    pass: ["*/*"],
    json_decoder: Phoenix.json_library()

  plug Plug.MethodOverride
  plug Plug.Head
  plug Plug.Session, @session_options

  # NOTE(review): wildcard origin together with credentials: true is
  # rejected by browsers per the CORS spec — confirm the intended origins.
  plug CORSPlug,
    origin: ["*"],
    methods: ["*"],
    headers: ["*"],
    credentials: true,
    max_age: 86400

  plug HydraSrtWeb.Router
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/router.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.Router do
  use HydraSrtWeb, :router

  pipeline :browser do
    plug(:accepts, ["html"])
  end

  pipeline :api do
    plug :accepts, ["json"]
  end

  # Auth without body parsing — used for raw binary uploads (restore).
  pipeline :api_no_parse do
    plug :accepts, ["*/*"]
    plug :check_auth
  end

  pipeline :auth do
    plug :check_auth
  end

  scope "/health", HydraSrtWeb do
    get "/", HealthController, :index
  end

  scope "/api", HydraSrtWeb do
    pipe_through :api

    post "/login", AuthController, :login
  end

  scope "/api", HydraSrtWeb do
    pipe_through [:api, :auth]
    resources "/routes", RouteController, except: [:new, :edit]
    get "/routes/:route_id/start", RouteController, :start
    get "/routes/:route_id/stop", RouteController, :stop
    get "/routes/:route_id/restart", RouteController, :restart
    get "/routes/:route_id/destinations", DestinationController, :index
    post "/routes/:route_id/destinations", DestinationController, :create
    get "/routes/:route_id/destinations/:dest_id", DestinationController, :show
    put "/routes/:route_id/destinations/:dest_id", DestinationController, :update
    delete "/routes/:route_id/destinations/:dest_id", DestinationController, :delete

    get "/backup/export", BackupController, :export
    get "/backup/create-download-link", BackupController, :create_download_link
    get "/backup/create-backup-download-link", BackupController, :create_backup_download_link

    get "/system/pipelines", SystemController, :list_pipelines
    get "/system/pipelines/detailed", SystemController, :list_pipelines_detailed
    post "/system/pipelines/:pid/kill", SystemController, :kill_pipeline

    get "/nodes", NodeController, :index
    get "/nodes/:id", NodeController, :show
  end

  # TODO: improve this
  scope "/api", HydraSrtWeb do
    pipe_through [:api_no_parse]
    post "/restore", BackupController, :restore
  end

  # Download links are pre-authorized via one-time session ids, so these
  # routes intentionally skip the auth pipeline.
  scope "/backup", HydraSrtWeb do
    get "/:session_id/download", BackupController, :download
    get "/:session_id/download_backup", BackupController, :download_backup
  end

  scope "/", HydraSrtWeb do
    pipe_through(:browser)

    get "/", PageController, :index
    get "/*path", PageController, :index
  end

  # Validates the Bearer token against the Cachex-backed session store.
  # Missing header, unknown token, and cache errors all yield 403.
  defp check_auth(conn, _opts) do
    case get_req_header(conn, "authorization") do
      ["Bearer " <> token] ->
        case Cachex.get(HydraSrt.Cache, "auth_session:#{token}") do
          {:ok, nil} ->
            conn
            |> put_status(403)
            |> Phoenix.Controller.json(%{error: "Unauthorized"})
            |> halt()

          {:ok, _value} ->
            conn

          _ ->
            conn
            |> put_status(403)
            |> Phoenix.Controller.json(%{error: "Unauthorized"})
            |> halt()
        end

      _ ->
        conn
        |> put_status(403)
        |> Phoenix.Controller.json(%{error: "Authorization header missing"})
        |> halt()
    end
  end
end

--------------------------------------------------------------------------------
/lib/hydra_srt_web/telemetry.ex:
--------------------------------------------------------------------------------
defmodule HydraSrtWeb.Telemetry do
  use Supervisor
  import Telemetry.Metrics

  def start_link(arg) do
    Supervisor.start_link(__MODULE__, arg, name: __MODULE__)
  end

  @impl true
  def init(_arg) do
    children = [
      # Telemetry poller will execute the given period measurements
      # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics
      {:telemetry_poller, measurements: periodic_measurements(), period: 10_000}
      # Add reporters as children of your supervision tree.
      # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()}
    ]

    Supervisor.init(children, strategy: :one_for_one)
  end

  def metrics do
    [
      # Phoenix Metrics
      summary("phoenix.endpoint.start.system_time",
        unit: {:native, :millisecond}
      ),
      summary("phoenix.endpoint.stop.duration",
        unit: {:native, :millisecond}
      ),
      summary("phoenix.router_dispatch.start.system_time",
        tags: [:route],
        unit: {:native, :millisecond}
      ),
      summary("phoenix.router_dispatch.exception.duration",
        tags: [:route],
        unit: {:native, :millisecond}
      ),
      summary("phoenix.router_dispatch.stop.duration",
        tags: [:route],
        unit: {:native, :millisecond}
      ),
      summary("phoenix.socket_connected.duration",
        unit: {:native, :millisecond}
      ),
      summary("phoenix.channel_joined.duration",
        unit: {:native, :millisecond}
      ),
      summary("phoenix.channel_handled_in.duration",
        tags: [:event],
        unit: {:native, :millisecond}
      ),

      # Database Metrics
      summary("hydra_srt.repo.query.total_time",
        unit: {:native, :millisecond},
        description: "The sum of the other measurements"
      ),
      summary("hydra_srt.repo.query.decode_time",
        unit: {:native, :millisecond},
        description: "The time spent decoding the data received from the database"
      ),
      summary("hydra_srt.repo.query.query_time",
        unit: {:native, :millisecond},
        description: "The time spent executing the query"
      ),
      summary("hydra_srt.repo.query.queue_time",
        unit: {:native, :millisecond},
        description: "The time spent waiting for a database connection"
      ),
      summary("hydra_srt.repo.query.idle_time",
        unit: {:native, :millisecond},
        description:
          "The time the connection spent waiting before being checked out for the query"
      ),

      # VM Metrics
      summary("vm.memory.total", unit: {:byte, :kilobyte}),
      summary("vm.total_run_queue_lengths.total"),
      summary("vm.total_run_queue_lengths.cpu"),
      summary("vm.total_run_queue_lengths.io")
    ]
  end

  defp periodic_measurements do
    [
      # A module, function and arguments to be invoked periodically.
      # This function must call :telemetry.execute/3 and a metric must be added above.
      # {HydraSrtWeb, :count_users, []}
    ]
  end
end

--------------------------------------------------------------------------------
/lib/mix/tasks/compile_c_app.ex:
--------------------------------------------------------------------------------
defmodule Mix.Tasks.CompileCApp do
  @moduledoc """
  Compiles the native C application.

  ## Examples

      $ mix compile_c_app

  """
  use Mix.Task

  @shortdoc "Compiles the native C application"
  def run(_) do
    IO.puts("Compiling C application...")
    {result, exit_code} = System.cmd("make", ["-C", "native"])
    IO.puts(result)

    if exit_code != 0 do
      Mix.raise("Failed to compile C application")
    end

    # Verify the binary was created
    binary_path = Path.join(["native", "build", "hydra_srt_pipeline"])

    unless File.exists?(binary_path) do
      Mix.raise("C application binary was not created at #{binary_path}")
    end

    IO.puts("C application compiled successfully at #{binary_path}")
  end
end

--------------------------------------------------------------------------------
/mix.exs:
--------------------------------------------------------------------------------
defmodule HydraSrt.MixProject do
  use Mix.Project

  def project do
    [
      app: :hydra_srt,
      version: "0.1.0",
      elixir: "~> 1.14",
      elixirc_paths: elixirc_paths(Mix.env()),
      start_permanent: Mix.env() == :prod,
      aliases: aliases(),
      deps: deps(),
      releases: releases()
    ]
  end

  # Configuration for the OTP application.
  #
  # Type `mix help compile.app` for more information.
  def application do
    [
      mod: {HydraSrt.Application, []},
      # :os_mon powers the OsMon metrics; :ssl/:runtime_tools are needed at runtime.
      extra_applications:
        [:logger, :os_mon, :ssl, :runtime_tools] ++ extra_applications(Mix.env())
    ]
  end

  # :wx and :observer are GUI-only applications, pulled in for `:observer.start/0`
  # during development; excluded from prod/test releases.
  defp extra_applications(:dev), do: [:wx, :observer]
  defp extra_applications(_), do: []

  # Specifies which paths to compile per environment.
  # Test additionally compiles test/support (fixtures, ConnCase, DataCase).
  defp elixirc_paths(:test), do: ["lib", "test/support"]
  defp elixirc_paths(_), do: ["lib"]

  # Specifies your project dependencies.
  #
  # Type `mix help deps` for examples and options.
  defp deps do
    [
      {:phoenix, "~> 1.7.14"},
      {:phoenix_ecto, "~> 4.5"},
      {:ecto_sql, "~> 3.10"},
      {:ecto_sqlite3, ">= 0.0.0"},
      {:telemetry_metrics, "~> 1.0"},
      {:telemetry_poller, "~> 1.0"},
      {:jason, "~> 1.2"},
      {:plug_cowboy, "~> 2.7"},
      # Khepri is pinned exactly (0.16.0): raft-based store used for route state.
      {:khepri, "0.16.0"},
      {:uuid, "~> 1.1"},
      {:cors_plug, "~> 3.0"},
      {:syn, "~> 3.3"},
      {:cachex, "~> 3.6"},
      {:observer_cli, "~> 1.7"},
      # override: true — presumably another dep also declares :meck; TODO confirm.
      {:meck, "~> 1.0", only: [:dev, :test], override: true},
      {:credo, "~> 1.7", only: [:dev, :test], runtime: false},
      {:dialyxir, "~> 1.4", only: [:dev, :test], runtime: false},
      {:benchee, "~> 1.3", only: :dev},
      {:instream, "~> 2.0"}
    ]
  end

  # Aliases are shortcuts or tasks specific to the current project.
  # For example, to install project dependencies and perform other setup tasks, run:
  #
  #     $ mix setup
  #
  # See the documentation for `Mix` for more info on aliases.
68 | defp aliases do 69 | [ 70 | setup: ["deps.get", "ecto.setup"], 71 | "ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"], 72 | "ecto.reset": ["ecto.drop", "ecto.setup"], 73 | test: ["ecto.create --quiet", "ecto.migrate --quiet", "test"] 74 | ] 75 | end 76 | 77 | defp releases do 78 | [ 79 | hydra_srt: [ 80 | steps: [:assemble, ©_c_app/1, ©_web_app/1], 81 | cookie: System.get_env("RELEASE_COOKIE", Base.url_encode64(:crypto.strong_rand_bytes(30))) 82 | ] 83 | ] 84 | end 85 | 86 | defp copy_c_app(release) do 87 | IO.puts("Copying C application to release...") 88 | 89 | {result, exit_code} = System.cmd("make", ["-C", "native"]) 90 | IO.puts(result) 91 | 92 | if exit_code != 0 do 93 | raise "Failed to compile C application" 94 | end 95 | 96 | source_path = Path.join(["native", "build", "hydra_srt_pipeline"]) 97 | 98 | unless File.exists?(source_path) do 99 | raise "C application binary was not created at #{source_path}" 100 | end 101 | 102 | app_dir = Path.join([release.path, "lib", "hydra_srt-#{release.version}"]) 103 | priv_dest_dir = Path.join(app_dir, "priv/native/build") 104 | File.mkdir_p!(priv_dest_dir) 105 | 106 | priv_dest_path = Path.join(priv_dest_dir, "hydra_srt_pipeline") 107 | File.cp!(source_path, priv_dest_path) 108 | File.chmod!(priv_dest_path, 0o755) 109 | 110 | IO.puts("C application copied to priv directory at #{priv_dest_path}") 111 | 112 | release 113 | end 114 | 115 | defp copy_web_app(release) do 116 | IO.puts("Building and copying web app to release...") 117 | 118 | web_app_dir = "web_app" 119 | IO.puts("Building web app with npm run build...") 120 | 121 | {build_result, build_exit_code} = System.cmd("npm", ["run", "build"], cd: web_app_dir) 122 | IO.puts(build_result) 123 | 124 | if build_exit_code != 0 do 125 | raise "Failed to build web app with npm run build" 126 | end 127 | 128 | web_app_source = Path.join([web_app_dir, "dist"]) 129 | 130 | unless File.dir?(web_app_source) do 131 | raise "Web app dist directory not 
found at #{web_app_source} after build. Build may have failed." 132 | end 133 | 134 | app_dir = Path.join([release.path, "lib", "hydra_srt-#{release.version}"]) 135 | web_app_dest = Path.join(app_dir, "priv/static") 136 | 137 | File.mkdir_p!(web_app_dest) 138 | 139 | web_app_source 140 | |> File.ls!() 141 | |> Enum.each(fn file -> 142 | source_file = Path.join(web_app_source, file) 143 | dest_file = Path.join(web_app_dest, file) 144 | 145 | if File.dir?(source_file) do 146 | File.cp_r!(source_file, dest_file) 147 | else 148 | File.cp!(source_file, dest_file) 149 | end 150 | end) 151 | 152 | IO.puts("Web app built and copied successfully to #{web_app_dest}") 153 | 154 | release 155 | end 156 | end 157 | -------------------------------------------------------------------------------- /native/.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: Google 2 | IndentWidth: 4 3 | UseTab: Never 4 | ColumnLimit: 120 5 | BreakBeforeBraces: Custom 6 | BraceWrapping: 7 | AfterFunction: true 8 | AfterControlStatement: false 9 | AfterEnum: false 10 | AfterStruct: false 11 | AfterUnion: false 12 | BeforeElse: false 13 | BeforeCatch: false 14 | PointerAlignment: Left 15 | SpaceBeforeParens: ControlStatements 16 | SpacesBeforeTrailingComments: 1 17 | -------------------------------------------------------------------------------- /native/.gitignore: -------------------------------------------------------------------------------- 1 | /build/* 2 | !/build/.gitkeep 3 | /temp 4 | -------------------------------------------------------------------------------- /native/Makefile: -------------------------------------------------------------------------------- 1 | CC := gcc 2 | CFLAGS := -Wall -Wextra -g `pkg-config --cflags gstreamer-1.0 libcjson cmocka srt` 3 | LDFLAGS := `pkg-config --libs gstreamer-1.0 libcjson cmocka gio-2.0 srt` 4 | 5 | SRC_DIR := src 6 | BUILD_DIR := build 7 | INCLUDE_DIR := include 8 | TEST_DIR := tests 9 | 
10 | SRCS := $(wildcard $(SRC_DIR)/*.c) 11 | OBJS := $(patsubst $(SRC_DIR)/%.c, $(BUILD_DIR)/%.o, $(SRCS)) 12 | 13 | TESTS := $(wildcard $(TEST_DIR)/*.c) 14 | TEST_OBJS := $(patsubst $(TEST_DIR)/%.c, $(BUILD_DIR)/%.o, $(TESTS)) 15 | 16 | OBJS_NO_MAIN := $(filter-out $(BUILD_DIR)/main.o, $(OBJS)) 17 | 18 | MAIN_EXEC := $(BUILD_DIR)/hydra_srt_pipeline 19 | TEST_EXEC := $(BUILD_DIR)/test_runner 20 | 21 | all: $(MAIN_EXEC) 22 | 23 | $(MAIN_EXEC): $(OBJS) 24 | $(CC) -o $@ $^ $(LDFLAGS) 25 | 26 | $(TEST_EXEC): $(TEST_OBJS) $(OBJS_NO_MAIN) 27 | $(CC) -o $@ $^ $(LDFLAGS) 28 | 29 | $(BUILD_DIR)/%.o: $(SRC_DIR)/%.c | $(BUILD_DIR) 30 | $(CC) $(CFLAGS) -I$(INCLUDE_DIR) -c $< -o $@ 31 | 32 | $(BUILD_DIR)/%.o: $(TEST_DIR)/%.c | $(BUILD_DIR) 33 | $(CC) $(CFLAGS) -I$(INCLUDE_DIR) -c $< -o $@ 34 | 35 | $(BUILD_DIR): 36 | mkdir -p $(BUILD_DIR) 37 | 38 | clean: 39 | rm -rf $(BUILD_DIR) 40 | 41 | help: 42 | @echo "Available targets:" 43 | @echo " make - Build all targets" 44 | @echo " make clean - Remove compiled files" 45 | @echo " make help - Show this help message" 46 | @echo " make test - Run tests" 47 | @echo " make dummy_signal - Run dymmy_signal" 48 | 49 | test: $(TEST_EXEC) 50 | ./$(TEST_EXEC) 51 | 52 | dummy_signal: 53 | ffmpeg -re \ 54 | -f lavfi -i "testsrc=size=1280x720:rate=30" \ 55 | -f lavfi -i "sine=frequency=440:sample_rate=48000" \ 56 | -c:v libx264 -preset veryfast -tune zerolatency -b:v 2000k \ 57 | -c:a aac -b:a 128k \ 58 | -f mpegts \ 59 | "srt://127.0.0.1:8000?mode=caller&streamid=test1" 60 | 61 | dummy_signal_with_pass: 62 | ffmpeg -re \ 63 | -f lavfi -i "testsrc=size=1280x720:rate=30" \ 64 | -f lavfi -i "sine=frequency=440:sample_rate=48000" \ 65 | -c:v libx264 -preset veryfast -tune zerolatency -b:v 2000k \ 66 | -c:a aac -b:a 128k \ 67 | -f mpegts \ 68 | "srt://127.0.0.1:8000?mode=caller&streamid=test1&passphrase=some_pass&pbkeylen=16" 69 | 70 | 71 | -------------------------------------------------------------------------------- /native/README.md: 
-------------------------------------------------------------------------------- 1 | ### debug input 2 | {"source":{"type":"srtsrc","localaddress":"127.0.0.1","localport":8000,"auto-reconnect":true,"keep-listening":false,"mode":"listener"},"sinks":[{"type":"srtsink","localaddress":"127.0.0.1","localport":8002,"mode":"listener"},{"type":"udpsink","host":"127.0.0.1","port":8003}]} 3 | 4 | {"source":{"type":"srtsrc","localaddress":"127.0.0.1","localport":8000,"auto-reconnect":true,"keep-listening":false,"mode":"listener", "streamid": "test1", "passphrase": "secure_pass_123", "pbkeylen": 16},"sinks":[{"type":"srtsink","localaddress":"127.0.0.1","localport":8002,"mode":"listener"},{"type":"udpsink","host":"127.0.0.1","port":8003}]} 5 | -------------------------------------------------------------------------------- /native/build/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abc3/hydra-srt/5b5b32527dd24a91e7778ac8c7f8c8fadc8dcc8d/native/build/.gitkeep -------------------------------------------------------------------------------- /native/include/gst_pipeline.h: -------------------------------------------------------------------------------- 1 | #ifndef GST_PIPELINE_H 2 | #define GST_PIPELINE_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | GstElement *create_pipeline(cJSON *json); 10 | void cleanup_pipeline(GstElement *pipeline); 11 | void print_srt_stats(GstElement *source); 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /native/include/unix_socket.h: -------------------------------------------------------------------------------- 1 | #ifndef UNIX_SOCKET_H 2 | #define UNIX_SOCKET_H 3 | 4 | #include 5 | #include 6 | 7 | extern int sock; 8 | 9 | void init_unix_socket(const char *socket_path); 10 | void send_message_to_unix_socket(const char *message); 11 | void cleanup_socket(void); 12 | 13 | #endif 14 | 
-------------------------------------------------------------------------------- /native/src/main.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "gst_pipeline.h" 5 | #include "unix_socket.h" 6 | 7 | // stdin expects a JSON object: 8 | // { 9 | // "source": { 10 | // "type": "srtsrc", 11 | // "localaddress": "127.0.0.1", 12 | // "localport": 8000, 13 | // "auto-reconnect": true, 14 | // "keep-listening": false, 15 | // "mode": "listener" 16 | // }, 17 | // "sinks": [ 18 | // { 19 | // "type": "srtsink", 20 | // "localaddress": "127.0.0.1", 21 | // "localport": 8002, 22 | // "mode": "listener" 23 | // }, 24 | // { 25 | // "type": "udpsink", 26 | // "address": "127.0.0.1", 27 | // "port": 8003 28 | // } 29 | // ] 30 | // } 31 | // 32 | 33 | // Example JSON: 34 | // {\"sinks\":[{\"localaddress\":\"127.0.0.1\",\"localport\":8002,\"mode\":\"listener\",\"type\":\"srtsink\"},{\"address\":\"127.0.0.1\",\"port\":8003,\"type\":\"udpsink\"}],\"source\":{\"auto-reconnect\":true,\"keep-listening\":false,\"localaddress\":\"127.0.0.1\",\"localport\":8000,\"type\":\"srtsrc\"}} 35 | 36 | int main(int argc, char* argv[]) 37 | { 38 | setvbuf(stdout, NULL, _IONBF, 0); 39 | char buffer[1024]; 40 | 41 | init_unix_socket("/tmp/hydra_unix_sock"); 42 | atexit(cleanup_socket); 43 | 44 | printf("Argument %d: %s\n", argc, argv[1]); 45 | send_message_to_unix_socket("route_id:"); 46 | send_message_to_unix_socket(argv[1]); 47 | 48 | printf("Waiting for JSON input...\n"); 49 | fgets(buffer, sizeof(buffer), stdin); 50 | printf("Received JSON: %s\n", buffer); 51 | 52 | cJSON* json = cJSON_Parse(buffer); 53 | if (!json) { 54 | printf("Error parsing JSON\n"); 55 | return 1; 56 | } 57 | 58 | gst_init(NULL, NULL); 59 | 60 | GstElement* pipeline = create_pipeline(json); 61 | if (!pipeline) { 62 | cJSON_Delete(json); 63 | return 1; 64 | } 65 | 66 | GstStateChangeReturn ret = gst_element_set_state(pipeline, GST_STATE_PLAYING); 
67 | if (ret == GST_STATE_CHANGE_FAILURE) { 68 | g_printerr("Unable to set the pipeline to the playing state.\n"); 69 | cleanup_pipeline(pipeline); 70 | cJSON_Delete(json); 71 | return 1; 72 | } 73 | 74 | GMainLoop* loop = g_main_loop_new(NULL, FALSE); 75 | g_main_loop_run(loop); 76 | 77 | g_main_loop_unref(loop); 78 | cleanup_pipeline(pipeline); 79 | cJSON_Delete(json); 80 | 81 | return 0; 82 | } 83 | -------------------------------------------------------------------------------- /native/src/unix_socket.c: -------------------------------------------------------------------------------- 1 | #include "unix_socket.h" 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | int sock; 9 | 10 | void init_unix_socket(const char* socket_path) 11 | { 12 | struct sockaddr_un addr; 13 | 14 | sock = socket(AF_UNIX, SOCK_STREAM, 0); 15 | if (sock < 0) { 16 | perror("socket"); 17 | exit(1); 18 | } 19 | 20 | memset(&addr, 0, sizeof(struct sockaddr_un)); 21 | addr.sun_family = AF_UNIX; 22 | strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path) - 1); 23 | 24 | if (connect(sock, (struct sockaddr*)&addr, sizeof(struct sockaddr_un)) < 0) { 25 | perror("connect"); 26 | cleanup_socket(); 27 | exit(1); 28 | } 29 | printf("Connected to the socket.\n"); 30 | } 31 | 32 | void send_message_to_unix_socket(const char* message) 33 | { 34 | if (send(sock, message, strlen(message), 0) < 0) { 35 | perror("send"); 36 | } 37 | } 38 | 39 | void cleanup_socket() 40 | { 41 | if (sock >= 0) { 42 | close(sock); 43 | sock = -1; 44 | printf("Socket closed.\n"); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /native/tests/test_gst_pipeline.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "../include/gst_pipeline.h" 10 | 11 | static void test_create_pipeline(void **state) 12 | { 13 | (void)state; 14 | 
15 | gst_init(NULL, NULL); 16 | 17 | const char *json_str = 18 | "{\"source\":{\"type\":\"srtsrc\",\"localaddress\":\"127.0.0.1\",\"localport\":8000,\"auto-reconnect\":true," 19 | "\"keep-listening\":false,\"mode\":\"listener\"},\"sinks\":[{\"type\":\"srtsink\",\"localaddress\":\"127.0.0." 20 | "1\",\"localport\":8002," 21 | "\"mode\":\"listener\"},{\"type\":\"udpsink\",\"host\":\"127.0.0.1\",\"port\":8003}]}"; 22 | cJSON *json = cJSON_Parse(json_str); 23 | assert_non_null(json); 24 | 25 | GstElement *pipeline = create_pipeline(json); 26 | assert_non_null(pipeline); 27 | 28 | cleanup_pipeline(pipeline); 29 | cJSON_Delete(json); 30 | } 31 | -------------------------------------------------------------------------------- /native/tests/test_unix_socket.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "../include/gst_pipeline.h" 10 | #include "../include/unix_socket.h" 11 | 12 | static void test_init_unix_socket(void **state) 13 | { 14 | (void)state; 15 | 16 | init_unix_socket("/tmp/hydra_unix_sock"); 17 | assert_int_not_equal(sock, -1); 18 | } 19 | 20 | static void test_send_message_to_unix_socket(void **state) 21 | { 22 | (void)state; 23 | 24 | init_unix_socket("/tmp/hydra_unix_sock"); 25 | send_message_to_unix_socket("Test message"); 26 | } 27 | 28 | static void test_cleanup_socket(void **state) 29 | { 30 | (void)state; 31 | 32 | init_unix_socket("/tmp/hydra_unix_sock"); 33 | cleanup_socket(); 34 | assert_int_equal(sock, -1); 35 | } 36 | 37 | static void test_create_pipeline(void **state) 38 | { 39 | (void)state; 40 | 41 | const char *json_str = 42 | "{\"source\":{\"type\":\"srtsrc\",\"localaddress\":\"127.0.0.1\",\"localport\":8000,\"auto-reconnect\":true," 43 | "\"keep-listening\":false,\"mode\":\"listener\"},\"sinks\":[{\"type\":\"srtsink\",\"localaddress\":\"127.0.0." 
44 | "1\",\"localport\":8002," 45 | "\"mode\":\"listener\"},{\"type\":\"udpsink\",\"host\":\"127.0.0.1\",\"port\":8003}]}"; 46 | cJSON *json = cJSON_Parse(json_str); 47 | assert_non_null(json); 48 | 49 | GstElement *pipeline = create_pipeline(json); 50 | assert_non_null(pipeline); 51 | 52 | cleanup_pipeline(pipeline); 53 | cJSON_Delete(json); 54 | } 55 | 56 | int main(void) 57 | { 58 | gst_init(NULL, NULL); 59 | 60 | const struct CMUnitTest tests[] = { 61 | cmocka_unit_test(test_init_unix_socket), 62 | cmocka_unit_test(test_send_message_to_unix_socket), 63 | cmocka_unit_test(test_cleanup_socket), 64 | cmocka_unit_test(test_create_pipeline), 65 | }; 66 | return cmocka_run_group_tests(tests, NULL, NULL); 67 | } 68 | -------------------------------------------------------------------------------- /priv/repo/migrations/.formatter.exs: -------------------------------------------------------------------------------- 1 | [ 2 | import_deps: [:ecto_sql], 3 | inputs: ["*.exs"] 4 | ] 5 | -------------------------------------------------------------------------------- /priv/repo/migrations/20250219145148_create_routes.exs: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.Repo.Migrations.CreateRoutes do 2 | use Ecto.Migration 3 | 4 | def change do 5 | create table(:routes, primary_key: false) do 6 | add :id, :binary_id, primary_key: true 7 | add :enabled, :boolean, default: false, null: false 8 | add :name, :string 9 | add :alias, :string 10 | add :status, :string 11 | add :source, :map 12 | add :destinations, :map 13 | add :started_at, :utc_datetime 14 | add :stopped_at, :utc_datetime 15 | 16 | timestamps(type: :utc_datetime) 17 | end 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /priv/repo/migrations/20250220162451_create_destinations.exs: -------------------------------------------------------------------------------- 1 | defmodule 
HydraSrt.Repo.Migrations.CreateDestinations do 2 | use Ecto.Migration 3 | 4 | def change do 5 | create table(:destinations, primary_key: false) do 6 | add :id, :binary_id, primary_key: true 7 | add :enabled, :boolean, default: false, null: false 8 | add :name, :string 9 | add :alias, :string 10 | add :status, :string 11 | add :started_at, :utc_datetime 12 | add :stopped_at, :utc_datetime 13 | 14 | timestamps(type: :utc_datetime) 15 | end 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /priv/repo/seeds.exs: -------------------------------------------------------------------------------- 1 | # Script for populating the database. You can run it as: 2 | # 3 | # mix run priv/repo/seeds.exs 4 | # 5 | # Inside the script, you can read and write to any of your 6 | # repositories directly: 7 | # 8 | # HydraSrt.Repo.insert!(%HydraSrt.SomeSchema{}) 9 | # 10 | # We recommend using the bang functions (`insert!`, `update!` 11 | # and so on) as they will fail if something goes wrong. 
12 | -------------------------------------------------------------------------------- /priv/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abc3/hydra-srt/5b5b32527dd24a91e7778ac8c7f8c8fadc8dcc8d/priv/static/favicon.ico -------------------------------------------------------------------------------- /priv/static/robots.txt: -------------------------------------------------------------------------------- 1 | # See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file 2 | # 3 | # To ban all spiders from the entire site uncomment the next two lines: 4 | # User-agent: * 5 | # Disallow: / 6 | -------------------------------------------------------------------------------- /rel/env.bat.eex: -------------------------------------------------------------------------------- 1 | @echo off 2 | rem Set the release to work across nodes. If using the long name format like 3 | rem the one below (my_app@127.0.0.1), you need to also uncomment the 4 | rem RELEASE_DISTRIBUTION variable below. Must be "sname", "name" or "none". 
5 | rem set RELEASE_DISTRIBUTION=name 6 | rem set RELEASE_NODE=<%= @release.name %>@127.0.0.1 7 | -------------------------------------------------------------------------------- /rel/env.sh.eex: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Use NODE_IP if set, otherwise use hostname -f 4 | if [ -z "$NODE_IP" ] 5 | then 6 | ip=$(hostname -f) 7 | else 8 | ip=$NODE_IP 9 | fi 10 | 11 | # assign the value of NODE_NAME if it exists, else assign the value of FLY_APP_NAME, 12 | # and if that doesn't exist either, assign "supavisor" to node_name 13 | node_name="${NODE_NAME:=hydra_srt}" 14 | 15 | export RELEASE_DISTRIBUTION=name 16 | export RELEASE_NODE=$node_name@$ip 17 | -------------------------------------------------------------------------------- /rel/overlays/bin/migrate: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | cd -P -- "$(dirname -- "$0")" 5 | exec ./hydra_srt eval HydraSrt.Release.migrate 6 | -------------------------------------------------------------------------------- /rel/overlays/bin/migrate.bat: -------------------------------------------------------------------------------- 1 | call "%~dp0\hydra_srt" eval HydraSrt.Release.migrate 2 | -------------------------------------------------------------------------------- /rel/overlays/bin/server: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | cd -P -- "$(dirname -- "$0")" 5 | PHX_SERVER=true exec ./hydra_srt start 6 | -------------------------------------------------------------------------------- /rel/overlays/bin/server.bat: -------------------------------------------------------------------------------- 1 | set PHX_SERVER=true 2 | call "%~dp0\hydra_srt" start 3 | -------------------------------------------------------------------------------- /rel/vm.args.eex: 
-------------------------------------------------------------------------------- 1 | ## Customize flags given to the VM: http://erlang.org/doc/man/erl.html 2 | ## -mode/-name/-sname/-setcookie are configured via env vars, do not set them here 3 | 4 | ## Number of dirty schedulers doing IO work (file, sockets, and others) 5 | ##+SDio 5 6 | 7 | ## Increase number of concurrent ports/sockets 8 | ##+Q 65536 9 | 10 | ## Tweak GC to run more often 11 | ##-env ERL_FULLSWEEP_AFTER 10 12 | 13 | ## Set number of ETS tables 14 | +e 5000 15 | 16 | ## Set number of processes 17 | +P 1000000 18 | 19 | -kernel inet_dist_listen_min 20000 20 | -kernel inet_dist_listen_max 21000 21 | +zdbbl 2097151 22 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # Check if RLIMIT_NOFILE is set before using it 5 | if [ -n "${RLIMIT_NOFILE:-}" ]; then 6 | echo "Setting RLIMIT_NOFILE to ${RLIMIT_NOFILE}" 7 | ulimit -n "$RLIMIT_NOFILE" 8 | fi 9 | 10 | exec "$@" -------------------------------------------------------------------------------- /test/hydra_srt/api_test.exs: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.ApiTest do 2 | use HydraSrt.DataCase 3 | 4 | alias HydraSrt.Api 5 | 6 | describe "routes" do 7 | alias HydraSrt.Api.Route 8 | 9 | import HydraSrt.ApiFixtures 10 | 11 | @invalid_attrs %{alias: nil, enabled: nil, name: nil, status: nil, started_at: nil, source: nil, destinations: nil, stopped_at: nil} 12 | 13 | test "list_routes/0 returns all routes" do 14 | route = route_fixture() 15 | assert Api.list_routes() == [route] 16 | end 17 | 18 | test "get_route!/1 returns the route with given id" do 19 | route = route_fixture() 20 | assert Api.get_route!(route.id) == route 21 | end 22 | 23 | test "create_route/1 with valid data creates a route" do 24 | valid_attrs = 
%{alias: "some alias", enabled: true, name: "some name", status: "some status", started_at: ~U[2025-02-18 14:51:00Z], source: %{}, destinations: %{}, stopped_at: ~U[2025-02-18 14:51:00Z]} 25 | 26 | assert {:ok, %Route{} = route} = Api.create_route(valid_attrs) 27 | assert route.alias == "some alias" 28 | assert route.enabled == true 29 | assert route.name == "some name" 30 | assert route.status == "some status" 31 | assert route.started_at == ~U[2025-02-18 14:51:00Z] 32 | assert route.source == %{} 33 | assert route.destinations == %{} 34 | assert route.stopped_at == ~U[2025-02-18 14:51:00Z] 35 | end 36 | 37 | test "create_route/1 with invalid data returns error changeset" do 38 | assert {:error, %Ecto.Changeset{}} = Api.create_route(@invalid_attrs) 39 | end 40 | 41 | test "update_route/2 with valid data updates the route" do 42 | route = route_fixture() 43 | update_attrs = %{alias: "some updated alias", enabled: false, name: "some updated name", status: "some updated status", started_at: ~U[2025-02-19 14:51:00Z], source: %{}, destinations: %{}, stopped_at: ~U[2025-02-19 14:51:00Z]} 44 | 45 | assert {:ok, %Route{} = route} = Api.update_route(route, update_attrs) 46 | assert route.alias == "some updated alias" 47 | assert route.enabled == false 48 | assert route.name == "some updated name" 49 | assert route.status == "some updated status" 50 | assert route.started_at == ~U[2025-02-19 14:51:00Z] 51 | assert route.source == %{} 52 | assert route.destinations == %{} 53 | assert route.stopped_at == ~U[2025-02-19 14:51:00Z] 54 | end 55 | 56 | test "update_route/2 with invalid data returns error changeset" do 57 | route = route_fixture() 58 | assert {:error, %Ecto.Changeset{}} = Api.update_route(route, @invalid_attrs) 59 | assert route == Api.get_route!(route.id) 60 | end 61 | 62 | test "delete_route/1 deletes the route" do 63 | route = route_fixture() 64 | assert {:ok, %Route{}} = Api.delete_route(route) 65 | assert_raise Ecto.NoResultsError, fn -> 
Api.get_route!(route.id) end 66 | end 67 | 68 | test "change_route/1 returns a route changeset" do 69 | route = route_fixture() 70 | assert %Ecto.Changeset{} = Api.change_route(route) 71 | end 72 | end 73 | 74 | describe "destinations" do 75 | alias HydraSrt.Api.Destination 76 | 77 | import HydraSrt.ApiFixtures 78 | 79 | @invalid_attrs %{alias: nil, enabled: nil, name: nil, status: nil, started_at: nil, stopped_at: nil} 80 | 81 | test "list_destinations/0 returns all destinations" do 82 | destination = destination_fixture() 83 | assert Api.list_destinations() == [destination] 84 | end 85 | 86 | test "get_destination!/1 returns the destination with given id" do 87 | destination = destination_fixture() 88 | assert Api.get_destination!(destination.id) == destination 89 | end 90 | 91 | test "create_destination/1 with valid data creates a destination" do 92 | valid_attrs = %{alias: "some alias", enabled: true, name: "some name", status: "some status", started_at: ~U[2025-02-19 16:24:00Z], stopped_at: ~U[2025-02-19 16:24:00Z]} 93 | 94 | assert {:ok, %Destination{} = destination} = Api.create_destination(valid_attrs) 95 | assert destination.alias == "some alias" 96 | assert destination.enabled == true 97 | assert destination.name == "some name" 98 | assert destination.status == "some status" 99 | assert destination.started_at == ~U[2025-02-19 16:24:00Z] 100 | assert destination.stopped_at == ~U[2025-02-19 16:24:00Z] 101 | end 102 | 103 | test "create_destination/1 with invalid data returns error changeset" do 104 | assert {:error, %Ecto.Changeset{}} = Api.create_destination(@invalid_attrs) 105 | end 106 | 107 | test "update_destination/2 with valid data updates the destination" do 108 | destination = destination_fixture() 109 | update_attrs = %{alias: "some updated alias", enabled: false, name: "some updated name", status: "some updated status", started_at: ~U[2025-02-20 16:24:00Z], stopped_at: ~U[2025-02-20 16:24:00Z]} 110 | 111 | assert {:ok, %Destination{} = 
destination} = Api.update_destination(destination, update_attrs) 112 | assert destination.alias == "some updated alias" 113 | assert destination.enabled == false 114 | assert destination.name == "some updated name" 115 | assert destination.status == "some updated status" 116 | assert destination.started_at == ~U[2025-02-20 16:24:00Z] 117 | assert destination.stopped_at == ~U[2025-02-20 16:24:00Z] 118 | end 119 | 120 | test "update_destination/2 with invalid data returns error changeset" do 121 | destination = destination_fixture() 122 | assert {:error, %Ecto.Changeset{}} = Api.update_destination(destination, @invalid_attrs) 123 | assert destination == Api.get_destination!(destination.id) 124 | end 125 | 126 | test "delete_destination/1 deletes the destination" do 127 | destination = destination_fixture() 128 | assert {:ok, %Destination{}} = Api.delete_destination(destination) 129 | assert_raise Ecto.NoResultsError, fn -> Api.get_destination!(destination.id) end 130 | end 131 | 132 | test "change_destination/1 returns a destination changeset" do 133 | destination = destination_fixture() 134 | assert %Ecto.Changeset{} = Api.change_destination(destination) 135 | end 136 | end 137 | end 138 | -------------------------------------------------------------------------------- /test/hydra_srt/monitoring_test.exs: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.MonitoringTest do 2 | use ExUnit.Case 3 | alias HydraSrt.Monitoring.OsMon 4 | alias HydraSrt.ProcessMonitor 5 | alias HydraSrt.SignalHandler 6 | alias HydraSrt.ErlSysMon 7 | 8 | test "OsMon returns valid system stats" do 9 | stats = OsMon.get_all_stats() 10 | assert is_map(stats) 11 | assert is_float(stats.ram) 12 | assert stats.ram >= 0 and stats.ram <= 100 13 | assert is_map(stats.cpu_la) 14 | assert is_float(stats.cpu_la.avg1) 15 | assert is_float(stats.cpu_la.avg5) 16 | assert is_float(stats.cpu_la.avg15) 17 | end 18 | 19 | test "OsMon ram_usage returns valid 
percentage" do 20 | ram_usage = OsMon.ram_usage() 21 | assert is_float(ram_usage) 22 | assert ram_usage >= 0 and ram_usage <= 100 23 | end 24 | 25 | test "OsMon cpu_la returns valid load averages" do 26 | cpu_la = OsMon.cpu_la() 27 | assert is_map(cpu_la) 28 | assert Map.has_key?(cpu_la, :avg1) 29 | assert Map.has_key?(cpu_la, :avg5) 30 | assert Map.has_key?(cpu_la, :avg15) 31 | assert is_float(cpu_la.avg1) 32 | assert is_float(cpu_la.avg5) 33 | assert is_float(cpu_la.avg15) 34 | end 35 | 36 | test "OsMon cpu_util returns valid utilization" do 37 | cpu_util = OsMon.cpu_util() 38 | assert is_float(cpu_util) or match?({:error, _}, cpu_util) 39 | 40 | if is_float(cpu_util) do 41 | assert cpu_util >= 0 and cpu_util <= 100 42 | end 43 | end 44 | 45 | test "OsMon swap_usage returns valid percentage or nil" do 46 | swap_usage = OsMon.swap_usage() 47 | assert is_float(swap_usage) or is_nil(swap_usage) 48 | 49 | if is_float(swap_usage) do 50 | assert swap_usage >= 0 and swap_usage <= 100 51 | end 52 | end 53 | 54 | test "ProcessMonitor lists pipeline processes" do 55 | processes = ProcessMonitor.list_pipeline_processes() 56 | assert is_list(processes) 57 | 58 | for process <- processes do 59 | assert is_map(process) 60 | assert Map.has_key?(process, :pid) 61 | assert Map.has_key?(process, :cpu) 62 | assert Map.has_key?(process, :memory) 63 | assert Map.has_key?(process, :memory_percent) 64 | assert Map.has_key?(process, :memory_bytes) 65 | assert Map.has_key?(process, :swap_percent) 66 | assert Map.has_key?(process, :swap_bytes) 67 | assert Map.has_key?(process, :user) 68 | assert Map.has_key?(process, :start_time) 69 | assert Map.has_key?(process, :command) 70 | end 71 | end 72 | 73 | test "ProcessMonitor lists detailed pipeline processes" do 74 | processes = ProcessMonitor.list_pipeline_processes_detailed() 75 | assert is_list(processes) 76 | 77 | for process <- processes do 78 | assert is_map(process) 79 | assert Map.has_key?(process, :pid) 80 | assert 
Map.has_key?(process, :cpu) 81 | assert Map.has_key?(process, :memory_percent) 82 | assert Map.has_key?(process, :memory_bytes) 83 | assert Map.has_key?(process, :virtual_memory) 84 | assert Map.has_key?(process, :resident_memory) 85 | assert Map.has_key?(process, :swap_percent) 86 | assert Map.has_key?(process, :swap_bytes) 87 | assert Map.has_key?(process, :cpu_time) 88 | assert Map.has_key?(process, :state) 89 | assert Map.has_key?(process, :ppid) 90 | assert Map.has_key?(process, :user) 91 | assert Map.has_key?(process, :start_time) 92 | assert Map.has_key?(process, :command) 93 | end 94 | end 95 | 96 | test "ProcessMonitor handles different operating systems" do 97 | case :os.type() do 98 | {:unix, :darwin} -> 99 | assert is_list(ProcessMonitor.list_pipeline_processes()) 100 | assert is_list(ProcessMonitor.list_pipeline_processes_detailed()) 101 | 102 | {:unix, :linux} -> 103 | assert is_list(ProcessMonitor.list_pipeline_processes()) 104 | assert is_list(ProcessMonitor.list_pipeline_processes_detailed()) 105 | 106 | _ -> 107 | assert {:error, "Unsupported operating system"} = ProcessMonitor.list_pipeline_processes() 108 | 109 | assert {:error, "Unsupported operating system"} = 110 | ProcessMonitor.list_pipeline_processes_detailed() 111 | end 112 | end 113 | 114 | test "SignalHandler initializes with empty state" do 115 | assert {:ok, %{}} = SignalHandler.init([]) 116 | end 117 | 118 | test "SignalHandler handles events" do 119 | signal = {:signal, :sigterm} 120 | state = %{} 121 | assert {:ok, %{}} = SignalHandler.handle_event(signal, state) 122 | end 123 | 124 | test "SignalHandler handles multiple signal types" do 125 | signals = [:sigterm, :sigint, :sighup, :sigquit] 126 | state = %{} 127 | 128 | for signal_type <- signals do 129 | signal = {:signal, signal_type} 130 | assert {:ok, %{}} = SignalHandler.handle_event(signal, state) 131 | end 132 | end 133 | 134 | test "ErlSysMon initializes correctly" do 135 | assert {:ok, []} = ErlSysMon.init([]) 136 | end 
137 | 138 | test "ErlSysMon handles info messages" do 139 | msg = {:monitor, :test_pid, :test_event} 140 | state = [] 141 | assert {:noreply, []} = ErlSysMon.handle_info(msg, state) 142 | end 143 | 144 | test "ErlSysMon handles various monitor messages" do 145 | messages = [ 146 | {:monitor, :test_pid, :busy_port}, 147 | {:monitor, :test_pid, :busy_dist_port}, 148 | {:monitor, :test_pid, {:long_gc, 500}}, 149 | {:monitor, :test_pid, {:long_schedule, 200}} 150 | ] 151 | 152 | state = [] 153 | 154 | for msg <- messages do 155 | assert {:noreply, []} = ErlSysMon.handle_info(msg, state) 156 | end 157 | end 158 | end 159 | -------------------------------------------------------------------------------- /test/hydra_srt/route_handler_test.exs: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.RouteHandlerTest do 2 | use ExUnit.Case 3 | alias HydraSrt.RouteHandler 4 | 5 | test "source_from_record with valid SRT schema" do 6 | record = %{ 7 | "schema" => "SRT", 8 | "schema_options" => %{ 9 | "localaddress" => "127.0.0.1", 10 | "localport" => 4201, 11 | "mode" => "listener", 12 | "latency" => 200, 13 | "auto-reconnect" => true, 14 | "keep-listening" => true 15 | } 16 | } 17 | 18 | assert {:ok, source} = RouteHandler.source_from_record(record) 19 | assert source["type"] == "srtsrc" 20 | assert source["uri"] =~ "srt://127.0.0.1:4201" 21 | assert source["uri"] =~ "mode=listener" 22 | assert source["latency"] == 200 23 | assert source["auto-reconnect"] == true 24 | assert source["keep-listening"] == true 25 | end 26 | 27 | test "source_from_record with SRT schema and passphrase" do 28 | record = %{ 29 | "schema" => "SRT", 30 | "schema_options" => %{ 31 | "localaddress" => "127.0.0.1", 32 | "localport" => 4201, 33 | "mode" => "listener", 34 | "passphrase" => "secret", 35 | "pbkeylen" => 16 36 | } 37 | } 38 | 39 | assert {:ok, source} = RouteHandler.source_from_record(record) 40 | assert source["type"] == "srtsrc" 41 | assert 
source["uri"] =~ "srt://127.0.0.1:4201" 42 | assert source["uri"] =~ "mode=listener" 43 | assert source["uri"] =~ "passphrase=secret" 44 | assert source["uri"] =~ "pbkeylen=16" 45 | end 46 | 47 | test "source_from_record with valid UDP schema" do 48 | record = %{ 49 | "schema" => "UDP", 50 | "schema_options" => %{ 51 | "address" => "127.0.0.1", 52 | "port" => 4201, 53 | "buffer-size" => 65536, 54 | "mtu" => 1500 55 | } 56 | } 57 | 58 | assert {:ok, source} = RouteHandler.source_from_record(record) 59 | assert source["type"] == "udpsrc" 60 | assert source["address"] == "127.0.0.1" 61 | assert source["port"] == 4201 62 | assert source["buffer-size"] == 65536 63 | assert source["mtu"] == 1500 64 | end 65 | 66 | test "source_from_record with UDP schema and minimal options" do 67 | record = %{ 68 | "schema" => "UDP", 69 | "schema_options" => %{ 70 | "address" => "127.0.0.1", 71 | "port" => 4201 72 | } 73 | } 74 | 75 | assert {:ok, source} = RouteHandler.source_from_record(record) 76 | assert source["type"] == "udpsrc" 77 | assert source["address"] == "127.0.0.1" 78 | assert source["port"] == 4201 79 | end 80 | 81 | test "source_from_record with invalid schema" do 82 | record = %{ 83 | "schema" => "INVALID", 84 | "schema_options" => %{} 85 | } 86 | 87 | assert {:error, :invalid_source} = RouteHandler.source_from_record(record) 88 | end 89 | 90 | test "source_from_record with missing schema_options" do 91 | record = %{"schema" => "SRT"} 92 | assert {:error, :invalid_source} = RouteHandler.source_from_record(record) 93 | end 94 | 95 | test "route_data_to_params with valid route data" do 96 | route_id = "test_route" 97 | 98 | route = %{ 99 | "schema" => "SRT", 100 | "schema_options" => %{ 101 | "localaddress" => "127.0.0.1", 102 | "localport" => 4201, 103 | "mode" => "listener" 104 | }, 105 | "destinations" => [ 106 | %{ 107 | "schema" => "SRT", 108 | "schema_options" => %{ 109 | "localaddress" => "127.0.0.1", 110 | "localport" => 4202, 111 | "mode" => "listener" 112 | } 
113 | } 114 | ] 115 | } 116 | 117 | assert {:ok, params} = RouteHandler.route_data_to_params(route_id) 118 | assert is_map(params) 119 | assert Map.has_key?(params, "source") 120 | assert Map.has_key?(params, "sinks") 121 | assert is_list(params["sinks"]) 122 | end 123 | 124 | test "route_data_to_params with multiple destinations" do 125 | route_id = "test_route" 126 | 127 | route = %{ 128 | "schema" => "SRT", 129 | "schema_options" => %{ 130 | "localaddress" => "127.0.0.1", 131 | "localport" => 4201, 132 | "mode" => "listener" 133 | }, 134 | "destinations" => [ 135 | %{ 136 | "schema" => "SRT", 137 | "schema_options" => %{ 138 | "localaddress" => "127.0.0.1", 139 | "localport" => 4202, 140 | "mode" => "listener" 141 | } 142 | }, 143 | %{ 144 | "schema" => "UDP", 145 | "schema_options" => %{ 146 | "address" => "127.0.0.1", 147 | "port" => 4203 148 | } 149 | } 150 | ] 151 | } 152 | 153 | assert {:ok, params} = RouteHandler.route_data_to_params(route_id) 154 | assert is_map(params) 155 | assert Map.has_key?(params, "source") 156 | assert Map.has_key?(params, "sinks") 157 | assert length(params["sinks"]) == 2 158 | end 159 | 160 | test "callback_mode returns handle_event_function" do 161 | assert RouteHandler.callback_mode() == [:handle_event_function] 162 | end 163 | 164 | test "init sets up initial state" do 165 | args = %{id: "test_route"} 166 | 167 | assert {:ok, :start, %{id: "test_route", port: nil}, {:next_event, :internal, :start}} = 168 | RouteHandler.init(args) 169 | end 170 | 171 | test "init with process flag" do 172 | args = %{id: "test_route"} 173 | assert Process.flag(:trap_exit, true) 174 | 175 | assert {:ok, :start, %{id: "test_route", port: nil}, {:next_event, :internal, :start}} = 176 | RouteHandler.init(args) 177 | end 178 | 179 | test "terminate handles port cleanup" do 180 | state = :started 181 | data = %{port: nil, id: "test_route"} 182 | assert :ok = RouteHandler.terminate(:normal, state, data) 183 | end 184 | 185 | test "terminate with active 
port" do 186 | state = :started 187 | port = Port.open({:spawn, "echo test"}, [:binary]) 188 | data = %{port: port, id: "test_route"} 189 | assert :ok = RouteHandler.terminate(:normal, state, data) 190 | end 191 | end 192 | -------------------------------------------------------------------------------- /test/hydra_srt/unix_sock_handler_test.exs: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.UnixSockHandlerTest do 2 | use ExUnit.Case 3 | alias HydraSrt.UnixSockHandler 4 | 5 | test "callback_mode returns handle_event_function" do 6 | assert UnixSockHandler.callback_mode() == [:handle_event_function] 7 | end 8 | 9 | test "init returns ignore" do 10 | assert :ignore = UnixSockHandler.init([]) 11 | end 12 | 13 | test "handle_event with route_id message" do 14 | route_id = "test_route" 15 | message = {:tcp, nil, "route_id:" <> route_id} 16 | state = :exchange 17 | data = %{sock: nil, trans: nil, source_stream_id: nil, route_id: nil, route_record: nil} 18 | 19 | assert {:keep_state, new_data} = UnixSockHandler.handle_event(:info, message, state, data) 20 | assert new_data.route_id == route_id 21 | end 22 | 23 | test "handle_event with empty route_id message" do 24 | message = {:tcp, nil, "route_id:"} 25 | state = :exchange 26 | data = %{sock: nil, trans: nil, source_stream_id: nil, route_id: nil, route_record: nil} 27 | 28 | assert {:keep_state, new_data} = UnixSockHandler.handle_event(:info, message, state, data) 29 | assert new_data.route_id == "" 30 | end 31 | 32 | test "handle_event with stats_json message and exportStats true" do 33 | json = ~s({"bytes_sent": 1000, "bytes_received": 2000}) 34 | message = {:tcp, nil, "stats_json:" <> json} 35 | state = :exchange 36 | 37 | data = %{ 38 | sock: nil, 39 | trans: nil, 40 | source_stream_id: "test_stream", 41 | route_id: "test_route", 42 | route_record: %{"exportStats" => true, "name" => "test"} 43 | } 44 | 45 | assert :keep_state_and_data = 
UnixSockHandler.handle_event(:info, message, state, data) 46 | end 47 | 48 | test "handle_event with invalid stats_json message" do 49 | json = ~s({invalid_json) 50 | message = {:tcp, nil, "stats_json:" <> json} 51 | state = :exchange 52 | 53 | data = %{ 54 | sock: nil, 55 | trans: nil, 56 | source_stream_id: "test_stream", 57 | route_id: "test_route", 58 | route_record: %{"exportStats" => true, "name" => "test"} 59 | } 60 | 61 | assert :keep_state_and_data = UnixSockHandler.handle_event(:info, message, state, data) 62 | end 63 | 64 | test "handle_event with stats_json message and exportStats false" do 65 | json = ~s({"bytes_sent": 1000, "bytes_received": 2000}) 66 | message = {:tcp, nil, "stats_json:" <> json} 67 | state = :exchange 68 | 69 | data = %{ 70 | sock: nil, 71 | trans: nil, 72 | source_stream_id: "test_stream", 73 | route_id: "test_route", 74 | route_record: %{"exportStats" => false} 75 | } 76 | 77 | assert :keep_state_and_data = UnixSockHandler.handle_event(:info, message, state, data) 78 | end 79 | 80 | test "handle_event with stats_json message and missing route_record" do 81 | json = ~s({"bytes_sent": 1000, "bytes_received": 2000}) 82 | message = {:tcp, nil, "stats_json:" <> json} 83 | state = :exchange 84 | 85 | data = %{ 86 | sock: nil, 87 | trans: nil, 88 | source_stream_id: "test_stream", 89 | route_id: "test_route", 90 | route_record: nil 91 | } 92 | 93 | assert :keep_state_and_data = UnixSockHandler.handle_event(:info, message, state, data) 94 | end 95 | 96 | test "handle_event with stats_source_stream_id message" do 97 | stream_id = "test_stream" 98 | message = {:tcp, nil, "stats_source_stream_id:" <> stream_id} 99 | state = :exchange 100 | data = %{sock: nil, trans: nil, source_stream_id: nil, route_id: nil, route_record: nil} 101 | 102 | assert {:keep_state, new_data} = UnixSockHandler.handle_event(:info, message, state, data) 103 | assert new_data.source_stream_id == stream_id 104 | end 105 | 106 | test "handle_event with empty 
stats_source_stream_id message" do 107 | message = {:tcp, nil, "stats_source_stream_id:"} 108 | state = :exchange 109 | data = %{sock: nil, trans: nil, source_stream_id: nil, route_id: nil, route_record: nil} 110 | 111 | assert {:keep_state, new_data} = UnixSockHandler.handle_event(:info, message, state, data) 112 | assert new_data.source_stream_id == "" 113 | end 114 | 115 | test "handle_event with undefined message" do 116 | message = {:tcp, nil, "undefined_message"} 117 | state = :exchange 118 | data = %{sock: nil, trans: nil, source_stream_id: nil, route_id: nil, route_record: nil} 119 | 120 | assert :keep_state_and_data = UnixSockHandler.handle_event(:info, message, state, data) 121 | end 122 | 123 | test "handle_event with non-tcp message" do 124 | message = {:other, nil, "some_message"} 125 | state = :exchange 126 | data = %{sock: nil, trans: nil, source_stream_id: nil, route_id: nil, route_record: nil} 127 | 128 | assert :keep_state_and_data = UnixSockHandler.handle_event(:info, message, state, data) 129 | end 130 | 131 | test "terminate returns ok" do 132 | assert :ok = UnixSockHandler.terminate(:normal, :exchange, %{}) 133 | end 134 | 135 | test "terminate with different reasons" do 136 | reasons = [:normal, :shutdown, :timeout, {:error, "test"}] 137 | 138 | for reason <- reasons do 139 | assert :ok = UnixSockHandler.terminate(reason, :exchange, %{}) 140 | end 141 | end 142 | 143 | test "stats_to_metrics with nested data" do 144 | stats = %{ 145 | "nested" => [ 146 | %{"value" => 100}, 147 | %{"value" => 200} 148 | ], 149 | "simple" => 300 150 | } 151 | 152 | data = %{ 153 | route_id: "test_route", 154 | route_record: %{"name" => "test"}, 155 | source_stream_id: "test_stream" 156 | } 157 | 158 | UnixSockHandler.stats_to_metrics(stats, data) 159 | end 160 | 161 | test "stats_to_metrics with deeply nested data" do 162 | stats = %{ 163 | "level1" => %{ 164 | "level2" => [ 165 | %{"level3" => %{"value" => 100}}, 166 | %{"level3" => %{"value" => 200}} 167 | ] 
168 | }, 169 | "simple" => 300 170 | } 171 | 172 | data = %{ 173 | route_id: "test_route", 174 | route_record: %{"name" => "test"}, 175 | source_stream_id: "test_stream" 176 | } 177 | 178 | UnixSockHandler.stats_to_metrics(stats, data) 179 | end 180 | 181 | test "norm_names normalizes metric names" do 182 | assert "test_metric" = UnixSockHandler.norm_names("test-metric") 183 | assert "test_metric" = UnixSockHandler.norm_names("TEST_METRIC") 184 | assert "test_metric" = UnixSockHandler.norm_names("Test-Metric") 185 | end 186 | 187 | test "norm_names with various special characters" do 188 | assert "test_metric_name" = UnixSockHandler.norm_names("test-metric-name") 189 | assert "test_metric_name" = UnixSockHandler.norm_names("test.metric.name") 190 | assert "test_metric_name" = UnixSockHandler.norm_names("test/metric/name") 191 | assert "test_metric_name" = UnixSockHandler.norm_names("test:metric:name") 192 | end 193 | end 194 | -------------------------------------------------------------------------------- /test/hydra_srt_web/controllers/destination_controller_test.exs: -------------------------------------------------------------------------------- 1 | defmodule HydraSrtWeb.DestinationControllerTest do 2 | use HydraSrtWeb.ConnCase 3 | 4 | import HydraSrt.ApiFixtures 5 | 6 | alias HydraSrt.Api.Destination 7 | 8 | @create_attrs %{ 9 | alias: "some alias", 10 | enabled: true, 11 | name: "some name", 12 | status: "some status", 13 | started_at: ~U[2025-02-19 16:24:00Z], 14 | stopped_at: ~U[2025-02-19 16:24:00Z] 15 | } 16 | @update_attrs %{ 17 | alias: "some updated alias", 18 | enabled: false, 19 | name: "some updated name", 20 | status: "some updated status", 21 | started_at: ~U[2025-02-20 16:24:00Z], 22 | stopped_at: ~U[2025-02-20 16:24:00Z] 23 | } 24 | @invalid_attrs %{alias: nil, enabled: nil, name: nil, status: nil, started_at: nil, stopped_at: nil} 25 | 26 | setup %{conn: conn} do 27 | {:ok, conn: put_req_header(conn, "accept", "application/json")} 28 | end 29 | 
30 | describe "index" do 31 | test "lists all destinations", %{conn: conn} do 32 | conn = get(conn, ~p"/api/destinations") 33 | assert json_response(conn, 200)["data"] == [] 34 | end 35 | end 36 | 37 | describe "create destination" do 38 | test "renders destination when data is valid", %{conn: conn} do 39 | conn = post(conn, ~p"/api/destinations", destination: @create_attrs) 40 | assert %{"id" => id} = json_response(conn, 201)["data"] 41 | 42 | conn = get(conn, ~p"/api/destinations/#{id}") 43 | 44 | assert %{ 45 | "id" => ^id, 46 | "alias" => "some alias", 47 | "enabled" => true, 48 | "name" => "some name", 49 | "started_at" => "2025-02-19T16:24:00Z", 50 | "status" => "some status", 51 | "stopped_at" => "2025-02-19T16:24:00Z" 52 | } = json_response(conn, 200)["data"] 53 | end 54 | 55 | test "renders errors when data is invalid", %{conn: conn} do 56 | conn = post(conn, ~p"/api/destinations", destination: @invalid_attrs) 57 | assert json_response(conn, 422)["errors"] != %{} 58 | end 59 | end 60 | 61 | describe "update destination" do 62 | setup [:create_destination] 63 | 64 | test "renders destination when data is valid", %{conn: conn, destination: %Destination{id: id} = destination} do 65 | conn = put(conn, ~p"/api/destinations/#{destination}", destination: @update_attrs) 66 | assert %{"id" => ^id} = json_response(conn, 200)["data"] 67 | 68 | conn = get(conn, ~p"/api/destinations/#{id}") 69 | 70 | assert %{ 71 | "id" => ^id, 72 | "alias" => "some updated alias", 73 | "enabled" => false, 74 | "name" => "some updated name", 75 | "started_at" => "2025-02-20T16:24:00Z", 76 | "status" => "some updated status", 77 | "stopped_at" => "2025-02-20T16:24:00Z" 78 | } = json_response(conn, 200)["data"] 79 | end 80 | 81 | test "renders errors when data is invalid", %{conn: conn, destination: destination} do 82 | conn = put(conn, ~p"/api/destinations/#{destination}", destination: @invalid_attrs) 83 | assert json_response(conn, 422)["errors"] != %{} 84 | end 85 | end 86 | 87 | 
describe "delete destination" do 88 | setup [:create_destination] 89 | 90 | test "deletes chosen destination", %{conn: conn, destination: destination} do 91 | conn = delete(conn, ~p"/api/destinations/#{destination}") 92 | assert response(conn, 204) 93 | 94 | assert_error_sent 404, fn -> 95 | get(conn, ~p"/api/destinations/#{destination}") 96 | end 97 | end 98 | end 99 | 100 | defp create_destination(_) do 101 | destination = destination_fixture() 102 | %{destination: destination} 103 | end 104 | end 105 | -------------------------------------------------------------------------------- /test/hydra_srt_web/controllers/error_json_test.exs: -------------------------------------------------------------------------------- 1 | defmodule HydraSrtWeb.ErrorJSONTest do 2 | use HydraSrtWeb.ConnCase, async: true 3 | 4 | test "renders 404" do 5 | assert HydraSrtWeb.ErrorJSON.render("404.json", %{}) == %{errors: %{detail: "Not Found"}} 6 | end 7 | 8 | test "renders 500" do 9 | assert HydraSrtWeb.ErrorJSON.render("500.json", %{}) == 10 | %{errors: %{detail: "Internal Server Error"}} 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /test/hydra_srt_web/controllers/route_controller_test.exs: -------------------------------------------------------------------------------- 1 | defmodule HydraSrtWeb.RouteControllerTest do 2 | use HydraSrtWeb.ConnCase 3 | 4 | import HydraSrt.ApiFixtures 5 | 6 | alias HydraSrt.Api.Route 7 | 8 | @create_attrs %{ 9 | alias: "some alias", 10 | enabled: true, 11 | name: "some name", 12 | status: "some status", 13 | started_at: ~U[2025-02-18 14:51:00Z], 14 | source: %{}, 15 | destinations: %{}, 16 | stopped_at: ~U[2025-02-18 14:51:00Z] 17 | } 18 | @update_attrs %{ 19 | alias: "some updated alias", 20 | enabled: false, 21 | name: "some updated name", 22 | status: "some updated status", 23 | started_at: ~U[2025-02-19 14:51:00Z], 24 | source: %{}, 25 | destinations: %{}, 26 | stopped_at: ~U[2025-02-19 
14:51:00Z] 27 | } 28 | @invalid_attrs %{alias: nil, enabled: nil, name: nil, status: nil, started_at: nil, source: nil, destinations: nil, stopped_at: nil} 29 | 30 | setup %{conn: conn} do 31 | {:ok, conn: put_req_header(conn, "accept", "application/json")} 32 | end 33 | 34 | describe "index" do 35 | test "lists all routes", %{conn: conn} do 36 | conn = get(conn, ~p"/api/routes") 37 | assert json_response(conn, 200)["data"] == [] 38 | end 39 | end 40 | 41 | describe "create route" do 42 | test "renders route when data is valid", %{conn: conn} do 43 | conn = post(conn, ~p"/api/routes", route: @create_attrs) 44 | assert %{"id" => id} = json_response(conn, 201)["data"] 45 | 46 | conn = get(conn, ~p"/api/routes/#{id}") 47 | 48 | assert %{ 49 | "id" => ^id, 50 | "alias" => "some alias", 51 | "destinations" => %{}, 52 | "enabled" => true, 53 | "name" => "some name", 54 | "source" => %{}, 55 | "started_at" => "2025-02-18T14:51:00Z", 56 | "status" => "some status", 57 | "stopped_at" => "2025-02-18T14:51:00Z" 58 | } = json_response(conn, 200)["data"] 59 | end 60 | 61 | test "renders errors when data is invalid", %{conn: conn} do 62 | conn = post(conn, ~p"/api/routes", route: @invalid_attrs) 63 | assert json_response(conn, 422)["errors"] != %{} 64 | end 65 | end 66 | 67 | describe "update route" do 68 | setup [:create_route] 69 | 70 | test "renders route when data is valid", %{conn: conn, route: %Route{id: id} = route} do 71 | conn = put(conn, ~p"/api/routes/#{route}", route: @update_attrs) 72 | assert %{"id" => ^id} = json_response(conn, 200)["data"] 73 | 74 | conn = get(conn, ~p"/api/routes/#{id}") 75 | 76 | assert %{ 77 | "id" => ^id, 78 | "alias" => "some updated alias", 79 | "destinations" => %{}, 80 | "enabled" => false, 81 | "name" => "some updated name", 82 | "source" => %{}, 83 | "started_at" => "2025-02-19T14:51:00Z", 84 | "status" => "some updated status", 85 | "stopped_at" => "2025-02-19T14:51:00Z" 86 | } = json_response(conn, 200)["data"] 87 | end 88 | 89 | test 
"renders errors when data is invalid", %{conn: conn, route: route} do 90 | conn = put(conn, ~p"/api/routes/#{route}", route: @invalid_attrs) 91 | assert json_response(conn, 422)["errors"] != %{} 92 | end 93 | end 94 | 95 | describe "delete route" do 96 | setup [:create_route] 97 | 98 | test "deletes chosen route", %{conn: conn, route: route} do 99 | conn = delete(conn, ~p"/api/routes/#{route}") 100 | assert response(conn, 204) 101 | 102 | assert_error_sent 404, fn -> 103 | get(conn, ~p"/api/routes/#{route}") 104 | end 105 | end 106 | end 107 | 108 | defp create_route(_) do 109 | route = route_fixture() 110 | %{route: route} 111 | end 112 | end 113 | -------------------------------------------------------------------------------- /test/support/conn_case.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrtWeb.ConnCase do 2 | @moduledoc """ 3 | This module defines the test case to be used by 4 | tests that require setting up a connection. 5 | 6 | Such tests rely on `Phoenix.ConnTest` and also 7 | import other functionality to make it easier 8 | to build common data structures and query the data layer. 9 | 10 | Finally, if the test case interacts with the database, 11 | we enable the SQL sandbox, so changes done to the database 12 | are reverted at the end of every test. If you are using 13 | PostgreSQL, you can even run database tests asynchronously 14 | by setting `use HydraSrtWeb.ConnCase, async: true`, although 15 | this option is not recommended for other databases. 
16 | """ 17 | 18 | use ExUnit.CaseTemplate 19 | 20 | using do 21 | quote do 22 | # The default endpoint for testing 23 | @endpoint HydraSrtWeb.Endpoint 24 | 25 | use HydraSrtWeb, :verified_routes 26 | 27 | # Import conveniences for testing with connections 28 | import Plug.Conn 29 | import Phoenix.ConnTest 30 | import HydraSrtWeb.ConnCase 31 | end 32 | end 33 | 34 | setup tags do 35 | HydraSrt.DataCase.setup_sandbox(tags) 36 | {:ok, conn: Phoenix.ConnTest.build_conn()} 37 | end 38 | end 39 | -------------------------------------------------------------------------------- /test/support/data_case.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.DataCase do 2 | @moduledoc """ 3 | This module defines the setup for tests requiring 4 | access to the application's data layer. 5 | 6 | You may define functions here to be used as helpers in 7 | your tests. 8 | 9 | Finally, if the test case interacts with the database, 10 | we enable the SQL sandbox, so changes done to the database 11 | are reverted at the end of every test. If you are using 12 | PostgreSQL, you can even run database tests asynchronously 13 | by setting `use HydraSrt.DataCase, async: true`, although 14 | this option is not recommended for other databases. 15 | """ 16 | 17 | use ExUnit.CaseTemplate 18 | 19 | using do 20 | quote do 21 | alias HydraSrt.Repo 22 | 23 | import Ecto 24 | import Ecto.Changeset 25 | import Ecto.Query 26 | import HydraSrt.DataCase 27 | end 28 | end 29 | 30 | setup tags do 31 | HydraSrt.DataCase.setup_sandbox(tags) 32 | :ok 33 | end 34 | 35 | @doc """ 36 | Sets up the sandbox based on the test tags. 37 | """ 38 | def setup_sandbox(tags) do 39 | pid = Ecto.Adapters.SQL.Sandbox.start_owner!(HydraSrt.Repo, shared: not tags[:async]) 40 | on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) 41 | end 42 | 43 | @doc """ 44 | A helper that transforms changeset errors into a map of messages. 
45 | 46 | assert {:error, changeset} = Accounts.create_user(%{password: "short"}) 47 | assert "password is too short" in errors_on(changeset).password 48 | assert %{password: ["password is too short"]} = errors_on(changeset) 49 | 50 | """ 51 | def errors_on(changeset) do 52 | Ecto.Changeset.traverse_errors(changeset, fn {message, opts} -> 53 | Regex.replace(~r"%{(\w+)}", message, fn _, key -> 54 | opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string() 55 | end) 56 | end) 57 | end 58 | end 59 | -------------------------------------------------------------------------------- /test/support/fixtures/api_fixtures.ex: -------------------------------------------------------------------------------- 1 | defmodule HydraSrt.ApiFixtures do 2 | @moduledoc """ 3 | This module defines test helpers for creating 4 | entities via the `HydraSrt.Api` context. 5 | """ 6 | 7 | @doc """ 8 | Generate a route. 9 | """ 10 | def route_fixture(attrs \\ %{}) do 11 | {:ok, route} = 12 | attrs 13 | |> Enum.into(%{ 14 | alias: "some alias", 15 | destinations: %{}, 16 | enabled: true, 17 | name: "some name", 18 | source: %{}, 19 | started_at: ~U[2025-02-18 14:51:00Z], 20 | status: "some status", 21 | stopped_at: ~U[2025-02-18 14:51:00Z] 22 | }) 23 | |> HydraSrt.Api.create_route() 24 | 25 | route 26 | end 27 | 28 | @doc """ 29 | Generate a destination. 
30 | """ 31 | def destination_fixture(attrs \\ %{}) do 32 | {:ok, destination} = 33 | attrs 34 | |> Enum.into(%{ 35 | alias: "some alias", 36 | enabled: true, 37 | name: "some name", 38 | started_at: ~U[2025-02-19 16:24:00Z], 39 | status: "some status", 40 | stopped_at: ~U[2025-02-19 16:24:00Z] 41 | }) 42 | |> HydraSrt.Api.create_destination() 43 | 44 | destination 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | ExUnit.start() 2 | Ecto.Adapters.SQL.Sandbox.mode(HydraSrt.Repo, :manual) 3 | -------------------------------------------------------------------------------- /web_app/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 25 | -------------------------------------------------------------------------------- /web_app/README.md: -------------------------------------------------------------------------------- 1 | # React + Vite 2 | 3 | This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules. 
4 | 5 | Currently, two official plugins are available: 6 | 7 | - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react/README.md) uses [Babel](https://babeljs.io/) for Fast Refresh 8 | - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh 9 | -------------------------------------------------------------------------------- /web_app/eslint.config.js: -------------------------------------------------------------------------------- 1 | import js from '@eslint/js' 2 | import globals from 'globals' 3 | import react from 'eslint-plugin-react' 4 | import reactHooks from 'eslint-plugin-react-hooks' 5 | import reactRefresh from 'eslint-plugin-react-refresh' 6 | 7 | export default [ 8 | { ignores: ['dist'] }, 9 | { 10 | files: ['**/*.{js,jsx}'], 11 | languageOptions: { 12 | ecmaVersion: 2020, 13 | globals: globals.browser, 14 | parserOptions: { 15 | ecmaVersion: 'latest', 16 | ecmaFeatures: { jsx: true }, 17 | sourceType: 'module', 18 | }, 19 | }, 20 | settings: { react: { version: '18.3' } }, 21 | plugins: { 22 | react, 23 | 'react-hooks': reactHooks, 24 | 'react-refresh': reactRefresh, 25 | }, 26 | rules: { 27 | ...js.configs.recommended.rules, 28 | ...react.configs.recommended.rules, 29 | ...react.configs['jsx-runtime'].rules, 30 | ...reactHooks.configs.recommended.rules, 31 | 'react/jsx-no-target-blank': 'off', 32 | 'react-refresh/only-export-components': [ 33 | 'warn', 34 | { allowConstantExport: true }, 35 | ], 36 | }, 37 | }, 38 | ] 39 | -------------------------------------------------------------------------------- /web_app/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | HydraSRT 9 | 10 | 11 |
12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /web_app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "admin", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "dev": "vite", 8 | "build": "vite build", 9 | "lint": "eslint .", 10 | "preview": "vite preview" 11 | }, 12 | "dependencies": { 13 | "antd": "^5.24.1", 14 | "react": "^19.0.0", 15 | "react-dom": "^19.0.0", 16 | "react-router": "^7.1.5", 17 | "react-router-dom": "^7.1.5" 18 | }, 19 | "devDependencies": { 20 | "@eslint/js": "^9.19.0", 21 | "@types/react": "^19.0.8", 22 | "@types/react-dom": "^19.0.3", 23 | "@vitejs/plugin-react": "^4.3.4", 24 | "eslint": "^9.19.0", 25 | "eslint-plugin-react": "^7.37.4", 26 | "eslint-plugin-react-hooks": "^5.0.0", 27 | "eslint-plugin-react-refresh": "^0.4.18", 28 | "globals": "^15.14.0", 29 | "vite": "^6.1.0" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /web_app/public/logo.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abc3/hydra-srt/5b5b32527dd24a91e7778ac8c7f8c8fadc8dcc8d/web_app/public/logo.webp -------------------------------------------------------------------------------- /web_app/public/logo2.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abc3/hydra-srt/5b5b32527dd24a91e7778ac8c7f8c8fadc8dcc8d/web_app/public/logo2.webp -------------------------------------------------------------------------------- /web_app/public/vite.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /web_app/src/App.css: -------------------------------------------------------------------------------- 1 | #root { 2 | max-width: 
1280px; 3 | margin: 0 auto; 4 | padding: 2rem; 5 | text-align: center; 6 | } 7 | 8 | .logo { 9 | height: 6em; 10 | padding: 1.5em; 11 | will-change: filter; 12 | transition: filter 300ms; 13 | } 14 | .logo:hover { 15 | filter: drop-shadow(0 0 2em #646cffaa); 16 | } 17 | .logo.react:hover { 18 | filter: drop-shadow(0 0 2em #61dafbaa); 19 | } 20 | 21 | @keyframes logo-spin { 22 | from { 23 | transform: rotate(0deg); 24 | } 25 | to { 26 | transform: rotate(360deg); 27 | } 28 | } 29 | 30 | @media (prefers-reduced-motion: no-preference) { 31 | a:nth-of-type(2) .logo { 32 | animation: logo-spin infinite 20s linear; 33 | } 34 | } 35 | 36 | .card { 37 | padding: 2em; 38 | } 39 | 40 | .read-the-docs { 41 | color: #888; 42 | } 43 | -------------------------------------------------------------------------------- /web_app/src/App.jsx: -------------------------------------------------------------------------------- 1 | import React from 'react'; 2 | import { Button } from 'antd'; 3 | 4 | const App = () => ( 5 |
6 | 7 |
8 | ); 9 | 10 | export default App; -------------------------------------------------------------------------------- /web_app/src/assets/react.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /web_app/src/index.css: -------------------------------------------------------------------------------- 1 | :root { 2 | font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; 3 | line-height: 1.5; 4 | font-weight: 400; 5 | 6 | color-scheme: light dark; 7 | color: rgba(255, 255, 255, 0.87); 8 | background-color: #242424; 9 | 10 | font-synthesis: none; 11 | text-rendering: optimizeLegibility; 12 | -webkit-font-smoothing: antialiased; 13 | -moz-osx-font-smoothing: grayscale; 14 | } 15 | 16 | a { 17 | font-weight: 500; 18 | color: rgb(64, 123, 255); 19 | text-decoration: none; 20 | transition: color 0.3s; 21 | } 22 | 23 | a:hover { 24 | color: rgb(89, 143, 255); 25 | text-decoration: underline; 26 | } 27 | 28 | body { 29 | margin: 0; 30 | padding: 0; 31 | font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif; 32 | } 33 | 34 | h1 { 35 | font-size: 3.2em; 36 | line-height: 1.1; 37 | } 38 | 39 | button { 40 | border-radius: 8px; 41 | border: 1px solid transparent; 42 | padding: 0.6em 1.2em; 43 | font-size: 1em; 44 | font-weight: 500; 45 | font-family: inherit; 46 | background-color: #1a1a1a; 47 | cursor: pointer; 48 | transition: border-color 0.25s; 49 | } 50 | button:hover { 51 | border-color: #646cff; 52 | } 53 | button:focus, 54 | button:focus-visible { 55 | outline: 4px auto -webkit-focus-ring-color; 56 | } 57 | 58 | @media (prefers-color-scheme: light) { 59 | :root { 60 | color: #213547; 61 | background-color: #ffffff; 62 | } 63 | a:hover { 64 | color: #747bff; 65 | } 66 | button { 67 | background-color: #f9f9f9; 68 | } 69 | } 70 | 71 | .logo { 72 | height: 32px; 73 | margin: 16px; 74 | 
background: rgba(255, 255, 255, 0.2); 75 | display: flex; 76 | align-items: center; 77 | justify-content: center; 78 | color: white; 79 | font-size: 18px; 80 | font-weight: bold; 81 | } 82 | 83 | .site-layout-content { 84 | min-height: 280px; 85 | padding: 24px; 86 | background: #fff; 87 | } 88 | 89 | .ant-layout { 90 | min-height: 100vh; 91 | } 92 | 93 | .ant-layout-sider { 94 | box-shadow: 2px 0 6px rgba(0,21,41,.35); 95 | } 96 | 97 | .header-container { 98 | padding: 0; 99 | background: #fff; 100 | box-shadow: 0 2px 8px #f0f1f2; 101 | } 102 | 103 | .content-container { 104 | margin: 24px 16px; 105 | padding: 24px; 106 | background: #fff; 107 | border-radius: 4px; 108 | } 109 | 110 | /* Breadcrumb styling */ 111 | .ant-breadcrumb ol { 112 | display: flex; 113 | align-items: center; 114 | } 115 | 116 | .ant-breadcrumb a { 117 | color: rgba(255, 255, 255, 0.65); 118 | transition: color 0.3s; 119 | font-size: 14px; 120 | } 121 | 122 | .ant-breadcrumb a:hover { 123 | color: #1677ff; 124 | } 125 | 126 | .ant-breadcrumb-separator { 127 | color: rgba(255, 255, 255, 0.45); 128 | margin: 0 8px; 129 | } 130 | 131 | /* Style for the last breadcrumb item (current page) */ 132 | .ant-breadcrumb span:last-child { 133 | color: rgba(255, 255, 255, 0.85); 134 | font-weight: 500; 135 | cursor: default; /* Show default cursor instead of pointer */ 136 | pointer-events: none; /* Make it non-clickable */ 137 | } 138 | 139 | .ant-breadcrumb .anticon { 140 | font-size: 14px; 141 | margin-right: 0; 142 | vertical-align: -0.125em; 143 | } 144 | 145 | /* Style for loading indicator in breadcrumb */ 146 | .ant-breadcrumb .anticon-loading { 147 | animation: loadingCircle 1s infinite linear; 148 | } 149 | 150 | @keyframes loadingCircle { 151 | 100% { 152 | transform: rotate(360deg); 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /web_app/src/main.jsx: -------------------------------------------------------------------------------- 1 | 
import ReactDOM from 'react-dom/client'; 2 | import { ConfigProvider, theme } from 'antd'; 3 | import { StrictMode, useEffect, useState } from 'react'; 4 | import { HashRouter, Routes, Route, Navigate, useLocation } from 'react-router-dom'; 5 | import MainLayout from './components/MainLayout'; 6 | import Dashboard from './pages/Dashboard'; 7 | import R from './pages/routes/Routes'; 8 | import Settings from './pages/Settings'; 9 | import RouteItem from './pages/routes/RouteItem'; 10 | import RouteSourceEdit from './pages/routes/RouteSourceEdit'; 11 | import RouteDestEdit from './pages/routes/RouteDestEdit'; 12 | import SystemPipelines from './pages/system/SystemPipelines'; 13 | import SystemNodes from './pages/system/SystemNodes'; 14 | import Login from './pages/Login'; 15 | import { isAuthenticated } from './utils/auth'; 16 | import { ROUTES } from './utils/constants'; 17 | import './index.css'; 18 | 19 | const config = { 20 | algorithm: [theme.darkAlgorithm], 21 | token: { 22 | colorPrimary: '#1677ff', 23 | borderRadius: 6, 24 | colorBgContainer: '#121212', 25 | colorBgElevated: '#1a1a1a', 26 | colorBgLayout: '#000000', 27 | colorText: 'rgba(255, 255, 255, 0.85)', 28 | colorTextSecondary: 'rgba(255, 255, 255, 0.45)', 29 | controlHeight: 36, 30 | boxShadow: '0 1px 2px rgba(0, 0, 0, 0.3)', 31 | }, 32 | components: { 33 | Menu: { 34 | itemBg: 'transparent', 35 | itemColor: 'rgba(255, 255, 255, 0.65)', 36 | itemSelectedColor: '#fff', 37 | itemSelectedBg: '#1677ff', 38 | itemHoverColor: '#fff', 39 | itemHoverBg: 'rgba(255, 255, 255, 0.08)', 40 | itemMarginInline: 8, 41 | itemBorderRadius: 4, 42 | }, 43 | Button: { 44 | controlHeight: 36, 45 | borderRadius: 4, 46 | }, 47 | Card: { 48 | colorBgContainer: '#1a1a1a', 49 | }, 50 | Layout: { 51 | headerBg: '#000000', 52 | siderBg: '#000000', 53 | }, 54 | Table: { 55 | colorBgContainer: '#121212', 56 | headerBg: '#121212', 57 | headerColor: 'rgba(255, 255, 255, 0.85)', 58 | headerSortActiveBg: '#1a1a1a', 59 | rowHoverBg: 
'#1a1a1a', 60 | borderColor: '#303030', 61 | } 62 | } 63 | }; 64 | 65 | // Protected route component 66 | const ProtectedRoute = ({ children }) => { 67 | const location = useLocation(); 68 | 69 | if (!isAuthenticated()) { 70 | // Redirect to login if not authenticated 71 | return ; 72 | } 73 | 74 | return children; 75 | }; 76 | 77 | // App component with authentication logic 78 | const App = () => { 79 | const [isLoading, setIsLoading] = useState(true); 80 | 81 | useEffect(() => { 82 | // Check if user is authenticated 83 | setIsLoading(false); 84 | }, []); 85 | 86 | if (isLoading) { 87 | return null; // Or a loading spinner 88 | } 89 | 90 | return ( 91 | 92 | 93 | } /> 94 | 95 | 97 | 98 | 99 | 100 | 101 | } /> 102 | 103 | 105 | 106 | 107 | 108 | 109 | } /> 110 | 111 | 113 | 114 | 115 | 116 | 117 | } /> 118 | 119 | 121 | 122 | 123 | 124 | 125 | } /> 126 | 127 | 129 | 130 | 131 | 132 | 133 | } /> 134 | 135 | 137 | 138 | 139 | 140 | 141 | } /> 142 | 143 | 145 | 146 | 147 | 148 | 149 | } /> 150 | 151 | 153 | 154 | 155 | 156 | 157 | } /> 158 | 159 | 160 | ); 161 | }; 162 | 163 | ReactDOM.createRoot(document.getElementById('root')).render( 164 | 165 | 166 | 167 | 168 | , 169 | ); 170 | -------------------------------------------------------------------------------- /web_app/src/pages/Dashboard.jsx: -------------------------------------------------------------------------------- 1 | import { Typography, Card, Row, Col, Statistic, Progress } from 'antd'; 2 | import { UserOutlined, ClockCircleOutlined, CheckCircleOutlined, HomeOutlined, DesktopOutlined, AreaChartOutlined, LoadingOutlined } from '@ant-design/icons'; 3 | import { useEffect, useState } from 'react'; 4 | import React from 'react'; 5 | import { nodesApi } from '../utils/api'; 6 | 7 | const { Title } = Typography; 8 | 9 | const Dashboard = () => { 10 | const [nodeStats, setNodeStats] = useState({ 11 | cpu: null, 12 | ram: null, 13 | swap: null, 14 | la: 'N/A / N/A / N/A' 15 | }); 16 | const [loading, setLoading] 
= useState(true); 17 | 18 | // Set breadcrumb items for the Dashboard page 19 | useEffect(() => { 20 | if (window.setBreadcrumbItems) { 21 | window.breadcrumbSet = true; 22 | window.setBreadcrumbItems([ 23 | { 24 | href: '/', 25 | title: , 26 | } 27 | ]); 28 | } 29 | }, []); 30 | 31 | // Fetch node stats for the current node 32 | useEffect(() => { 33 | const fetchNodeStats = async () => { 34 | try { 35 | setLoading(true); 36 | const data = await nodesApi.getAll(); 37 | // Find the self node 38 | const selfNode = data.find(node => node.status === 'self'); 39 | if (selfNode) { 40 | setNodeStats(selfNode); 41 | } 42 | } catch (error) { 43 | console.error('Error fetching node stats:', error); 44 | } finally { 45 | setLoading(false); 46 | } 47 | }; 48 | 49 | fetchNodeStats(); 50 | // Set up auto-refresh every 30 seconds 51 | const intervalId = setInterval(fetchNodeStats, 30000); 52 | 53 | // Clean up interval on component unmount 54 | return () => clearInterval(intervalId); 55 | }, []); 56 | 57 | const getProgressColor = (value) => { 58 | if (value === null || value === undefined) return '#ccc'; 59 | if (value > 80) return '#ff4d4f'; 60 | if (value > 50) return '#faad14'; 61 | return '#52c41a'; 62 | }; 63 | 64 | return ( 65 |
66 | Dashboard (DEMO VIEW) 67 | 68 | 69 | 70 | 71 | } 75 | /> 76 | 77 | 78 | 79 | 80 | } 84 | /> 85 | 86 | 87 | 88 | 89 | } 93 | suffix="/ 100" 94 | /> 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 |
103 |
CPU Usage
104 |
{nodeStats.cpu !== null ? `${Math.round(nodeStats.cpu)}%` : 'N/A'}
105 |
106 | {/* (nodeStats.cpu !== null ? `${percent}%` : 'N/A')} 112 | /> */} 113 |
114 |
115 |
116 | 117 | 118 | 119 |
120 |
RAM Usage
121 |
{nodeStats.ram !== null ? `${Math.round(nodeStats.ram)}%` : 'N/A'}
122 |
123 | {/* (nodeStats.ram !== null ? `${percent}%` : 'N/A')} 129 | /> */} 130 |
131 |
132 |
133 | 134 | 135 | 136 |
137 |
SWAP Usage
138 |
{nodeStats.swap !== null ? `${Math.round(nodeStats.swap)}%` : 'N/A'}
139 |
140 | {/* (nodeStats.swap !== null ? `${percent}%` : 'N/A')} 146 | /> */} 147 |
148 |
149 |
150 | 151 | 152 | 153 |
154 |
System Load
155 |
{nodeStats.la}
156 |
157 |
158 | 159 |
160 | 161 | 162 | 163 | 164 |

User login from 192.168.1.1

165 |

System update completed

166 |

New user registered

167 |
168 | 169 | 170 | 171 |

Server Status: Online

172 |

Last Backup: 2 hours ago

173 |

System Load: Normal

174 |
175 | 176 |
177 |
178 | ); 179 | }; 180 | 181 | export default Dashboard; -------------------------------------------------------------------------------- /web_app/src/pages/Login.jsx: -------------------------------------------------------------------------------- 1 | import React, { useState, useEffect } from 'react'; 2 | import { useNavigate, useLocation } from 'react-router-dom'; 3 | import { 4 | Button, 5 | Checkbox, 6 | Form, 7 | Grid, 8 | Input, 9 | theme, 10 | Typography, 11 | Card, 12 | Space, 13 | message 14 | } from 'antd'; 15 | import { 16 | LockOutlined, 17 | UserOutlined, 18 | LoginOutlined 19 | } from '@ant-design/icons'; 20 | import { login, isAuthenticated } from '../utils/auth'; 21 | import { ROUTES } from '../utils/constants'; 22 | 23 | const { useToken } = theme; 24 | const { useBreakpoint } = Grid; 25 | const { Title, Text, Link } = Typography; 26 | 27 | const Login = () => { 28 | const { token } = useToken(); 29 | const screens = useBreakpoint(); 30 | const navigate = useNavigate(); 31 | const location = useLocation(); 32 | const [loading, setLoading] = useState(false); 33 | 34 | // Redirect if already authenticated 35 | useEffect(() => { 36 | if (isAuthenticated()) { 37 | const from = location.state?.from?.pathname || ROUTES.DASHBOARD; 38 | navigate(from, { replace: true }); 39 | } 40 | }, [navigate, location]); 41 | 42 | const onFinish = async (values) => { 43 | try { 44 | setLoading(true); 45 | 46 | // Call the login function from auth.js 47 | await login(values.username, values.password); 48 | 49 | message.success('Login successful!'); 50 | 51 | // Redirect to the page the user was trying to access, or to the dashboard 52 | const from = location.state?.from?.pathname || ROUTES.DASHBOARD; 53 | navigate(from, { replace: true }); 54 | } catch (error) { 55 | console.error('Login error:', error); 56 | message.error('Invalid username or password'); 57 | } finally { 58 | setLoading(false); 59 | } 60 | }; 61 | 62 | const styles = { 63 | container: { 64 | display: 
'flex', 65 | justifyContent: 'center', 66 | alignItems: 'center', 67 | minHeight: '100vh', 68 | background: token.colorBgLayout, 69 | padding: screens.md ? `${token.paddingXL}px` : `${token.padding}px`, 70 | }, 71 | card: { 72 | width: screens.sm ? '400px' : '100%', 73 | maxWidth: '400px', 74 | borderRadius: token.borderRadiusLG, 75 | boxShadow: token.boxShadow, 76 | }, 77 | header: { 78 | marginBottom: token.marginLG, 79 | textAlign: 'center', 80 | }, 81 | logo: { 82 | fontSize: '32px', 83 | color: token.colorPrimary, 84 | marginBottom: token.marginSM, 85 | }, 86 | form: { 87 | width: '100%', 88 | }, 89 | footer: { 90 | marginTop: token.marginLG, 91 | textAlign: 'center', 92 | }, 93 | forgotPassword: { 94 | float: 'right', 95 | }, 96 | }; 97 | 98 | return ( 99 |
100 | 101 |
102 |
103 | 104 |
105 | Welcome to HydraSRT 106 | 107 | Please sign in to access your account 108 | 109 |
110 | 111 |
119 | 123 | } 125 | placeholder="Username" 126 | autoComplete="username" 127 | /> 128 | 129 | 130 | 134 | } 136 | placeholder="Password" 137 | autoComplete="current-password" 138 | /> 139 | 140 | 141 | 142 | 143 | 144 | Remember me 145 | 146 | 147 | Forgot password? 148 | 149 | 150 | 151 | 152 | 153 | 161 | 162 | 163 |
164 | 165 | Don't have an account? Contact administrator 166 | 167 |
168 |
169 |
170 |
171 | ); 172 | }; 173 | 174 | export default Login; -------------------------------------------------------------------------------- /web_app/src/pages/routes/Routes.jsx: -------------------------------------------------------------------------------- 1 | import { useEffect, useState } from 'react'; 2 | import { Table, Card, Button, Tag, Space, Typography, message, Modal } from 'antd'; 3 | import { PlusOutlined, EditOutlined, DeleteOutlined, ExclamationCircleFilled, CaretRightOutlined, StopOutlined, HomeOutlined } from '@ant-design/icons'; 4 | import { useNavigate } from 'react-router-dom'; 5 | import { routesApi } from '../../utils/api'; 6 | 7 | const { Title } = Typography; 8 | 9 | const Routes = () => { 10 | const [routes, setRoutes] = useState([]); 11 | const [loading, setLoading] = useState(true); 12 | const [messageApi, contextHolder] = message.useMessage(); 13 | const [modal, modalContextHolder] = Modal.useModal(); 14 | const navigate = useNavigate(); 15 | 16 | // Set breadcrumb items for the Routes page 17 | useEffect(() => { 18 | if (window.setBreadcrumbItems) { 19 | window.breadcrumbSet = true; 20 | window.setBreadcrumbItems([ 21 | { 22 | href: '/', 23 | title: , 24 | }, 25 | { 26 | href: '/routes', 27 | title: 'Routes', 28 | } 29 | ]); 30 | } 31 | }, []); 32 | 33 | useEffect(() => { 34 | fetchRoutes(); 35 | }, []); 36 | 37 | const fetchRoutes = async () => { 38 | try { 39 | setLoading(true); 40 | const result = await routesApi.getAll(); 41 | setRoutes(result.data); 42 | } catch (error) { 43 | messageApi.error(`Failed to fetch routes: ${error.message}`); 44 | console.error('Error:', error); 45 | } finally { 46 | setLoading(false); 47 | } 48 | }; 49 | 50 | const showDeleteConfirm = (record) => { 51 | modal.confirm({ 52 | title: 'Are you sure you want to delete this route?', 53 | icon: , 54 | content: `Route: ${record.name}`, 55 | okText: 'Yes, delete', 56 | okType: 'danger', 57 | cancelText: 'No, cancel', 58 | onOk() { 59 | return handleDelete(record.id); 
60 | }, 61 | }); 62 | }; 63 | 64 | const handleDelete = async (id) => { 65 | try { 66 | await routesApi.delete(id); 67 | messageApi.success('Route deleted successfully'); 68 | fetchRoutes(); // Refresh the list 69 | } catch (error) { 70 | messageApi.error(`Failed to delete route: ${error.message}`); 71 | console.error('Error:', error); 72 | } 73 | }; 74 | 75 | const handleRouteStatus = async (id, action) => { 76 | try { 77 | const result = action === 'start' 78 | ? await routesApi.start(id) 79 | : await routesApi.stop(id); 80 | 81 | // Update the specific route in the routes array 82 | setRoutes(routes.map(route => 83 | route.id === id ? { ...route, status: result.data.status } : route 84 | )); 85 | 86 | messageApi.success(`Route ${action}ed successfully`); 87 | } catch (error) { 88 | messageApi.error(`Failed to ${action} route: ${error.message}`); 89 | console.error('Error:', error); 90 | } 91 | }; 92 | 93 | const columns = [ 94 | { 95 | title: 'Name', 96 | dataIndex: 'name', 97 | key: 'name', 98 | render: (text, record) => { 99 | return ( 100 | 101 | 102 | {text} 103 | 104 | 105 | ) 106 | }, 107 | }, 108 | { 109 | title: 'Enabled', 110 | dataIndex: 'enabled', 111 | key: 'enabled', 112 | render: (schema) => ( 113 | 114 | {schema ? 'yes' : 'no'} 115 | 116 | ), 117 | }, 118 | { 119 | title: 'Status', 120 | dataIndex: 'status', 121 | key: 'status', 122 | }, 123 | { 124 | title: 'Authentication', 125 | key: 'authentication', 126 | // filters: [ 127 | // { text: 'Enabled', value: true }, 128 | // { text: 'Disabled', value: false } 129 | // ], 130 | // onFilter: (value, record) => { 131 | // if (record.schema !== 'SRT') return !value; 132 | // return (record.schema_options && record.schema_options.authentication) === value; 133 | // }, 134 | render: (_, record) => { 135 | if (record.schema !== 'SRT') return N/A; 136 | return record.schema_options && record.schema_options.authentication ? 
( 137 | yes 138 | ) : ( 139 | no 140 | ); 141 | }, 142 | }, 143 | { 144 | title: 'Input', 145 | dataIndex: 'input', 146 | key: 'input', 147 | render: (text, record) => { 148 | switch (record.schema) { 149 | case 'SRT': 150 | return (`${record.schema}:${record.srtMode}:${record?.schema_options?.localport}`); 151 | default: 152 | return ('Unknown'); 153 | } 154 | } 155 | 156 | }, 157 | { 158 | title: 'Last Updated', 159 | dataIndex: 'updated_at', 160 | key: 'updated_at', 161 | render: (date) => new Date(date).toLocaleString(), 162 | }, 163 | { 164 | title: 'Actions', 165 | key: 'actions', 166 | render: (_, record) => ( 167 | 168 | 175 | 182 | 190 | 191 | ), 192 | }, 193 | ]; 194 | 195 | return ( 196 |
197 | {contextHolder} 198 | {modalContextHolder} 199 | 200 | 201 | Routes 202 | 203 | 210 | 211 | 212 | 213 | 214 | `Total ${total} routes`, 223 | }} 224 | /> 225 | 226 | 227 | 228 | ); 229 | }; 230 | 231 | export default Routes; -------------------------------------------------------------------------------- /web_app/src/pages/system/SystemNodes.jsx: -------------------------------------------------------------------------------- 1 | import { useEffect, useState } from 'react'; 2 | import { Table, Card, Button, Space, Typography, message, Tooltip, Tag, Progress } from 'antd'; 3 | import { ReloadOutlined, HomeOutlined } from '@ant-design/icons'; 4 | import { nodesApi } from '../../utils/api'; 5 | import { ROUTES } from '../../utils/constants'; 6 | 7 | const { Title } = Typography; 8 | 9 | const SystemNodes = () => { 10 | const [nodes, setNodes] = useState([]); 11 | const [loading, setLoading] = useState(true); 12 | const [messageApi, contextHolder] = message.useMessage(); 13 | 14 | // Set breadcrumb items for the System Nodes page 15 | useEffect(() => { 16 | if (window.setBreadcrumbItems) { 17 | window.breadcrumbSet = true; 18 | window.setBreadcrumbItems([ 19 | { 20 | href: ROUTES.DASHBOARD, 21 | title: , 22 | }, 23 | { 24 | href: ROUTES.SYSTEM_NODES, 25 | title: 'Nodes List', 26 | } 27 | ]); 28 | } 29 | }, []); 30 | 31 | useEffect(() => { 32 | fetchNodes(); 33 | // Set up auto-refresh every 5 seconds 34 | const intervalId = setInterval(fetchNodes, 5000); 35 | 36 | // Clean up interval on component unmount 37 | return () => clearInterval(intervalId); 38 | }, []); 39 | 40 | const fetchNodes = async () => { 41 | try { 42 | setLoading(true); 43 | const data = await nodesApi.getAll(); 44 | setNodes(data); 45 | } catch (error) { 46 | messageApi.error(`Failed to fetch nodes: ${error.message}`); 47 | console.error('Error:', error); 48 | } finally { 49 | setLoading(false); 50 | } 51 | }; 52 | 53 | const getStatusColor = (status) => { 54 | switch (status) { 55 | case 'up': 
56 | return 'success'; 57 | case 'self': 58 | return 'processing'; 59 | case 'down': 60 | return 'error'; 61 | default: 62 | return 'default'; 63 | } 64 | }; 65 | 66 | const getStatusText = (status) => { 67 | switch (status) { 68 | case 'up': 69 | return 'Up'; 70 | case 'self': 71 | return 'Self'; 72 | case 'down': 73 | return 'Down'; 74 | default: 75 | return 'Unknown'; 76 | } 77 | }; 78 | 79 | const getProgressColor = (value) => { 80 | if (value === null || value === undefined) return '#ccc'; 81 | if (value > 80) return '#ff4d4f'; 82 | if (value > 50) return '#faad14'; 83 | return '#52c41a'; 84 | }; 85 | 86 | const columns = [ 87 | { 88 | title: 'Host', 89 | dataIndex: 'host', 90 | key: 'host', 91 | render: (text) => {text}, 92 | }, 93 | { 94 | title: 'CPU', 95 | dataIndex: 'cpu', 96 | key: 'cpu', 97 | render: (value) => { 98 | if (value === null || value === undefined) return 'N/A'; 99 | return ( 100 | `${percent}%`} 106 | /> 107 | ); 108 | }, 109 | sorter: (a, b) => { 110 | if (a.cpu === null && b.cpu === null) return 0; 111 | if (a.cpu === null) return -1; 112 | if (b.cpu === null) return 1; 113 | return a.cpu - b.cpu; 114 | }, 115 | }, 116 | { 117 | title: 'RAM', 118 | dataIndex: 'ram', 119 | key: 'ram', 120 | render: (value) => { 121 | if (value === null || value === undefined) return 'N/A'; 122 | return ( 123 | `${percent}%`} 129 | /> 130 | ); 131 | }, 132 | sorter: (a, b) => { 133 | if (a.ram === null && b.ram === null) return 0; 134 | if (a.ram === null) return -1; 135 | if (b.ram === null) return 1; 136 | return a.ram - b.ram; 137 | }, 138 | }, 139 | { 140 | title: 'SWAP', 141 | dataIndex: 'swap', 142 | key: 'swap', 143 | render: (value) => { 144 | if (value === null || value === undefined) return 'N/A'; 145 | return ( 146 | `${percent}%`} 152 | /> 153 | ); 154 | }, 155 | sorter: (a, b) => { 156 | if (a.swap === null && b.swap === null) return 0; 157 | if (a.swap === null) return -1; 158 | if (b.swap === null) return 1; 159 | return a.swap - b.swap; 160 
| }, 161 | }, 162 | { 163 | title: 'LA', 164 | dataIndex: 'la', 165 | key: 'la', 166 | }, 167 | { 168 | title: 'Status', 169 | dataIndex: 'status', 170 | key: 'status', 171 | render: (status) => ( 172 | 173 | {getStatusText(status)} 174 | 175 | ), 176 | filters: [ 177 | { text: 'Self', value: 'self' }, 178 | { text: 'Up', value: 'up' }, 179 | { text: 'Down', value: 'down' }, 180 | ], 181 | onFilter: (value, record) => record.status === value, 182 | }, 183 | ]; 184 | 185 | return ( 186 |
187 | {contextHolder} 188 | 189 | 190 | Nodes List 191 | 198 | 199 | 200 | 201 |
208 | 209 | 210 | 211 | ); 212 | }; 213 | 214 | export default SystemNodes; -------------------------------------------------------------------------------- /web_app/src/pages/system/SystemPipelines.jsx: -------------------------------------------------------------------------------- 1 | import { useEffect, useState } from 'react'; 2 | import { Table, Card, Button, Space, Typography, message, Modal, Tooltip, Tag } from 'antd'; 3 | import { ReloadOutlined, StopOutlined, ExclamationCircleFilled, HomeOutlined } from '@ant-design/icons'; 4 | import { systemPipelinesApi } from '../../utils/api'; 5 | import { ROUTES } from '../../utils/constants'; 6 | 7 | const { Title } = Typography; 8 | 9 | const SystemPipelines = () => { 10 | const [pipelines, setPipelines] = useState([]); 11 | const [loading, setLoading] = useState(true); 12 | const [messageApi, contextHolder] = message.useMessage(); 13 | const [modal, modalContextHolder] = Modal.useModal(); 14 | 15 | // Set breadcrumb items for the System Pipelines page 16 | useEffect(() => { 17 | if (window.setBreadcrumbItems) { 18 | window.breadcrumbSet = true; 19 | window.setBreadcrumbItems([ 20 | { 21 | href: ROUTES.DASHBOARD, 22 | title: , 23 | }, 24 | { 25 | href: ROUTES.SYSTEM_PIPELINES, 26 | title: 'System Pipelines', 27 | } 28 | ]); 29 | } 30 | }, []); 31 | 32 | useEffect(() => { 33 | fetchPipelines(); 34 | // Set up auto-refresh every 5 seconds 35 | const intervalId = setInterval(fetchPipelines, 5000); 36 | 37 | // Clean up interval on component unmount 38 | return () => clearInterval(intervalId); 39 | }, []); 40 | 41 | const fetchPipelines = async () => { 42 | try { 43 | setLoading(true); 44 | const data = await systemPipelinesApi.getAll(); 45 | setPipelines(data); 46 | } catch (error) { 47 | messageApi.error(`Failed to fetch pipeline processes: ${error.message}`); 48 | console.error('Error:', error); 49 | } finally { 50 | setLoading(false); 51 | } 52 | }; 53 | 54 | const showKillConfirm = (record) => { 55 | 
modal.confirm({ 56 | title: 'Are you sure you want to force kill this pipeline process?', 57 | icon: , 58 | content: `PID: ${record.pid}, Command: ${record.command}`, 59 | okText: 'Yes, kill', 60 | okType: 'danger', 61 | cancelText: 'No, cancel', 62 | onOk() { 63 | return handleKill(record.pid); 64 | }, 65 | }); 66 | }; 67 | 68 | const handleKill = async (pid) => { 69 | try { 70 | await systemPipelinesApi.kill(pid); 71 | messageApi.success('Pipeline process killed successfully'); 72 | fetchPipelines(); // Refresh the list 73 | } catch (error) { 74 | messageApi.error(`Failed to kill process: ${error.message}`); 75 | console.error('Error:', error); 76 | } 77 | }; 78 | 79 | const formatBytes = (bytes, decimals = 2) => { 80 | if (bytes === 0) return '0 Bytes'; 81 | 82 | const sizes = ['B', 'KB', 'MB', 'GB', 'TB']; 83 | const i = Math.floor(Math.log(bytes) / Math.log(1024)); 84 | return parseFloat((bytes / Math.pow(1024, i)).toFixed(decimals)) + ' ' + sizes[i]; 85 | }; 86 | 87 | const columns = [ 88 | { 89 | title: 'PID', 90 | dataIndex: 'pid', 91 | key: 'pid', 92 | sorter: (a, b) => a.pid - b.pid, 93 | }, 94 | { 95 | title: 'CPU', 96 | dataIndex: 'cpu', 97 | key: 'cpu', 98 | sorter: (a, b) => parseFloat(a.cpu) - parseFloat(b.cpu), 99 | render: (text) => { 100 | const value = parseFloat(text); 101 | let color = 'green'; 102 | if (value > 50) color = 'orange'; 103 | if (value > 80) color = 'red'; 104 | return {text}; 105 | } 106 | }, 107 | { 108 | title: 'Memory', 109 | dataIndex: 'memory', 110 | key: 'memory', 111 | render: (_, record) => ( 112 | 113 | {record.memory} 114 | 115 | ), 116 | sorter: (a, b) => a.memory_bytes - b.memory_bytes, 117 | }, 118 | { 119 | title: 'Swap', 120 | key: 'swap', 121 | render: (_, record) => ( 122 | 123 | {formatBytes(record.swap_bytes)} ({record.swap_percent}) 124 | 125 | ), 126 | sorter: (a, b) => a.swap_bytes - b.swap_bytes, 127 | }, 128 | { 129 | title: 'User', 130 | dataIndex: 'user', 131 | key: 'user', 132 | }, 133 | { 134 | title: 
'Start Time', 135 | dataIndex: 'start_time', 136 | key: 'start_time', 137 | }, 138 | { 139 | title: 'Command', 140 | dataIndex: 'command', 141 | key: 'command', 142 | ellipsis: true, 143 | render: (text) => ( 144 | 145 | {text} 146 | 147 | ), 148 | }, 149 | { 150 | title: 'Actions', 151 | key: 'actions', 152 | render: (_, record) => ( 153 | 161 | ), 162 | }, 163 | ]; 164 | 165 | const expandedRowRender = (record) => { 166 | const items = [ 167 | { label: 'PID', value: record.pid }, 168 | { label: 'CPU Usage', value: record.cpu }, 169 | { label: 'Memory Usage', value: `${record.memory} (${record.memory_percent})` }, 170 | { label: 'Memory in Bytes', value: record.memory_bytes.toLocaleString() }, 171 | { label: 'Swap Usage', value: `${formatBytes(record.swap_bytes)} (${record.swap_percent})` }, 172 | { label: 'Swap in Bytes', value: record.swap_bytes.toLocaleString() }, 173 | { label: 'User', value: record.user }, 174 | { label: 'Start Time', value: record.start_time }, 175 | { label: 'Command', value: record.command }, 176 | ]; 177 | 178 | if (record.virtual_memory) { 179 | items.push({ label: 'Virtual Memory', value: record.virtual_memory }); 180 | items.push({ label: 'Resident Memory', value: record.resident_memory }); 181 | items.push({ label: 'CPU Time', value: record.cpu_time }); 182 | items.push({ label: 'Process State', value: record.state }); 183 | items.push({ label: 'Parent PID', value: record.ppid }); 184 | } 185 | 186 | return ( 187 | 188 |
189 | {items.map((item, index) => ( 190 |
191 | {item.label}: {item.value} 192 |
193 | ))} 194 |
195 |
196 | ); 197 | }; 198 | 199 | return ( 200 |
201 | {contextHolder} 202 | {modalContextHolder} 203 | 204 | 205 | System Pipelines 206 | 213 | 214 | 215 | 216 |
`Total ${total} pipeline processes`, 229 | }} 230 | /> 231 | 232 | 233 | 234 | ); 235 | }; 236 | 237 | export default SystemPipelines; -------------------------------------------------------------------------------- /web_app/src/utils/api.js: -------------------------------------------------------------------------------- 1 | /** 2 | * API service for making authenticated requests to the backend 3 | */ 4 | import { authFetch } from './auth'; 5 | import { API_BASE_URL } from './constants'; 6 | 7 | // System Pipelines API 8 | export const systemPipelinesApi = { 9 | // Get all pipeline processes 10 | getAll: async () => { 11 | const response = await authFetch('/api/system/pipelines'); 12 | return response.json(); 13 | }, 14 | 15 | // Get detailed pipeline information 16 | getDetailed: async () => { 17 | const response = await authFetch('/api/system/pipelines/detailed'); 18 | return response.json(); 19 | }, 20 | 21 | // Kill a pipeline process 22 | kill: async (pid) => { 23 | const response = await authFetch(`/api/system/pipelines/${pid}/kill`, { 24 | method: 'POST', 25 | }); 26 | return response.json(); 27 | }, 28 | }; 29 | 30 | // Nodes API 31 | export const nodesApi = { 32 | // Get all nodes 33 | getAll: async () => { 34 | const response = await authFetch('/api/nodes'); 35 | return response.json(); 36 | }, 37 | 38 | // Get a single node by ID 39 | getById: async (id) => { 40 | const response = await authFetch(`/api/nodes/${id}`); 41 | return response.json(); 42 | }, 43 | }; 44 | 45 | // Routes API 46 | export const routesApi = { 47 | // Get all routes 48 | getAll: async () => { 49 | const response = await authFetch('/api/routes'); 50 | return response.json(); 51 | }, 52 | 53 | // Get a single route by ID 54 | getById: async (id) => { 55 | const response = await authFetch(`/api/routes/${id}`); 56 | return response.json(); 57 | }, 58 | 59 | // Create a new route 60 | create: async (routeData) => { 61 | const response = await authFetch('/api/routes', { 62 | method: 
'POST', 63 | body: JSON.stringify({ route: routeData }), 64 | }); 65 | return response.json(); 66 | }, 67 | 68 | // Update a route 69 | update: async (id, routeData) => { 70 | const response = await authFetch(`/api/routes/${id}`, { 71 | method: 'PUT', 72 | body: JSON.stringify({ route: routeData }), 73 | }); 74 | return response.json(); 75 | }, 76 | 77 | // Delete a route 78 | delete: async (id) => { 79 | const response = await authFetch(`/api/routes/${id}`, { 80 | method: 'DELETE', 81 | }); 82 | // Check if response has content before parsing as JSON 83 | const contentType = response.headers.get("content-type"); 84 | if (contentType && contentType.includes("application/json")) { 85 | return response.json(); 86 | } 87 | return { success: true }; 88 | }, 89 | 90 | // Start a route 91 | start: async (id) => { 92 | const response = await authFetch(`/api/routes/${id}/start`); 93 | return response.json(); 94 | }, 95 | 96 | // Stop a route 97 | stop: async (id) => { 98 | const response = await authFetch(`/api/routes/${id}/stop`); 99 | return response.json(); 100 | }, 101 | 102 | // Restart a route 103 | restart: async (id) => { 104 | const response = await authFetch(`/api/routes/${id}/restart`); 105 | return response.json(); 106 | }, 107 | }; 108 | 109 | export const backupApi = { 110 | export: async () => { 111 | const response = await authFetch('/api/backup/export'); 112 | return response.json(); 113 | }, 114 | 115 | getDownloadLink: async () => { 116 | const response = await authFetch('/api/backup/create-download-link'); 117 | return response.json(); 118 | }, 119 | 120 | getBackupDownloadLink: async () => { 121 | const response = await authFetch('/api/backup/create-backup-download-link'); 122 | return response.json(); 123 | }, 124 | 125 | download: async () => { 126 | try { 127 | const { download_link } = await backupApi.getDownloadLink(); 128 | 129 | window.open(`${API_BASE_URL}${download_link}`, '_blank'); 130 | return true; 131 | } catch (error) { 132 | 
console.error('Error downloading backup:', error); 133 | throw error; 134 | } 135 | }, 136 | 137 | downloadBackup: async () => { 138 | try { 139 | const { download_link } = await backupApi.getBackupDownloadLink(); 140 | 141 | window.open(`${API_BASE_URL}${download_link}`, '_blank'); 142 | return true; 143 | } catch (error) { 144 | console.error('Error downloading backup:', error); 145 | throw error; 146 | } 147 | }, 148 | 149 | restore: async (file) => { 150 | try { 151 | // Read the file as an ArrayBuffer 152 | const arrayBuffer = await file.arrayBuffer(); 153 | 154 | // Convert ArrayBuffer to Blob with the correct MIME type 155 | const blob = new Blob([arrayBuffer], { type: 'application/octet-stream' }); 156 | 157 | console.log('Sending file as binary data with Content-Type: application/octet-stream'); 158 | const response = await authFetch('/api/restore', { 159 | method: 'POST', 160 | headers: { 161 | 'Content-Type': 'application/octet-stream', 162 | }, 163 | body: blob, 164 | }); 165 | 166 | if (!response.ok) { 167 | const errorData = await response.json(); 168 | throw new Error(errorData.error || 'Failed to restore backup'); 169 | } 170 | 171 | return response.json(); 172 | } catch (error) { 173 | console.error('Error in restore API call:', error); 174 | throw error; 175 | } 176 | }, 177 | }; 178 | 179 | // Destinations API 180 | export const destinationsApi = { 181 | // Get all destinations for a route 182 | getAll: async (routeId) => { 183 | const response = await authFetch(`/api/routes/${routeId}/destinations`); 184 | return response.json(); 185 | }, 186 | 187 | // Get a single destination by ID 188 | getById: async (routeId, destId) => { 189 | const response = await authFetch(`/api/routes/${routeId}/destinations/${destId}`); 190 | return response.json(); 191 | }, 192 | 193 | // Create a new destination 194 | create: async (routeId, destData) => { 195 | const response = await authFetch(`/api/routes/${routeId}/destinations`, { 196 | method: 'POST', 197 | 
body: JSON.stringify({ destination: destData }), 198 | }); 199 | return response.json(); 200 | }, 201 | 202 | // Update a destination 203 | update: async (routeId, destId, destData) => { 204 | const response = await authFetch(`/api/routes/${routeId}/destinations/${destId}`, { 205 | method: 'PUT', 206 | body: JSON.stringify({ destination: destData }), 207 | }); 208 | return response.json(); 209 | }, 210 | 211 | // Delete a destination 212 | delete: async (routeId, destId) => { 213 | const response = await authFetch(`/api/routes/${routeId}/destinations/${destId}`, { 214 | method: 'DELETE', 215 | }); 216 | // Check if response has content before parsing as JSON 217 | const contentType = response.headers.get("content-type"); 218 | if (contentType && contentType.includes("application/json")) { 219 | return response.json(); 220 | } 221 | return { success: true }; 222 | }, 223 | }; -------------------------------------------------------------------------------- /web_app/src/utils/auth.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Authentication utility functions 3 | */ 4 | import { API_BASE_URL, AUTH_TOKEN_KEY, AUTH_USER_KEY } from './constants'; 5 | 6 | // Get the authentication token from localStorage 7 | export const getToken = () => { 8 | return localStorage.getItem(AUTH_TOKEN_KEY); 9 | }; 10 | 11 | // Set the authentication token in localStorage 12 | export const setToken = (token) => { 13 | localStorage.setItem(AUTH_TOKEN_KEY, token); 14 | }; 15 | 16 | // Remove the authentication token from localStorage 17 | export const removeToken = () => { 18 | localStorage.removeItem(AUTH_TOKEN_KEY); 19 | }; 20 | 21 | // Get the user from localStorage 22 | export const getUser = () => { 23 | const userStr = localStorage.getItem(AUTH_USER_KEY); 24 | return userStr ? 
JSON.parse(userStr) : null; 25 | }; 26 | 27 | // Set the user in localStorage 28 | export const setUser = (user) => { 29 | localStorage.setItem(AUTH_USER_KEY, JSON.stringify(user)); 30 | }; 31 | 32 | // Remove the user from localStorage 33 | export const removeUser = () => { 34 | localStorage.removeItem(AUTH_USER_KEY); 35 | }; 36 | 37 | // Check if the user is authenticated 38 | export const isAuthenticated = () => { 39 | return !!getToken(); 40 | }; 41 | 42 | // Add the authentication token to API requests 43 | export const authHeader = () => { 44 | const token = getToken(); 45 | return token ? { 'Authorization': `Bearer ${token}` } : {}; 46 | }; 47 | 48 | // Create an authenticated fetch function 49 | export const authFetch = async (url, options = {}) => { 50 | const headers = { 51 | ...authHeader(), 52 | ...options.headers, 53 | }; 54 | 55 | // Only set Content-Type to application/json if: 56 | // 1. The body is not FormData 57 | // 2. Content-Type is not already set in options.headers 58 | if (!(options.body instanceof FormData) && !options.headers?.['Content-Type']) { 59 | headers['Content-Type'] = 'application/json'; 60 | } 61 | 62 | const config = { 63 | ...options, 64 | headers, 65 | }; 66 | 67 | try { 68 | // Prepend API_BASE_URL if the URL doesn't already include it 69 | const fullUrl = url.startsWith('http') ? 
url : `${API_BASE_URL}${url}`; 70 | const response = await fetch(fullUrl, config); 71 | 72 | // If 401 Unauthorized or 403 Forbidden, clear token and redirect to login 73 | if (response.status === 401 || response.status === 403) { 74 | removeToken(); 75 | removeUser(); 76 | window.location.href = '/#/login'; 77 | return Promise.reject('Authentication error'); 78 | } 79 | 80 | return response; 81 | } catch (error) { 82 | return Promise.reject(error); 83 | } 84 | }; 85 | 86 | // Login function 87 | export const login = async (username, password) => { 88 | try { 89 | const response = await fetch(`${API_BASE_URL}/api/login`, { 90 | method: 'POST', 91 | headers: { 92 | 'Content-Type': 'application/json', 93 | }, 94 | body: JSON.stringify({ 95 | login: { 96 | user: username, 97 | password: password 98 | } 99 | }), 100 | }); 101 | 102 | if (!response.ok) { 103 | throw new Error('Login failed'); 104 | } 105 | 106 | const data = await response.json(); 107 | setToken(data.token); 108 | setUser(data.user); 109 | return data; 110 | } catch (error) { 111 | console.error('Login error:', error); 112 | throw error; 113 | } 114 | }; 115 | 116 | // Logout function 117 | export const logout = () => { 118 | removeToken(); 119 | removeUser(); 120 | window.location.href = '/#/login'; 121 | }; -------------------------------------------------------------------------------- /web_app/src/utils/constants.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Application constants 3 | */ 4 | 5 | // API base URL 6 | export const API_BASE_URL = 'http://127.0.0.1:4000'; 7 | 8 | // Authentication 9 | export const AUTH_TOKEN_KEY = 'token'; 10 | export const AUTH_USER_KEY = 'user'; 11 | 12 | // Routes 13 | export const ROUTES = { 14 | LOGIN: '/login', 15 | DASHBOARD: '/', 16 | ROUTES: '/routes', 17 | SETTINGS: '/settings', 18 | SYSTEM_PIPELINES: '/system/pipelines', 19 | SYSTEM_NODES: '/system/nodes', 20 | }; 
-------------------------------------------------------------------------------- /web_app/vite.config.js: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite' 2 | import react from '@vitejs/plugin-react' 3 | 4 | // https://vite.dev/config/ 5 | export default defineConfig({ 6 | plugins: [react()], 7 | }) 8 | --------------------------------------------------------------------------------