├── priv
│   ├── zstd_dictionary
│   ├── static
│   │   ├── favicon.ico
│   │   └── robots.txt
│   └── repo
│       └── migrations
│           ├── 20250910005347_add_index_to_identity.exs
│           ├── 20241129150013_add_world_ids.exs
│           ├── 20241126020227_add_initial_tables.exs
│           └── 20241214185646_add_world_labels.exs
├── .formatter.exs
├── lib
│   ├── vrhose
│   │   ├── timeliner_storage.ex
│   │   ├── data.ex
│   │   ├── repo_vacuum.ex
│   │   ├── repo.ex
│   │   ├── world.ex
│   │   ├── quick_leader.ex
│   │   ├── tinycron.ex
│   │   ├── repo_base.ex
│   │   ├── vrchat_world.ex
│   │   ├── ring_buffer.zig
│   │   ├── identity.ex
│   │   ├── hydrator.ex
│   │   ├── application.ex
│   │   ├── websocket.ex
│   │   ├── timeliner_storage.zig
│   │   ├── timeliner.ex
│   │   └── ingestor.ex
│   ├── vrhose.ex
│   ├── vrhose_web
│   │   ├── controllers
│   │   │   ├── error_json.ex
│   │   │   └── main.ex
│   │   ├── router.ex
│   │   ├── endpoint.ex
│   │   └── telemetry.ex
│   └── vrhose_web.ex
├── test
│   ├── test_helper.exs
│   ├── vrhose_web
│   │   └── controllers
│   │       └── error_json_test.exs
│   ├── support
│   │   └── conn_case.ex
│   └── vrhose
│       └── post_flags_test.exs
├── config
│   ├── prod.exs
│   ├── test.exs
│   ├── config.exs
│   ├── dev.exs
│   └── runtime.exs
├── README.md
├── .gitignore
├── test_stress.py
├── test.py
├── test.sh
├── mix.exs
└── mix.lock
/priv/zstd_dictionary: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lun-4/vrhose/HEAD/priv/zstd_dictionary -------------------------------------------------------------------------------- /priv/static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lun-4/vrhose/HEAD/priv/static/favicon.ico -------------------------------------------------------------------------------- /.formatter.exs: -------------------------------------------------------------------------------- 1 | [ 2 | import_deps: [:phoenix], 3 | inputs: ["*.{ex,exs}", "{config,lib,test}/**/*.{ex,exs}"] 4 | ] 5 | -------------------------------------------------------------------------------- /lib/vrhose/timeliner_storage.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.TimelinerStorage do 2 | use Zig, otp_app: :vrhose, zig_code_path: "timeliner_storage.zig" 3 | end 4 | -------------------------------------------------------------------------------- /lib/vrhose/data.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Data do 2 | def generate_unix_timestamp do 3 | NaiveDateTime.utc_now() 4 | |> NaiveDateTime.truncate(:second) 5 | end 6 | end 7 | -------------------------------------------------------------------------------- /test/test_helper.exs: -------------------------------------------------------------------------------- 1 | Code.put_compiler_option(:warnings_as_errors, true) 2 | ExUnit.start() 3 | 4 | # for repo <- 5 | # Application.fetch_env!(:vrhose, :ecto_repos) do 6 | # Ecto.Adapters.SQL.Sandbox.mode(repo, :auto) 7 | # end 8 | -------------------------------------------------------------------------------- /config/prod.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # Do not print debug messages in production 4 | config :logger, level: :info 5 | 6 | # Runtime production configuration, including reading 7 | # of environment variables, is done on config/runtime.exs.
8 | -------------------------------------------------------------------------------- /priv/static/robots.txt: -------------------------------------------------------------------------------- 1 | # See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file 2 | # 3 | # To ban all spiders from the entire site uncomment the next two lines: 4 | # User-agent: * 5 | # Disallow: / 6 | -------------------------------------------------------------------------------- /priv/repo/migrations/20250910005347_add_index_to_identity.exs: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Repo.Migrations.AddIndexToIdentity do 2 | use Ecto.Migration 3 | 4 | def change do 5 | create(index(:identity, ["unixepoch(inserted_at)"])) 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /lib/vrhose.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose do 2 | @moduledoc """ 3 | VRHose keeps the contexts that define your domain 4 | and business logic. 5 | 6 | Contexts are also responsible for managing your data, regardless 7 | if it comes from the database, an external API or others. 8 | """ 9 | end 10 | -------------------------------------------------------------------------------- /priv/repo/migrations/20241129150013_add_world_ids.exs: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Repo.Migrations.AddWorlds do 2 | use Ecto.Migration 3 | 4 | def change do 5 | create table(:worlds) do 6 | add(:vrchat_id, :text, null: false) 7 | add(:poster_did, :text, null: false) 8 | timestamps() 9 | end 10 | 11 | create(index(:worlds, ["unixepoch(inserted_at)"])) 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /test/vrhose_web/controllers/error_json_test.exs: -------------------------------------------------------------------------------- 1 | defmodule VRHoseWeb.ErrorJSONTest do 2 | use VRHoseWeb.ConnCase, async: true 3 | 4 | test "renders 404" do 5 | assert VRHoseWeb.ErrorJSON.render("404.json", %{}) == %{errors: %{detail: "Not Found"}} 6 | end 7 | 8 | test "renders 500" do 9 | assert VRHoseWeb.ErrorJSON.render("500.json", %{}) == 10 | %{errors: %{detail: "Internal Server Error"}} 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # VRHose 2 | 3 | backend for Bluesky Firehose VR, available at https://vrchat.com/home/launch?worldId=wrld_52865286-5286-5286-5286-528652865286 4 | 5 | more information here!!! https://bsky.app/profile/natalie.ee/post/3ldcxzpmaxs2e 6 | 7 | ``` 8 | git clone ... 
9 | cd vrhose 10 | env MIX_ENV=prod mix deps.get 11 | env MIX_ENV=prod mix zig.get 12 | env MIX_ENV=prod mix compile 13 | 14 | # and, for prod 15 | env MIX_ENV=prod mix phx.server 16 | ``` 17 | -------------------------------------------------------------------------------- /priv/repo/migrations/20241126020227_add_initial_tables.exs: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Repo.Migrations.AddInitialTables do 2 | use Ecto.Migration 3 | 4 | def change do 5 | create table(:identity) do 6 | add(:did, :string) 7 | add(:also_known_as, :string, null: false) 8 | add(:atproto_pds_endpoint, :string, null: false) 9 | add(:name, :string) 10 | timestamps() 11 | end 12 | 13 | create(unique_index(:identity, [:did])) 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /lib/vrhose/repo_vacuum.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Repo.Janitor do 2 | @moduledoc """ 3 | Run incremental vacuums on the database 4 | """ 5 | require Logger 6 | 7 | @page_count 20 8 | 9 | def tick() do 10 | Logger.info("running vacuum at #{@page_count} pages for repos...") 11 | 12 | VRHose.Application.primaries() 13 | |> Enum.map(fn repo -> 14 | repo.query!("PRAGMA incremental_vacuum(#{@page_count});") 15 | end) 16 | 17 | Logger.info("done!") 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /priv/repo/migrations/20241214185646_add_world_labels.exs: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Repo.Migrations.AddWorldLabels do 2 | use Ecto.Migration 3 | 4 | def change do 5 | create table(:vrchat_worlds) do 6 | add(:wrld_id, :text, null: false) 7 | add(:name, :text, null: false) 8 | add(:author_id, :text, null: false) 9 | add(:tags, :text, null: false) 10 | add(:capacity, :integer, null: false) 11 | add(:description, :text, null: false) 12 | timestamps() 13 | end 14 | 15 | create(unique_index(:vrchat_worlds, [:wrld_id])) 16 | end 17 | end 18 | -------------------------------------------------------------------------------- /config/test.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # We don't run a server during test. If one is required, 4 | # you can enable the server option below. 5 | config :vrhose, VRHoseWeb.Endpoint, 6 | http: [ip: {127, 0, 0, 1}, port: 4002], 7 | secret_key_base: "QrZ1l8Qns/Zzs56F+5T+RKPNwD1sn4Wx8H8J5Jl701ShIdPgicXgxL+4SEdscdQu", 8 | server: false 9 | 10 | # Print only warnings and errors during test 11 | config :logger, level: :warning 12 | 13 | # Initialize plugs at runtime for faster test compilation 14 | config :phoenix, :plug_init_mode, :runtime 15 | 16 | config :vrhose, VRHose.Repo, 17 | pool_size: 1, 18 | queue_target: 10000, 19 | queue_timeout: 10000 20 | 21 | # pool: Ecto.Adapters.SQL.Sandbox 22 | -------------------------------------------------------------------------------- /lib/vrhose_web/controllers/error_json.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHoseWeb.ErrorJSON do 2 | @moduledoc """ 3 | This module is invoked by your endpoint in case of errors on JSON requests. 4 | 5 | See config/config.exs. 
6 | """ 7 | 8 | # If you want to customize a particular status code, 9 | # you may add your own clauses, such as: 10 | # 11 | # def render("500.json", _assigns) do 12 | # %{errors: %{detail: "Internal Server Error"}} 13 | # end 14 | 15 | # By default, Phoenix returns the status message from 16 | # the template name. For example, "404.json" becomes 17 | # "Not Found". 18 | def render(template, _assigns) do 19 | %{errors: %{detail: Phoenix.Controller.status_message_from_template(template)}} 20 | end 21 | end 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # The directory Mix will write compiled artifacts to. 2 | /_build/ 3 | 4 | # If you run "mix test --cover", coverage assets end up here. 5 | /cover/ 6 | 7 | # The directory Mix downloads your dependencies sources to. 8 | /deps/ 9 | 10 | # Where 3rd-party dependencies like ExDoc output generated docs. 11 | /doc/ 12 | 13 | # Ignore .fetch files in case you like to edit your project deps locally. 14 | /.fetch 15 | 16 | # If the VM crashes, it generates a dump, let's ignore it too. 17 | erl_crash.dump 18 | 19 | # Also ignore archive artifacts (built via "mix archive.build"). 20 | *.ez 21 | 22 | # Temporary files, for example, from tests. 23 | /tmp/ 24 | 25 | # Ignore package tarball (built via "mix hex.build"). 26 | vrhose-*.tar 27 | 28 | # zigler 29 | .Elixir*.zig 30 | priv/lib 31 | *.db* 32 | 33 | .envrc 34 | -------------------------------------------------------------------------------- /lib/vrhose/repo.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Repo do 2 | use VRHose.Repo.Base, 3 | primary: VRHose.Repo, 4 | read_replicas: [ 5 | VRHose.Repo.Replica1, 6 | VRHose.Repo.Replica2, 7 | VRHose.Repo.Replica3, 8 | VRHose.Repo.Replica4 9 | ], 10 | dedicated_replicas: [ 11 | VRHose.Repo.JanitorReplica 12 | ] 13 | 14 | # use Ecto.Repo, 15 | # otp_app: :vrhose, 16 | # adapter: Ecto.Adapters.SQLite3, 17 | # pool_size: 1, 18 | # loggers: [VRHose.Repo.Instrumenter, Ecto.LogEntry] 19 | 20 | defmodule Instrumenter do 21 | use Prometheus.EctoInstrumenter 22 | 23 | def label_value(:repo, log_entry) do 24 | log_entry[:repo] |> to_string 25 | end 26 | 27 | def label_value(:query, log_entry) do 28 | log_entry[:query] 29 | end 30 | end 31 | end 32 | -------------------------------------------------------------------------------- /lib/vrhose_web/router.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHoseWeb.Router do 2 | use VRHoseWeb, :router 3 | 4 | pipeline :api do 5 | plug :accepts, ["json"] 6 | end 7 | 8 | scope "/api/v1", VRHoseWeb do 9 | pipe_through :api 10 | 11 | get "/hi", MainController, :hi 12 | get "/s/:timestamp", MainController, :fetch_delta 13 | end 14 | 15 | # Enable LiveDashboard in development 16 | if Application.compile_env(:vrhose, :dev_routes) do 17 | # If you want to use the LiveDashboard in production, you should put 18 | # it behind authentication and allow only admins to access it. 19 | # If your application does not have an admins-only section yet, 20 | # you can use Plug.BasicAuth to set up some basic authentication 21 | # as long as you are also using SSL (which you should anyway). 
22 | import Phoenix.LiveDashboard.Router 23 | 24 | scope "/dev" do 25 | pipe_through [:fetch_session, :protect_from_forgery] 26 | 27 | live_dashboard "/dashboard", metrics: VRHoseWeb.Telemetry 28 | end 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /lib/vrhose/world.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.World do 2 | use Ecto.Schema 3 | import Ecto.Query 4 | import Ecto.Changeset 5 | alias VRHose.Repo 6 | 7 | @type t :: %__MODULE__{} 8 | 9 | schema "worlds" do 10 | field(:vrchat_id, :string) 11 | field(:poster_did, :string) 12 | timestamps(autogenerate: {VRHose.Data, :generate_unix_timestamp, []}) 13 | end 14 | 15 | def changeset(%__MODULE__{} = identity, params) do 16 | identity 17 | |> cast(params, [:vrchat_id, :poster_did]) 18 | |> validate_required([:vrchat_id, :poster_did]) 19 | end 20 | 21 | def last_worlds(count \\ 10) do 22 | query = 23 | from(s in __MODULE__, 24 | select: s, 25 | order_by: [desc: fragment("unixepoch(?)", s.inserted_at)], 26 | limit: ^count 27 | ) 28 | 29 | Repo.all(query) 30 | end 31 | 32 | def insert(world_id, poster_did) do 33 | %__MODULE__{} 34 | |> changeset(%{ 35 | vrchat_id: world_id, 36 | poster_did: poster_did 37 | }) 38 | |> Repo.insert() 39 | end 40 | end 41 | -------------------------------------------------------------------------------- /test_stress.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import requests 4 | import time 5 | import threading 6 | from concurrent.futures import ThreadPoolExecutor 7 | import queue 8 | 9 | 10 | def make_request(host): 11 | try: 12 | resp_initial = requests.get(f"{host}/api/v1/hi") 13 | assert resp_initial.status_code == 200 14 | resp_initial.json() 15 | except Exception as e: 16 | print(f"Error in thread: {e}") 17 | return None 18 | 19 | 20 | def main(): 21 | host = os.environ.get("HOST") 22 | host = host or "http://localhost:4000" 23 | 24 | num_threads = 100 25 | 26 | while True: 27 | with ThreadPoolExecutor(max_workers=num_threads) as executor: 28 | # Create a list of 10 identical tasks 29 | futures = [executor.submit(make_request, host) for _ in range(num_threads)] 30 | 31 | # Wait for all requests to complete 32 | for future in futures: 33 | future.result() 34 | 35 | # Optional: Add a small delay between batches 36 | print("tick", time.time()) 37 | time.sleep(0.001) 38 | 39 | 40 | if __name__ == "__main__": 41 | main() 42 | -------------------------------------------------------------------------------- /test/support/conn_case.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHoseWeb.ConnCase do 2 | @moduledoc """ 3 | This module defines the test case to be used by 4 | tests that require setting up a connection. 5 | 6 | Such tests rely on `Phoenix.ConnTest` and also 7 | import other functionality to make it easier 8 | to build common data structures and query the data layer. 9 | 10 | Finally, if the test case interacts with the database, 11 | we enable the SQL sandbox, so changes done to the database 12 | are reverted at the end of every test. If you are using 13 | PostgreSQL, you can even run database tests asynchronously 14 | by setting `use VRHoseWeb.ConnCase, async: true`, although 15 | this option is not recommended for other databases. 
16 | """ 17 | 18 | use ExUnit.CaseTemplate 19 | 20 | using do 21 | quote do 22 | # The default endpoint for testing 23 | @endpoint VRHoseWeb.Endpoint 24 | 25 | use VRHoseWeb, :verified_routes 26 | 27 | # Import conveniences for testing with connections 28 | import Plug.Conn 29 | import Phoenix.ConnTest 30 | import VRHoseWeb.ConnCase 31 | end 32 | end 33 | 34 | setup _tags do 35 | {:ok, conn: Phoenix.ConnTest.build_conn()} 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import requests 4 | import time 5 | 6 | 7 | def print_rates(rates): 8 | print("per second:") 9 | for field in rates: 10 | data = rates[field] 11 | print( 12 | "\t", 13 | field, 14 | "\t\t", 15 | round(data["rate"], 2), 16 | "\t", 17 | "(inexact!)" if data["inexact"] else "(ok)", 18 | ) 19 | 20 | 21 | host = os.environ.get("HOST") 22 | host = host or "http://localhost:4000" 23 | 24 | resp_initial = requests.get(f"{host}/api/v1/hi") 25 | assert resp_initial.status_code == 200 26 | rjson = resp_initial.json() 27 | posts = rjson["batch"] 28 | print_rates(rjson["rates"]) 29 | first = True 30 | while True: 31 | print("have", len(posts)) 32 | post = posts[-1] 33 | post_timestamp = int(post["d"]) % 1000 34 | print("requesting at timestamp", post_timestamp) 35 | resp_delta = requests.get(f"{host}/api/v1/s/{post_timestamp}") 36 | djson = resp_delta.json() 37 | delta_posts = djson["batch"] 38 | if first: 39 | assert len(delta_posts) <= len(posts) 40 | first = False 41 | print_rates(djson["rates"]) 42 | print("got", len(delta_posts), "delta posts, sleeping...") 43 | posts = delta_posts 44 | time.sleep(2) 45 | -------------------------------------------------------------------------------- /lib/vrhose/quick_leader.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.QuickLeader do 2 | require Logger 3 | use GenServer 4 | 5 | def start_link(opts \\ []) do 6 | GenServer.start_link(__MODULE__, opts, opts) 7 | end 8 | 9 | def acquire() do 10 | GenServer.call(VRHose.QuickLeader, :acquire) 11 | end 12 | 13 | def init(_opts) do 14 | {:ok, 15 | %{ 16 | leader: nil, 17 | leader_monitor: nil 18 | }} 19 | end 20 | 21 | def handle_call(:acquire, from_ref, state) do 22 | {from, _} = from_ref 23 | 24 | if state.leader == nil do 25 | Logger.info("quick leader: #{inspect(from)} is now leader") 26 | monitor_ref = Process.monitor(from) 27 | 28 | state = put_in(state.leader, from) 29 | state = put_in(state.leader_monitor, monitor_ref) 30 | 31 | {:reply, :leader, state} 32 | else 33 | if from == state.leader do 34 | {:reply, :leader, state} 35 | else 36 | {:reply, :not_leader, state} 37 | end 38 | end 39 | end 40 | 41 | def handle_info({:DOWN, monitor_ref, :process, _pid, reason}, state) do 42 | if monitor_ref == state.leader_monitor do 43 | Logger.warning( 44 | "LEADER DOWN message on monitor_ref #{inspect(monitor_ref)} reason=#{inspect(reason)}" 45 | ) 46 | 47 | true = Process.demonitor(monitor_ref, [:flush]) 48 | state = put_in(state.leader, nil) 49 | {:noreply, state} 50 | else 51 | Logger.warning( 52 | "unknown DOWN message on monitor_ref #{inspect(monitor_ref)} reason=#{inspect(reason)}" 53 | ) 54 | end 55 | end 56 | end 57 | -------------------------------------------------------------------------------- /lib/vrhose_web.ex: -------------------------------------------------------------------------------- 1 | 
defmodule VRHoseWeb do 2 | @moduledoc """ 3 | The entrypoint for defining your web interface, such 4 | as controllers, components, channels, and so on. 5 | 6 | This can be used in your application as: 7 | 8 | use VRHoseWeb, :controller 9 | use VRHoseWeb, :html 10 | 11 | The definitions below will be executed for every controller, 12 | component, etc, so keep them short and clean, focused 13 | on imports, uses and aliases. 14 | 15 | Do NOT define functions inside the quoted expressions 16 | below. Instead, define additional modules and import 17 | those modules here. 18 | """ 19 | 20 | def static_paths, do: ~w(assets fonts images favicon.ico robots.txt) 21 | 22 | def router do 23 | quote do 24 | use Phoenix.Router, helpers: false 25 | 26 | # Import common connection and controller functions to use in pipelines 27 | import Plug.Conn 28 | import Phoenix.Controller 29 | end 30 | end 31 | 32 | def channel do 33 | quote do 34 | use Phoenix.Channel 35 | end 36 | end 37 | 38 | def controller do 39 | quote do 40 | use Phoenix.Controller, 41 | formats: [:html, :json], 42 | layouts: [html: VRHoseWeb.Layouts] 43 | 44 | import Plug.Conn 45 | 46 | unquote(verified_routes()) 47 | end 48 | end 49 | 50 | def verified_routes do 51 | quote do 52 | use Phoenix.VerifiedRoutes, 53 | endpoint: VRHoseWeb.Endpoint, 54 | router: VRHoseWeb.Router, 55 | statics: VRHoseWeb.static_paths() 56 | end 57 | end 58 | 59 | @doc """ 60 | When used, dispatch to the appropriate controller/live_view/etc. 61 | """ 62 | defmacro __using__(which) when is_atom(which) do 63 | apply(__MODULE__, which, []) 64 | end 65 | end 66 | -------------------------------------------------------------------------------- /lib/vrhose/tinycron.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Tinycron do 2 | use GenServer 3 | require Logger 4 | 5 | def new(module, opts \\ []) do 6 | %{ 7 | id: module, 8 | start: {__MODULE__, :start_link, [module, opts |> Keyword.put(:name, module)]} 9 | } 10 | end 11 | 12 | def start_link(module, opts \\ []) do 13 | GenServer.start_link(__MODULE__, [module, opts], opts) 14 | end 15 | 16 | def noop(mod, value) do 17 | GenServer.cast(mod, {:noop, value}) 18 | end 19 | 20 | @impl true 21 | def init([module, opts]) do 22 | state = %{module: module, opts: opts} 23 | schedule_work(state) 24 | {:ok, state} 25 | end 26 | 27 | @impl true 28 | def handle_info(:work, %{module: module} = state) do 29 | unless state |> Map.get(:noop, false) do 30 | Logger.debug("running #{inspect(state.module)}") 31 | module.tick() 32 | end 33 | 34 | schedule_work(state) 35 | {:noreply, state} 36 | end 37 | 38 | @impl true 39 | def handle_cast({:noop, value}, state) do 40 | {:noreply, state |> Map.put(:noop, value)} 41 | end 42 | 43 | defp schedule_work(state) do 44 | every_seconds = state.opts |> Keyword.get(:every) || 10 45 | jitter_seconds_range = state.opts |> Keyword.get(:jitter) || -2..2 46 | first..last//step = jitter_seconds_range 47 | # turn it into milliseconds for greater jitter possibilities 48 | jitter_milliseconds_range = (first * 1000)..(last * 1000)//step 49 | 50 | # prevent jitter from creating negative next_tick_time by doing max(0, next_tick_time) 51 | next_tick_time = max(0, every_seconds * 1000 + Enum.random(jitter_milliseconds_range)) 52 | Logger.debug("scheduling #{inspect(state.module)} in #{next_tick_time}ms") 53 | Process.send_after(self(), :work, next_tick_time) 54 | end 55 | end 56 | -------------------------------------------------------------------------------- 
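A minimal usage sketch for `VRHose.Tinycron` (not a file from the repository): `new/2` builds a child spec that periodically calls the given module's `tick/0` with jitter, which is presumably how `VRHose.Repo.Janitor` and `VRHose.Identity.Janitor` get scheduled inside `VRHose.Application` (its `janitor_workers/0` is truncated further below). The `every:`/`jitter:` values here are illustrative assumptions; the defaults are 10 seconds and -2..2.

```elixir
# Hypothetical wiring, not taken verbatim from the repository.
children = [
  # call VRHose.Repo.Janitor.tick/0 roughly every 60s, with up to 5s of jitter either way
  VRHose.Tinycron.new(VRHose.Repo.Janitor, every: 60, jitter: -5..5),
  # fall back to the defaults: every 10s, jitter -2..2 seconds
  VRHose.Tinycron.new(VRHose.Identity.Janitor)
]

Supervisor.start_link(children, strategy: :one_for_one)
```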
/config/config.exs: -------------------------------------------------------------------------------- 1 | # This file is responsible for configuring your application 2 | # and its dependencies with the aid of the Config module. 3 | # 4 | # This configuration file is loaded before any dependency and 5 | # is restricted to this project. 6 | 7 | # General application configuration 8 | import Config 9 | 10 | config :vrhose, 11 | namespace: VRHose, 12 | generators: [timestamp_type: :utc_datetime] 13 | 14 | config :vrhose, 15 | ecto_repos: [VRHose.Repo] 16 | 17 | repos = [ 18 | VRHose.Repo, 19 | VRHose.Repo.Replica1, 20 | VRHose.Repo.Replica2, 21 | VRHose.Repo.Replica3, 22 | VRHose.Repo.Replica4, 23 | VRHose.Repo.JanitorReplica 24 | ] 25 | 26 | for repo <- repos do 27 | config :vrhose, repo, 28 | cache_size: -8_000, 29 | pool_size: 1, 30 | auto_vacuum: :incremental, 31 | telemetry_prefix: [:vrhose, :repo], 32 | telemetry_event: [VRHose.Repo.Instrumenter], 33 | queue_target: 500, 34 | queue_interval: 2000, 35 | database: "vrhose_#{Mix.env()}.db" 36 | end 37 | 38 | # Configures the endpoint 39 | config :vrhose, VRHoseWeb.Endpoint, 40 | url: [host: "localhost"], 41 | adapter: Bandit.PhoenixAdapter, 42 | render_errors: [ 43 | formats: [json: VRHoseWeb.ErrorJSON], 44 | layout: false 45 | ], 46 | pubsub_server: VRHose.PubSub, 47 | live_view: [signing_salt: "10KBJgAB"] 48 | 49 | # Configures Elixir's Logger 50 | config :logger, :console, 51 | format: "$time $metadata[$level] $message\n", 52 | metadata: [:request_id] 53 | 54 | # Use Jason for JSON parsing in Phoenix 55 | config :phoenix, :json_library, Jason 56 | 57 | config :vrhose, :atproto, did_plc_endpoint: "https://plc.directory" 58 | 59 | # Import environment specific config. This must remain at the bottom 60 | # of this file so it overrides the configuration defined above. 61 | import_config "#{config_env()}.exs" 62 | -------------------------------------------------------------------------------- /lib/vrhose/repo_base.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Repo.Base do 2 | defmacro __using__(opts) do 3 | quote bind_quoted: [opts: opts] do 4 | use Ecto.Repo, 5 | otp_app: :vrhose, 6 | adapter: Ecto.Adapters.SQLite3, 7 | 8 | # sqlite does not do multi-writer. pool_size is effectively one, 9 | # if it's larger than one, then Database Busy errors haunt you 10 | # the trick to make concurrency happen is to create "read replicas" 11 | # that are effectively a pool of readers. 
this works because we're in WAL mode 12 | pool_size: 1, 13 | loggers: [VRHose.Repo.Instrumenter, Ecto.LogEntry] 14 | 15 | @read_replicas opts[:read_replicas] 16 | @dedicated_replicas opts[:dedicated_replicas] 17 | 18 | def repo_spec do 19 | %{read_replicas: @read_replicas, dedicated_replicas: @dedicated_replicas} 20 | end 21 | 22 | def replica() do 23 | Enum.random(@read_replicas) 24 | end 25 | 26 | def replica(identifier) 27 | when is_number(identifier) or is_bitstring(identifier) or is_atom(identifier) do 28 | @read_replicas |> Enum.at(rem(identifier |> :erlang.phash2(), length(@read_replicas))) 29 | end 30 | 31 | for repo <- @read_replicas ++ @dedicated_replicas do 32 | default_dynamic_repo = 33 | if Mix.env() == :test do 34 | opts[:primary] 35 | else 36 | repo 37 | end 38 | 39 | defmodule repo do 40 | use Ecto.Repo, 41 | otp_app: :vrhose, 42 | adapter: Ecto.Adapters.SQLite3, 43 | pool_size: 1, 44 | loggers: [VRHose.Repo.Instrumenter, Ecto.LogEntry], 45 | read_only: true, 46 | default_dynamic_repo: default_dynamic_repo 47 | end 48 | end 49 | end 50 | end 51 | end 52 | -------------------------------------------------------------------------------- /config/dev.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # For development, we disable any cache and enable 4 | # debugging and code reloading. 5 | # 6 | # The watchers configuration can be used to run external 7 | # watchers to your application. For example, we can use it 8 | # to bundle .js and .css sources. 9 | config :vrhose, VRHoseWeb.Endpoint, 10 | # Binding to loopback ipv4 address prevents access from other machines. 11 | # Change to `ip: {0, 0, 0, 0}` to allow access from other machines. 12 | http: [ip: {127, 0, 0, 1}, port: 4000], 13 | check_origin: false, 14 | code_reloader: true, 15 | debug_errors: true, 16 | secret_key_base: "XHJKI9w8ILE8BWHN9Wtb1ssJY6xCJqABWrCs+3Owghr13TUEvGlJ3OJRl0d3A5O3", 17 | watchers: [] 18 | 19 | # ## SSL Support 20 | # 21 | # In order to use HTTPS in development, a self-signed 22 | # certificate can be generated by running the following 23 | # Mix task: 24 | # 25 | # mix phx.gen.cert 26 | # 27 | # Run `mix help phx.gen.cert` for more information. 28 | # 29 | # The `http:` config above can be replaced with: 30 | # 31 | # https: [ 32 | # port: 4001, 33 | # cipher_suite: :strong, 34 | # keyfile: "priv/cert/selfsigned_key.pem", 35 | # certfile: "priv/cert/selfsigned.pem" 36 | # ], 37 | # 38 | # If desired, both `http:` and `https:` keys can be 39 | # configured to run both http and https servers on 40 | # different ports. 41 | 42 | # Enable dev routes for dashboard and mailbox 43 | config :vrhose, dev_routes: true 44 | 45 | # Do not include metadata nor timestamps in development logs 46 | config :logger, :console, format: "[$level] $message\n" 47 | 48 | # Set a higher stacktrace during development. Avoid configuring such 49 | # in production as building large stacktraces may be expensive. 
50 | config :phoenix, :stacktrace_depth, 20 51 | 52 | # Initialize plugs at runtime for faster development compilation 53 | config :phoenix, :plug_init_mode, :runtime 54 | -------------------------------------------------------------------------------- /lib/vrhose_web/endpoint.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHoseWeb.Endpoint do 2 | use Phoenix.Endpoint, otp_app: :vrhose 3 | 4 | # The session will be stored in the cookie and signed, 5 | # this means its contents can be read but not tampered with. 6 | # Set :encryption_salt if you would also like to encrypt it. 7 | @session_options [ 8 | store: :cookie, 9 | key: "_vrhose_key", 10 | signing_salt: "dTCkkeeF", 11 | same_site: "Lax" 12 | ] 13 | 14 | socket "/live", Phoenix.LiveView.Socket, 15 | websocket: [connect_info: [session: @session_options]], 16 | longpoll: [connect_info: [session: @session_options]] 17 | 18 | # Serve at "/" the static files from "priv/static" directory. 19 | # 20 | # You should set gzip to true if you are running phx.digest 21 | # when deploying your static files in production. 22 | plug Plug.Static, 23 | at: "/", 24 | from: :vrhose, 25 | gzip: false, 26 | only: VRHoseWeb.static_paths() 27 | 28 | # Code reloading can be explicitly enabled under the 29 | # :code_reloader configuration of your endpoint. 30 | if code_reloading? do 31 | plug Phoenix.CodeReloader 32 | end 33 | 34 | plug Phoenix.LiveDashboard.RequestLogger, 35 | param_key: "request_logger", 36 | cookie_key: "request_logger" 37 | 38 | plug Plug.RequestId 39 | plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint] 40 | 41 | plug Plug.Parsers, 42 | parsers: [:urlencoded, :multipart, :json], 43 | pass: ["*/*"], 44 | json_decoder: Phoenix.json_library() 45 | 46 | plug Plug.MethodOverride 47 | plug Plug.Head 48 | plug Plug.Session, @session_options 49 | 50 | defmodule Instrumenter do 51 | use Prometheus.PhoenixInstrumenter 52 | end 53 | 54 | defmodule PipelineInstrumenter do 55 | use Prometheus.PlugPipelineInstrumenter 56 | end 57 | 58 | defmodule MetricsExporter do 59 | use Prometheus.PlugExporter 60 | end 61 | 62 | plug(PipelineInstrumenter) 63 | plug(MetricsExporter) 64 | 65 | plug VRHoseWeb.Router 66 | end 67 | -------------------------------------------------------------------------------- /test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eux 3 | record='{"text":"The life on the big cities roofs is always beautiful and cleaner than the ground~World : City of Glass (Night)by Hiyu#VRChat #VRChatPhotography #VirtualPhotography #VRChat_world紹介 #ShockyReportvrchat.com/home/world/w...","$type":"app.bsky.feed.post","embed":{"$type":"app.bsky.embed.images","images":[{"alt":"cityscape from the rooftops","image":{"$type":"blob","ref":{"$link":"bafkreibv3ndp4p7dnmilh5slrqtyr3lpfzuvu2hes72gqhxlbzqbounmii"},"mimeType":"image/jpeg","size":329666},"aspectRatio":{"width":1920,"height":1080}},{"alt":"cityscape from the rooftops","image":{"$type":"blob","ref":{"$link":"bafkreiaabtxafoavi5r5xw64b7gb6fv45gug6hbcu2wmhf3gi236spsje4"},"mimeType":"image/jpeg","size":957868},"aspectRatio":{"width":1920,"height":1080}},{"alt":"grey cityscape from the street","image":{"$type":"blob","ref":{"$link":"bafkreig577gxjig7b7yixzafgr6znfeerwpzc2uei2g5nebveuts27vfre"},"mimeType":"image/jpeg","size":346215},"aspectRatio":{"width":1920,"height":1080}},{"alt":"futuristic modern interior 
","image":{"$type":"blob","ref":{"$link":"bafkreidht7kwqwvyrpnn5pzszgmfx2kd72det57gp7jxo5uzruvc3mqeiq"},"mimeType":"image/jpeg","size":537684},"aspectRatio":{"width":1920,"height":1080}}]},"langs":["en"],"facets":[{"index":{"byteEnd":130,"byteStart":123},"features":[{"tag":"VRChat","$type":"app.bsky.richtext.facet#tag"}]},{"index":{"byteEnd":149,"byteStart":131},"features":[{"tag":"VRChatPhotography","$type":"app.bsky.richtext.facet#tag"}]},{"index":{"byteEnd":169,"byteStart":150},"features":[{"tag":"VirtualPhotography","$type":"app.bsky.richtext.facet#tag"}]},{"index":{"byteEnd":189,"byteStart":170},"features":[{"tag":"VRChat_world紹介","$type":"app.bsky.richtext.facet#tag"}]},{"index":{"byteEnd":203,"byteStart":190},"features":[{"tag":"ShockyReport","$type":"app.bsky.richtext.facet#tag"}]},{"index":{"byteEnd":231,"byteStart":205},"features":[{"uri":"https://vrchat.com/home/world/wrld_dd2650ab-0065-459e-af05-b2a2be569b65","$type":"app.bsky.richtext.facet#link"}]}],"createdAt":"2024-11-27T10:33:33.249Z"}' 4 | packed='{"did":"did:plc:3supx2bf4t7nz3dstxwviefk","kind":"commit","commit":{"operation":"create","collection":"app.bsky.feed.post","rkey":"amogus","record":'$record'}}' 5 | as_str=$(echo "$packed" | jq -R '.') 6 | 7 | echo "send VRHose.Ingestor, {:websocket_text, DateTime.utc_now(), "$as_str"}" 8 | -------------------------------------------------------------------------------- /lib/vrhose/vrchat_world.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.VRChatWorld do 2 | use Ecto.Schema 3 | import Ecto.Query 4 | import Ecto.Changeset 5 | alias VRHose.Repo 6 | 7 | @type t :: %__MODULE__{} 8 | 9 | schema "vrchat_worlds" do 10 | field(:wrld_id, :string) 11 | field(:name, :string) 12 | field(:author_id, :string) 13 | field(:tags, :string) 14 | field(:capacity, :integer) 15 | field(:description, :string) 16 | timestamps(autogenerate: {VRHose.Data, :generate_unix_timestamp, []}) 17 | end 18 | 19 | def one(wrld_id) do 20 | query = from(s in __MODULE__, where: s.wrld_id == ^wrld_id, select: s) 21 | Repo.replica(wrld_id).one(query, log: false) 22 | end 23 | 24 | def changeset(%__MODULE__{} = world, params) do 25 | world 26 | |> cast(params, [:wrld_id, :name, :author_id, :tags, :capacity, :description]) 27 | |> validate_required([:wrld_id, :name, :author_id, :tags, :capacity, :description]) 28 | end 29 | 30 | def insert(wrld_id, name, author_id, tags, capacity, description) do 31 | %__MODULE__{} 32 | |> changeset(%{ 33 | wrld_id: wrld_id, 34 | name: name, 35 | author_id: author_id, 36 | tags: tags, 37 | capacity: capacity, 38 | description: description 39 | }) 40 | |> Repo.insert() 41 | end 42 | 43 | def fetch(wrld_id) do 44 | maybe_world = one(wrld_id) 45 | 46 | if maybe_world == nil do 47 | {:ok, resp} = fetch_upstream(wrld_id) 48 | 49 | tags = 50 | resp.body["tags"] 51 | |> Enum.filter(fn tag -> 52 | String.starts_with?(tag, "content_") 53 | end) 54 | |> Jason.encode!() 55 | 56 | insert( 57 | resp.body["id"], 58 | resp.body["name"], 59 | resp.body["authorId"], 60 | tags, 61 | resp.body["capacity"], 62 | resp.body["description"] 63 | ) 64 | else 65 | {:ok, maybe_world} 66 | end 67 | end 68 | 69 | defp fetch_upstream(wrld_id) do 70 | operator_email = System.get_env("VRHOSE_OPERATOR_EMAIL") 71 | 72 | if operator_email == nil do 73 | {:error, :no_operator_email} 74 | else 75 | Req.get("https://api.vrchat.cloud/api/1/worlds/#{wrld_id}", 76 | headers: [{"User-Agent", "BlueskyFirehoseVR/0.0.0 #{operator_email}"}] 77 | ) 78 | end 79 | end 80 | end 
81 | -------------------------------------------------------------------------------- /lib/vrhose_web/telemetry.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHoseWeb.Telemetry do 2 | use Supervisor 3 | import Telemetry.Metrics 4 | 5 | def start_link(arg) do 6 | Supervisor.start_link(__MODULE__, arg, name: __MODULE__) 7 | end 8 | 9 | @impl true 10 | def init(_arg) do 11 | children = [ 12 | # Telemetry poller will execute the given period measurements 13 | # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics 14 | {:telemetry_poller, measurements: periodic_measurements(), period: 10_000} 15 | # Add reporters as children of your supervision tree. 16 | # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} 17 | ] 18 | 19 | Supervisor.init(children, strategy: :one_for_one) 20 | end 21 | 22 | def metrics do 23 | [ 24 | # Phoenix Metrics 25 | summary("phoenix.endpoint.start.system_time", 26 | unit: {:native, :millisecond} 27 | ), 28 | summary("phoenix.endpoint.stop.duration", 29 | unit: {:native, :millisecond} 30 | ), 31 | summary("phoenix.router_dispatch.start.system_time", 32 | tags: [:route], 33 | unit: {:native, :millisecond} 34 | ), 35 | summary("phoenix.router_dispatch.exception.duration", 36 | tags: [:route], 37 | unit: {:native, :millisecond} 38 | ), 39 | summary("phoenix.router_dispatch.stop.duration", 40 | tags: [:route], 41 | unit: {:native, :millisecond} 42 | ), 43 | summary("phoenix.socket_connected.duration", 44 | unit: {:native, :millisecond} 45 | ), 46 | summary("phoenix.channel_joined.duration", 47 | unit: {:native, :millisecond} 48 | ), 49 | summary("phoenix.channel_handled_in.duration", 50 | tags: [:event], 51 | unit: {:native, :millisecond} 52 | ), 53 | 54 | # VM Metrics 55 | summary("vm.memory.total", unit: {:byte, :kilobyte}), 56 | summary("vm.total_run_queue_lengths.total"), 57 | summary("vm.total_run_queue_lengths.cpu"), 58 | summary("vm.total_run_queue_lengths.io") 59 | ] 60 | end 61 | 62 | defp periodic_measurements do 63 | [ 64 | # A module, function and arguments to be invoked periodically. 65 | # This function must call :telemetry.execute/3 and a metric must be added above. 66 | # {VRHoseWeb, :count_users, []} 67 | ] 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /mix.exs: -------------------------------------------------------------------------------- 1 | defmodule VRHose.MixProject do 2 | use Mix.Project 3 | 4 | def project do 5 | [ 6 | app: :vrhose, 7 | version: "0.1.0", 8 | elixir: "~> 1.14", 9 | elixirc_paths: elixirc_paths(Mix.env()), 10 | start_permanent: Mix.env() == :prod, 11 | aliases: aliases(), 12 | deps: deps() 13 | ] 14 | end 15 | 16 | # Configuration for the OTP application. 17 | # 18 | # Type `mix help compile.app` for more information. 19 | def application do 20 | [ 21 | mod: {VRHose.Application, []}, 22 | extra_applications: [:logger, :runtime_tools, :inets] 23 | ] 24 | end 25 | 26 | # Specifies which paths to compile per environment. 27 | defp elixirc_paths(:test), do: ["lib", "test/support"] 28 | defp elixirc_paths(_), do: ["lib"] 29 | 30 | # Specifies your project dependencies. 31 | # 32 | # Type `mix help deps` for examples and options. 
33 | defp deps do 34 | [ 35 | {:phoenix, "~> 1.7.14"}, 36 | {:phoenix_live_dashboard, "~> 0.8.3"}, 37 | {:phoenix_ecto, "~> 4.4"}, 38 | {:ecto_sql, "~> 3.10"}, 39 | {:ecto_sqlite3, ">= 0.0.0"}, 40 | {:telemetry_metrics, "~> 1.0"}, 41 | {:telemetry_poller, "~> 1.0"}, 42 | {:dns_cluster, "~> 0.1.1"}, 43 | {:bandit, "~> 1.5"}, 44 | {:jason, "~> 1.4"}, 45 | {:certifi, "~> 2.13"}, 46 | {:recon, "~> 2.3"}, 47 | {:mint, "~> 1.0"}, 48 | {:mint_web_socket, "~> 1.0"}, 49 | {:ezstd, "~> 1.1"}, 50 | {:zigler, "~> 0.13.2", runtime: false}, 51 | {:gen_stage, "~> 1.0"}, 52 | {:req, "~> 0.5.0"}, 53 | {:xrpc, git: "https://github.com/moomerman/xrpc", branch: "main"}, 54 | {:ex_hash_ring, "~> 6.0"}, 55 | {:prometheus, "~> 4.6"}, 56 | {:prometheus_ex, 57 | git: "https://github.com/lanodan/prometheus.ex.git", 58 | branch: "fix/elixir-1.14", 59 | override: true}, 60 | {:prometheus_plugs, "~> 1.1"}, 61 | {:prometheus_phoenix, "~> 1.3"}, 62 | # Note: once `prometheus_phx` is integrated into `prometheus_phoenix`, remove the former: 63 | {:prometheus_phx, 64 | git: "https://git.pleroma.social/pleroma/elixir-libraries/prometheus-phx.git", 65 | branch: "no-logging"}, 66 | {:prometheus_ecto, "~> 1.4"} 67 | ] 68 | end 69 | 70 | # Aliases are shortcuts or tasks specific to the current project. 71 | # For example, to install project dependencies and perform other setup tasks, run: 72 | # 73 | # $ mix setup 74 | # 75 | # See the documentation for `Mix` for more info on aliases. 76 | defp aliases do 77 | [ 78 | test: ["ecto.create --quiet", "ecto.migrate --quiet", "test"], 79 | setup: ["deps.get", "zig.get"] 80 | ] 81 | end 82 | end 83 | -------------------------------------------------------------------------------- /lib/vrhose/ring_buffer.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | pub fn RingBuffer(comptime T: type) type { 4 | return struct { 5 | const Self = @This(); 6 | 7 | buffer: []T, 8 | head: usize, 9 | tail: usize, 10 | len: usize, 11 | allocator: std.mem.Allocator, 12 | 13 | pub fn init(allocator: std.mem.Allocator, initialCapacity: usize) !Self { 14 | const buffer = try allocator.alloc(T, initialCapacity); 15 | return Self{ 16 | .buffer = buffer, 17 | .head = 0, 18 | .tail = 0, 19 | .len = 0, 20 | .allocator = allocator, 21 | }; 22 | } 23 | 24 | pub fn deinit(self: *Self) void { 25 | self.allocator.free(self.buffer); 26 | } 27 | 28 | pub fn push(self: *Self, item: T) !void { 29 | if (self.len == self.buffer.len) { 30 | return error.BufferFull; 31 | } 32 | 33 | self.buffer[self.tail] = item; 34 | self.tail = (self.tail + 1) % self.buffer.len; 35 | self.len += 1; 36 | } 37 | 38 | pub fn pop(self: *Self) ?*T { 39 | if (self.len == 0) { 40 | return null; 41 | } 42 | 43 | const item = &self.buffer[self.head]; 44 | self.head = (self.head + 1) % self.buffer.len; 45 | self.len -= 1; 46 | return item; 47 | } 48 | 49 | pub fn peek(self: *Self) ?T { 50 | if (self.len == 0) { 51 | return null; 52 | } 53 | return self.buffer[self.head]; 54 | } 55 | 56 | pub fn clear(self: *Self) void { 57 | self.head = 0; 58 | self.tail = 0; 59 | self.len = 0; 60 | } 61 | 62 | pub fn isFull(self: *Self) bool { 63 | return self.len == self.buffer.len; 64 | } 65 | 66 | pub fn isEmpty(self: *Self) bool { 67 | return self.len == 0; 68 | } 69 | 70 | pub fn capacity(self: *Self) usize { 71 | return self.buffer.len; 72 | } 73 | 74 | pub fn length(self: *Self) usize { 75 | return self.len; 76 | } 77 | 78 | pub fn iterator(self: *Self) Iterator { 79 | return Iterator{ 
80 | .buffer = self, 81 | .current = 0, 82 | }; 83 | } 84 | 85 | pub const Iterator = struct { 86 | buffer: *Self, 87 | current: usize, 88 | 89 | pub fn next(self: *Iterator) ?*T { 90 | if (self.current >= self.buffer.len) { 91 | return null; 92 | } 93 | const index = (self.buffer.head + self.current) % self.buffer.buffer.len; 94 | self.current += 1; 95 | return &self.buffer.buffer[index]; 96 | } 97 | }; 98 | }; 99 | } 100 | -------------------------------------------------------------------------------- /lib/vrhose/identity.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Identity do 2 | use Ecto.Schema 3 | import Ecto.Query 4 | import Ecto.Changeset 5 | alias VRHose.Repo 6 | 7 | @type t :: %__MODULE__{} 8 | 9 | schema "identity" do 10 | field(:did, :string) 11 | field(:also_known_as, :string) 12 | field(:atproto_pds_endpoint, :string) 13 | field(:name, :string) 14 | timestamps(autogenerate: {VRHose.Data, :generate_unix_timestamp, []}) 15 | end 16 | 17 | def to_handle(%__MODULE__{} = identity) do 18 | "@" <> 19 | if identity.also_known_as == "handle.invalid" do 20 | # fallback to did 21 | identity.did 22 | else 23 | identity.also_known_as 24 | end 25 | end 26 | 27 | def changeset(%__MODULE__{} = identity, params) do 28 | identity 29 | |> cast(params, [:did, :also_known_as, :atproto_pds_endpoint, :name]) 30 | |> validate_required([:did, :also_known_as, :atproto_pds_endpoint, :name]) 31 | end 32 | 33 | def one(did) do 34 | query = from(s in __MODULE__, where: s.did == ^did, select: s) 35 | Repo.replica(did).one(query, log: false) 36 | end 37 | 38 | def fake(did) do 39 | %__MODULE__{ 40 | did: did, 41 | also_known_as: did, 42 | atproto_pds_endpoint: "no", 43 | name: did 44 | } 45 | end 46 | 47 | def insert(did, aka, atproto_pds_endpoint, name) do 48 | aka = String.downcase(aka) 49 | 50 | %__MODULE__{} 51 | |> changeset(%{ 52 | did: did, 53 | also_known_as: aka, 54 | atproto_pds_endpoint: atproto_pds_endpoint, 55 | name: name 56 | }) 57 | |> Repo.insert( 58 | on_conflict: [ 59 | set: [ 60 | did: did, 61 | also_known_as: aka, 62 | atproto_pds_endpoint: atproto_pds_endpoint, 63 | name: name 64 | ] 65 | ], 66 | log: false 67 | ) 68 | end 69 | 70 | defmodule Janitor do 71 | require Logger 72 | 73 | import Ecto.Query 74 | alias VRHose.Identity 75 | alias VRHose.Repo.JanitorReplica 76 | 77 | def tick() do 78 | Logger.info("cleaning identities...") 79 | 80 | expiry_time = 81 | NaiveDateTime.utc_now() 82 | |> NaiveDateTime.add(-1, :day) 83 | |> DateTime.from_naive!("Etc/UTC") 84 | |> DateTime.to_unix() 85 | 86 | deleted_count = 87 | from(s in Identity, 88 | where: 89 | fragment("unixepoch(?)", s.inserted_at) < 90 | ^expiry_time, 91 | limit: 1000 92 | ) 93 | |> JanitorReplica.all() 94 | |> Enum.chunk_every(10) 95 | |> Enum.map(fn chunk -> 96 | chunk 97 | |> Enum.map(fn identity -> 98 | Repo.delete(identity) 99 | 1 100 | end) 101 | |> then(fn count -> 102 | :timer.sleep(1500) 103 | count 104 | end) 105 | |> Enum.sum() 106 | end) 107 | |> Enum.sum() 108 | 109 | Logger.info("deleted #{deleted_count} identities") 110 | end 111 | end 112 | end 113 | -------------------------------------------------------------------------------- /lib/vrhose/hydrator.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Hydrator do 2 | require Logger 3 | use GenServer 4 | require Logger 5 | 6 | def start_link(opts) do 7 | GenServer.start_link(__MODULE__, opts) 8 | end 9 | 10 | def submit_post(pid, data) do 11 | 
GenServer.cast(pid, {:post, data}) 12 | end 13 | 14 | @impl true 15 | def init(_opts) do 16 | Logger.info("Initializing hydrator #{inspect(self())}") 17 | ExHashRing.Ring.add_node(VRHose.Hydrator.Ring, self() |> :erlang.pid_to_list() |> to_string) 18 | {:ok, %{}} 19 | end 20 | 21 | @impl true 22 | def handle_cast({:post, event}, state) do 23 | process(event) 24 | {:noreply, state} 25 | end 26 | 27 | defp hydrate_with(post_data, %VRHose.Identity{} = identity) do 28 | post_data 29 | |> Map.put(:author_name, identity.name) 30 | |> Map.put(:author_handle, VRHose.Identity.to_handle(identity)) 31 | # recompute timestamp because we did some processing before making post ready to go 32 | |> Map.put( 33 | :timestamp, 34 | System.os_time(:millisecond) / 1000 35 | ) 36 | end 37 | 38 | defp process({_did, post_data, _subscribers} = event) do 39 | if post_data.world_id == nil do 40 | process_post(event) 41 | else 42 | is_good? = is_good_world?(post_data.world_id) 43 | Logger.warning("is wrld #{post_data.world_id} good? #{inspect(is_good?)}") 44 | 45 | if is_good? do 46 | process_post(event) 47 | end 48 | end 49 | end 50 | 51 | @unwanted_tags [ 52 | "content_sex", 53 | "content_adult" 54 | ] 55 | 56 | defp is_good_world?(wrld_id) do 57 | case VRHose.VRChatWorld.fetch(wrld_id) do 58 | {:ok, nil} -> 59 | true 60 | 61 | {:ok, world} -> 62 | matches_unwanted_tags? = 63 | world.tags 64 | |> Jason.decode!() 65 | |> Enum.map(fn tag -> 66 | tag in @unwanted_tags 67 | end) 68 | |> Enum.any?() 69 | 70 | # wanted when unwanted tags arent in the world 71 | not matches_unwanted_tags? 72 | 73 | {:error, _} -> 74 | true 75 | end 76 | end 77 | 78 | defp process_post({did, post_data, subscribers} = event) do 79 | identity = VRHose.Identity.one(did) 80 | 81 | post_data = 82 | if identity != nil do 83 | post_data 84 | |> hydrate_with(identity) 85 | else 86 | process_without_cache(event) 87 | end 88 | 89 | subscribers 90 | |> Enum.each(fn pid -> 91 | send( 92 | pid, 93 | {:post, post_data} 94 | ) 95 | end) 96 | end 97 | 98 | defp process_without_cache({did, post_data, _}) do 99 | case Req.get("https://public.api.bsky.app/xrpc/app.bsky.actor.getProfile?actor=#{did}") do 100 | {:ok, resp} -> 101 | aka = resp.body["handle"] || did 102 | 103 | display_name = 104 | (resp.body["displayName"] || aka) 105 | |> String.trim() 106 | |> then(fn 107 | "" -> aka 108 | v -> v 109 | end) 110 | 111 | {:ok, identity} = VRHose.Identity.insert(did, aka || did, "nil", display_name) 112 | 113 | post_data 114 | |> hydrate_with(identity) 115 | 116 | {:error, v} -> 117 | Logger.error("Error fetching profile for did #{did}: #{inspect(v)}") 118 | 119 | post_data 120 | |> hydrate_with(VRHose.Identity.fake(did)) 121 | end 122 | end 123 | end 124 | -------------------------------------------------------------------------------- /config/runtime.exs: -------------------------------------------------------------------------------- 1 | import Config 2 | 3 | # config/runtime.exs is executed for all environments, including 4 | # during releases. It is executed after compilation and before the 5 | # system starts, so it is typically used to load production configuration 6 | # and secrets from environment variables or elsewhere. Do not define 7 | # any compile-time configuration in here, as it won't be applied. 8 | # The block below contains prod specific runtime configuration. 
9 | 10 | # ## Using releases 11 | # 12 | # If you use `mix release`, you need to explicitly enable the server 13 | # by passing the PHX_SERVER=true when you start it: 14 | # 15 | # PHX_SERVER=true bin/vrhose start 16 | # 17 | # Alternatively, you can use `mix phx.gen.release` to generate a `bin/server` 18 | # script that automatically sets the env var above. 19 | if System.get_env("PHX_SERVER") do 20 | config :vrhose, VRHoseWeb.Endpoint, server: true 21 | end 22 | 23 | config :vrhose, :atproto, 24 | did_plc_endpoint: System.get_env("ATPROTO_PLC_URL") || "https://plc.directory" 25 | 26 | if config_env() == :prod do 27 | # The secret key base is used to sign/encrypt cookies and other secrets. 28 | # A default value is used in config/dev.exs and config/test.exs but you 29 | # want to use a different value for prod and you most likely don't want 30 | # to check this value into version control, so we use an environment 31 | # variable instead. 32 | secret_key_base = 33 | System.get_env("SECRET_KEY_BASE") || 34 | raise """ 35 | environment variable SECRET_KEY_BASE is missing. 36 | You can generate one by calling: mix phx.gen.secret 37 | """ 38 | 39 | host = System.get_env("PHX_HOST") || "example.com" 40 | port = String.to_integer(System.get_env("PORT") || "4000") 41 | 42 | config :vrhose, :dns_cluster_query, System.get_env("DNS_CLUSTER_QUERY") 43 | 44 | config :vrhose, VRHoseWeb.Endpoint, 45 | url: [host: host, port: 443, scheme: "https"], 46 | http: [ 47 | # Enable IPv6 and bind on all interfaces. 48 | # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access. 49 | # See the documentation on https://hexdocs.pm/bandit/Bandit.html#t:options/0 50 | # for details about using IPv6 vs IPv4 and loopback vs public addresses. 51 | ip: {0, 0, 0, 0, 0, 0, 0, 0}, 52 | port: port 53 | ], 54 | secret_key_base: secret_key_base 55 | 56 | # ## SSL Support 57 | # 58 | # To get SSL working, you will need to add the `https` key 59 | # to your endpoint configuration: 60 | # 61 | # config :vrhose, VRHoseWeb.Endpoint, 62 | # https: [ 63 | # ..., 64 | # port: 443, 65 | # cipher_suite: :strong, 66 | # keyfile: System.get_env("SOME_APP_SSL_KEY_PATH"), 67 | # certfile: System.get_env("SOME_APP_SSL_CERT_PATH") 68 | # ] 69 | # 70 | # The `cipher_suite` is set to `:strong` to support only the 71 | # latest and more secure SSL ciphers. This means old browsers 72 | # and clients may not be supported. You can set it to 73 | # `:compatible` for wider support. 74 | # 75 | # `:keyfile` and `:certfile` expect an absolute path to the key 76 | # and cert in disk or a relative path inside priv, for example 77 | # "priv/ssl/server.key". For all supported SSL configuration 78 | # options, see https://hexdocs.pm/plug/Plug.SSL.html#configure/1 79 | # 80 | # We also recommend setting `force_ssl` in your config/prod.exs, 81 | # ensuring no data is ever sent via http, always redirecting to https: 82 | # 83 | # config :vrhose, VRHoseWeb.Endpoint, 84 | # force_ssl: [hsts: true] 85 | # 86 | # Check `Plug.SSL` for all available options in `force_ssl`. 
87 | end 88 | -------------------------------------------------------------------------------- /lib/vrhose_web/controllers/main.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHoseWeb.MainController do 2 | use VRHoseWeb, :controller 3 | # vrcjson does not support unbalanced braces inside strings 4 | # this has been reported to vrchat already 5 | # 6 | # https://feedback.vrchat.com/vrchat-udon-closed-alpha-bugs/p/braces-inside-strings-in-vrcjson-can-fail-to-deserialize 7 | # 8 | # workaround for now is to strip off any brace character. we could write a balancer and strip 9 | # off the edge case, but i dont think i care enough to do that just for vrchat. 10 | # taken from YTS/yt_search 11 | 12 | def vrcjson_workaround(incoming_data, opts \\ []) do 13 | ignore_keys = Keyword.get(opts || [], :ignore_keys, []) 14 | 15 | case incoming_data do 16 | data when is_bitstring(data) -> 17 | data 18 | |> String.replace(~r/[\[\]{}]/, "") 19 | |> String.trim(" ") 20 | 21 | data when is_map(data) -> 22 | data 23 | |> Map.to_list() 24 | |> Enum.map(fn {key, value} -> 25 | if key in ignore_keys do 26 | {key, value} 27 | else 28 | {key, value |> vrcjson_workaround(opts)} 29 | end 30 | end) 31 | |> Map.new() 32 | 33 | data when is_list(data) -> 34 | data 35 | |> Enum.map(fn x -> vrcjson_workaround(x, opts) end) 36 | 37 | v when is_boolean(v) -> 38 | v 39 | 40 | nil -> 41 | nil 42 | 43 | v when is_atom(v) -> 44 | raise "Unsupported type #{inspect(v)}" 45 | 46 | v when is_tuple(v) -> 47 | raise "Unsupported type #{inspect(v)}" 48 | 49 | v -> 50 | v 51 | end 52 | end 53 | 54 | def hi(conn, _) do 55 | {:ok, timeline} = 56 | Registry.lookup(Registry.Timeliners, "timeliner") 57 | |> Enum.random() 58 | |> then(fn {pid, _} -> 59 | VRHose.Timeliner.fetch_all(pid) 60 | end) 61 | 62 | conn 63 | |> json(timeline |> vrcjson_workaround) 64 | end 65 | 66 | def fetch_delta(conn, %{"timestamp" => worldspace_timestamp_str}) do 67 | case Integer.parse(worldspace_timestamp_str, 10) do 68 | {worldspace_timestamp, ""} -> 69 | unless worldspace_timestamp < 0 do 70 | fetch_delta_validated(conn, worldspace_timestamp) 71 | else 72 | conn 73 | |> put_status(400) 74 | |> json(%{"error" => "Invalid negative timestamp: #{worldspace_timestamp_str}"}) 75 | end 76 | 77 | _ -> 78 | conn 79 | |> put_status(400) 80 | |> json(%{"error" => "Invalid timestamp: #{worldspace_timestamp_str}"}) 81 | end 82 | end 83 | 84 | def fetch_delta_validated(conn, worldspace_timestamp) do 85 | current_server_timestamp = DateTime.utc_now() |> DateTime.to_unix() 86 | 87 | # we need to convert from worldspace to serverspace 88 | # worldspace_timestamp = rem (realspace_timestamp |> DateTime.to_unix), 1000 89 | 90 | # note: 91 | # - only 1000 seconds resolution (thats fine for us) 92 | 93 | # basically utc now but ending in 000 lol 94 | base_timestamp = ((current_server_timestamp / 1000) |> trunc) * 1000 95 | 96 | server_delta_in_worldspace = current_server_timestamp - base_timestamp 97 | 98 | realspace_timestamp = 99 | if server_delta_in_worldspace < 100 and worldspace_timestamp > 900 do 100 | # use base_timestamp from previous cycle 101 | base_timestamp - 1000 + worldspace_timestamp 102 | else 103 | base_timestamp + worldspace_timestamp 104 | end 105 | 106 | {:ok, timeline} = 107 | Registry.lookup(Registry.Timeliners, "timeliner") 108 | |> Enum.random() 109 | |> then(fn {pid, _} -> 110 | VRHose.Timeliner.fetch(pid, realspace_timestamp) 111 | end) 112 | 113 | conn 114 | |> json(timeline |> vrcjson_workaround) 115 | 
end 116 | end 117 | -------------------------------------------------------------------------------- /test/vrhose/post_flags_test.exs: -------------------------------------------------------------------------------- 1 | defmodule VRHose.PostFlagsTest do 2 | use ExUnit.Case 3 | 4 | test "works on embed" do 5 | post = 6 | """ 7 | {"text":"Convinced me","$type":"app.bsky.feed.post","embed":{"$type":"app.bsky.embed.record","record":{"cid":"bafyreiczor6cyu2ar6aftgrdsmtgntj42uvvusmvnuz2kyiy7x35wb5s7u","uri":"at://did:plc:kbfrbbdkkw5l6g6isuxyqrje/app.bsky.feed.post/3lcf5dluaqc23"}},"langs":["en"],"createdAt":"2024-12-03T10:37:25.131Z"} 8 | """ 9 | |> Jason.decode!() 10 | 11 | flags = VRHose.Ingestor.post_flags_for(post) 12 | assert String.contains?(flags, "q") 13 | end 14 | 15 | test "filters out post with just embed" do 16 | post = 17 | """ 18 | {"text":"","$type":"app.bsky.feed.post","embed":{"$type":"app.bsky.embed.external","external":{"uri":"https://youtube.com/shorts/jeRqbT6KU0w?si=EnjiH1_pnYhA1p72","thumb":{"$type":"blob","ref":{"$link":"bafkreihx4tdapw6zpt7fxgspswnfdpfeixmkupjollgdyn6zzq2sdq4dx4"},"mimeType":"image/jpeg","size":37954},"title":"Clip of Donald Trump Mocking a Disabled Person Resurfaces","description":"YouTube video by NowThis Impact"}},"langs":["en"],"createdAt":"2024-12-03T11:28:04.158Z"} 19 | """ 20 | |> Jason.decode!() 21 | 22 | {_, filtered?} = VRHose.Ingestor.run_filters(post) 23 | assert filtered? 24 | end 25 | 26 | test "filters out these posts i think" do 27 | posts_to_filter = [ 28 | """ 29 | {"text":"","$type":"app.bsky.feed.post","embed":{"$type":"app.bsky.embed.recordWithMedia","media":{"$type":"app.bsky.embed.images","images":[{"alt":"","image":{"$type":"blob","ref":{"$link":"bafkreiboaz6mjvtaw2nv2d6plixlc6w4c3mkzractx4klzwbmw7go56a6e"},"mimeType":"image/jpeg","size":544839},"aspectRatio":{"width":934,"height":931}},{"alt":"","image":{"$type":"blob","ref":{"$link":"bafkreidk37e7uecrekovsm3pxdbgkuovimswvyk67kd2yb4oxicat3efny"},"mimeType":"image/jpeg","size":487014},"aspectRatio":{"width":750,"height":945}},{"alt":"","image":{"$type":"blob","ref":{"$link":"bafkreigzkjdviuo4plac6ccx56d3uamw3ym5tve336tp23ln6dzqlhktju"},"mimeType":"image/jpeg","size":74021},"aspectRatio":{"width":960,"height":736}},{"alt":"","image":{"$type":"blob","ref":{"$link":"bafkreicvoonwjowxyr4io67zi5237cxmpymr5opnduc62dfqvgwmb4ru34"},"mimeType":"image/jpeg","size":162080},"aspectRatio":{"width":960,"height":887}}]},"record":{"$type":"app.bsky.embed.record","record":{"cid":"bafyreia3e4orqipmnbrznrml5bjdllwsjtccaqd5ii7jqvpn6z5tjmik7u","uri":"at://did:plc:vowtbiwp2g2rqsq3ftrv6gqz/app.bsky.feed.post/3ld2e45ecl222"}}},"langs":["en"],"createdAt":"2024-12-14T04:49:25.192Z"} 30 | """, 31 | """ 32 | {"text":"","$type":"app.bsky.feed.post","embed":{"$type":"app.bsky.embed.recordWithMedia","media":{"$type":"app.bsky.embed.external","external":{"uri":"https://open.spotify.com/album/5hzSssDOGvtV8LtqHDdoDS?si=IyOLfdTGQlW6Pre2fcGLxg","thumb":{"$type":"blob","ref":{"$link":"bafkreiebiydw5yekm2rlsrdlatqhkvay7bzorecegotohqjexi5jgz3bdy"},"mimeType":"image/jpeg","size":277394},"title":"Xeno","description":"Santino Le Saint · EP · 2018 · 3 songs"}},"record":{"$type":"app.bsky.embed.record","record":{"cid":"bafyreiegqnwmxhb4lraxccmgsp7bf4w7xm4y47x2yob7igtkxfmw2imc7a","uri":"at://did:plc:rqehws5hhimzu47hkjal7i5w/app.bsky.feed.post/3ld4lmfer722e"}}},"langs":["en"],"createdAt":"2024-12-14T05:00:18.373Z"} 33 | """, 34 | """ 35 | 
{"text":"","$type":"app.bsky.feed.post","embed":{"$type":"app.bsky.embed.recordWithMedia","media":{"$type":"app.bsky.embed.external","external":{"uri":"https://media.tenor.com/WB_3sRiDCiAAAAAC/lonely-island-the-creep.gif?hh=280&ww=498","thumb":{"$type":"blob","ref":{"$link":"bafkreifl6wnsejq3m7lguxpwtwgywbjanmodbquecp4b63jvijph5t6t2m"},"mimeType":"image/jpeg","size":102167},"title":"Lonely Island The Creep GIF","description":"ALT: Lonely Island The Creep GIF"}},"record":{"$type":"app.bsky.embed.record","record":{"cid":"bafyreifjiprcysvxoiko2vo7uknboosj232bzwlno2bbeztbj37tiwzt7m","uri":"at://did:plc:egiwo4lte2phjkajxjd4a4ba/app.bsky.feed.post/3ldae4tez5k2x"}}},"langs":["en"],"createdAt":"2024-12-14T05:02:00.713Z"} 36 | """, 37 | """ 38 | {"text":"","$type":"app.bsky.feed.post","embed":{"$type":"app.bsky.embed.recordWithMedia","media":{"$type":"app.bsky.embed.video","video":{"$type":"blob","ref":{"$link":"bafkreihigop2xzvlktxcfjajihrfuvyuybhkofeiyzlr7xf2kr7onmr72e"},"mimeType":"video/mp4","size":1983468},"aspectRatio":{"width":888,"height":1920}},"record":{"$type":"app.bsky.embed.record","record":{"cid":"bafyreidx3drzczjgeq24lfvbtokl7qokctlag2clq2yx5uyodaoyiloe5i","uri":"at://did:plc:za4pbpzm45dsgw6pcyad6yyy/app.bsky.feed.post/3ld4rptp6v22x"}}},"langs":["en"],"createdAt":"2024-12-14T05:03:04.194Z"} 39 | """, 40 | """ 41 | {"text":"","$type":"app.bsky.feed.post","embed":{"$type":"app.bsky.embed.recordWithMedia","media":{"$type":"app.bsky.embed.images","images":[{"alt":"","image":{"$type":"blob","ref":{"$link":"bafkreih77l3xhgf6kj6nfluqux5eq423c2wkk3xqo4cz62xwc3fkyg7rnu"},"mimeType":"image/jpeg","size":255422},"aspectRatio":{"width":1440,"height":1443}}]},"record":{"$type":"app.bsky.embed.record","record":{"cid":"bafyreid4wzky7xogqeuhebb63ez237ktfelkgznfjy67uwi6b3ob2workm","uri":"at://did:plc:5o6k7jvowuyaquloafzn3cfw/app.bsky.feed.post/3ldadyhkmic2s"}}},"langs":["en"],"createdAt":"2024-12-14T05:03:26.689Z"} 42 | """ 43 | ] 44 | 45 | posts_to_filter 46 | |> Enum.map(fn post_json_text -> 47 | post = Jason.decode!(post_json_text) 48 | {_, filtered?} = VRHose.Ingestor.run_filters(post) 49 | assert filtered? 
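# note: every fixture above is a recordWithMedia-style post whose "text" is "",
# so run_filters/1 should hit the empty-text rule and return {_, true}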
50 | end) 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /lib/vrhose/application.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Application do 2 | # See https://hexdocs.pm/elixir/Application.html 3 | # for more information on OTP Applications 4 | @moduledoc false 5 | 6 | use Application 7 | require Logger 8 | 9 | @jetstream "wss://jetstream.fire.hose.cam/subscribe" <> 10 | "?wantedCollections=app.bsky.feed.post" <> 11 | "&wantedCollections=app.bsky.feed.like" <> 12 | "&wantedCollections=app.bsky.graph.follow" <> 13 | "&wantedCollections=app.bsky.graph.block" <> 14 | "&wantedCollections=app.bsky.feed.repost" <> 15 | "&wantedCollections=app.bsky.actor.profile" <> 16 | "&compress=true" 17 | 18 | @impl true 19 | def start(_type, _args) do 20 | VRHose.TimelinerStorage.init(System.schedulers_online()) 21 | 22 | children = 23 | [ 24 | VRHoseWeb.Telemetry 25 | ] ++ 26 | repos() ++ 27 | [ 28 | {VRHose.QuickLeader, name: VRHose.QuickLeader}, 29 | {Finch, 30 | name: VRHose.Finch, 31 | pools: %{ 32 | :default => [size: 50, count: 50] 33 | }}, 34 | {DNSCluster, query: Application.get_env(:vrhose, :dns_cluster_query) || :ignore}, 35 | {Phoenix.PubSub, name: VRHose.PubSub}, 36 | { 37 | Registry, 38 | # , partitions: System.schedulers_online()}, 39 | keys: :duplicate, name: Registry.Timeliners 40 | }, 41 | {ExHashRing.Ring, name: VRHose.Hydrator.Ring} 42 | ] ++ 43 | hydration_workers() ++ 44 | [ 45 | {VRHose.Ingestor, name: {:global, VRHose.Ingestor}}, 46 | %{ 47 | start: 48 | {VRHose.Websocket, :start_and_connect, 49 | [ 50 | [ 51 | url: @jetstream, 52 | send_to: VRHose.Ingestor 53 | ] 54 | ]}, 55 | id: "websocket" 56 | }, 57 | VRHoseWeb.Endpoint 58 | ] ++ timeliner_workers() ++ janitor_workers() 59 | 60 | start_telemetry() 61 | IO.inspect(children, label: "application tree") 62 | 63 | # See https://hexdocs.pm/elixir/Supervisor.html 64 | # for other strategies and supported options 65 | opts = [strategy: :one_for_one, name: VRHose.Supervisor, max_restarts: 10] 66 | Supervisor.start_link(children, opts) 67 | end 68 | 69 | def primaries() do 70 | Application.fetch_env!(:vrhose, :ecto_repos) 71 | end 72 | 73 | defp repos() do 74 | Application.fetch_env!(:vrhose, :ecto_repos) 75 | |> Enum.map(fn primary -> 76 | primary 77 | |> to_string 78 | |> then(fn 79 | "Elixir.VRHose.Repo" <> _ -> 80 | spec = primary.repo_spec() 81 | [primary] ++ spec.read_replicas ++ spec.dedicated_replicas 82 | 83 | _ -> 84 | [] 85 | end) 86 | end) 87 | |> Enum.reduce(fn x, acc -> x ++ acc end) 88 | |> Enum.map(fn repo -> 89 | case Application.fetch_env(:vrhose, repo) do 90 | :error -> 91 | raise RuntimeError, "Repo #{repo} not configured" 92 | 93 | {:ok, cfg} -> 94 | if Access.get(cfg, :database) == nil do 95 | raise RuntimeError, "Repo #{repo} not configured. 
missing database" 96 | end 97 | 98 | repo 99 | end 100 | end) 101 | end 102 | 103 | def hydration_workers() do 104 | 1..20 105 | |> Enum.map(fn i -> 106 | %{ 107 | id: "worker_#{i}", 108 | start: { 109 | VRHose.Hydrator, 110 | :start_link, 111 | [ 112 | [ 113 | worker_id: "worker_#{i}" 114 | ] 115 | ] 116 | } 117 | } 118 | end) 119 | end 120 | 121 | def timeliner_workers() do 122 | 1..System.schedulers_online() 123 | |> Enum.map(fn i -> 124 | worker_id = "timeliner_#{i}" 125 | 126 | %{ 127 | start: 128 | {VRHose.Timeliner, :start_link, 129 | [ 130 | [ 131 | register_with: VRHose.Ingestor, 132 | worker_id: worker_id 133 | # name: {:via, Registry, {Registry.Timeliners, "timeliner", :awoo}} 134 | ] 135 | ]}, 136 | id: worker_id |> String.to_atom() 137 | } 138 | end) 139 | end 140 | 141 | defp start_telemetry do 142 | require Prometheus.Registry 143 | 144 | if Application.get_env(:prometheus, VRHose.Repo.Instrumenter) do 145 | Logger.info("starting db telemetry...") 146 | 147 | :ok = 148 | :telemetry.attach( 149 | "prometheus-ecto", 150 | [:vrhose, :repo, :query], 151 | &VRHose.Repo.Instrumenter.handle_event/4, 152 | %{} 153 | ) 154 | 155 | VRHose.Repo.Instrumenter.setup() 156 | end 157 | 158 | VRHoseWeb.Endpoint.MetricsExporter.setup() 159 | VRHoseWeb.Endpoint.PipelineInstrumenter.setup() 160 | VRHose.Ingestor.Metrics.setup() 161 | VRHose.Timeliner.Metrics.setup() 162 | 163 | # Note: disabled until prometheus-phx is integrated into prometheus-phoenix: 164 | # YtSearchWeb.Endpoint.Instrumenter.setup() 165 | PrometheusPhx.setup() 166 | Logger.info("telemetry started!") 167 | end 168 | 169 | defp janitor_specs do 170 | [ 171 | [VRHose.Identity.Janitor, [every: 60, jitter: -60..60]], 172 | [VRHose.Repo.Janitor, [every: 60, jitter: -30..30]] 173 | ] 174 | end 175 | 176 | defp janitor_workers do 177 | janitor_specs() 178 | |> Enum.map(fn [module, opts] -> 179 | VRHose.Tinycron.new(module, opts) 180 | end) 181 | end 182 | 183 | # Tell Phoenix to update the endpoint configuration 184 | # whenever the application is updated. 185 | @impl true 186 | def config_change(changed, _new, removed) do 187 | VRHoseWeb.Endpoint.config_change(changed, removed) 188 | :ok 189 | end 190 | end 191 | -------------------------------------------------------------------------------- /lib/vrhose/websocket.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Websocket do 2 | use GenServer 3 | 4 | require Logger 5 | require Mint.HTTP 6 | 7 | defstruct [ 8 | :conn, 9 | :websocket, 10 | :request_ref, 11 | :caller, 12 | :caller_pid, 13 | :status, 14 | :resp_headers, 15 | :closing? 
16 | ] 17 | 18 | def start_and_connect(opts) do 19 | {:ok, pid} = GenServer.start_link(__MODULE__, opts) 20 | url = opts |> Keyword.get(:url) 21 | send_to = opts |> Keyword.get(:send_to) 22 | {:ok, :connected} = GenServer.call(pid, {:connect, url, send_to}) 23 | {:ok, pid} 24 | end 25 | 26 | def connect(url) do 27 | with {:ok, socket} <- GenServer.start_link(__MODULE__, []), 28 | {:ok, :connected} <- GenServer.call(socket, {:connect, url, self()}) do 29 | {:ok, socket} 30 | end 31 | end 32 | 33 | def send_message(pid, text) do 34 | GenServer.call(pid, {:send_text, text}) 35 | end 36 | 37 | def send_ping(pid) do 38 | GenServer.call(pid, :ping) 39 | end 40 | 41 | def close(pid, code, reason) do 42 | GenServer.call(pid, {:close, code, reason}) 43 | end 44 | 45 | @impl GenServer 46 | def init(_opts) do 47 | {:ok, %__MODULE__{}} 48 | end 49 | 50 | @impl GenServer 51 | def handle_call({:send_text, text}, _from, state) do 52 | {:ok, state} = send_frame(state, {:text, text}) 53 | {:reply, :ok, state} 54 | end 55 | 56 | @impl GenServer 57 | def handle_call(:ping, _from, state) do 58 | with {:ok, state} <- send_frame(state, {:ping, "ping!"}) do 59 | {:reply, :ok, state} 60 | else 61 | v -> 62 | {:reply, {:error, v}, state} 63 | end 64 | end 65 | 66 | @impl GenServer 67 | def handle_call({:close, code, reason}, _from, state) do 68 | _ = send_frame(state, {:close, code, reason}) 69 | Mint.HTTP.close(state.conn) 70 | {:reply, :ok, state} 71 | end 72 | 73 | @impl GenServer 74 | def handle_call({:connect, url, caller_pid}, from, state) do 75 | Logger.info("connecting to #{url}") 76 | uri = URI.parse(url) 77 | 78 | http_scheme = 79 | case uri.scheme do 80 | "ws" -> :http 81 | "wss" -> :https 82 | end 83 | 84 | ws_scheme = 85 | case uri.scheme do 86 | "ws" -> :ws 87 | "wss" -> :wss 88 | end 89 | 90 | path = 91 | case uri.query do 92 | nil -> uri.path 93 | query -> uri.path <> "?" 
<> query 94 | end 95 | 96 | with {:ok, conn} <- 97 | Mint.HTTP1.connect(http_scheme, uri.host, uri.port, 98 | transport_opts: [inet6: true, inet4: true, timeout: 3000] 99 | ), 100 | {:ok, conn, ref} <- Mint.WebSocket.upgrade(ws_scheme, conn, path, []) do 101 | state = %{state | conn: conn, request_ref: ref, caller: from, caller_pid: caller_pid} 102 | Logger.info("connected to #{url}!") 103 | send(caller_pid, {:ws_connected, self()}) 104 | {:noreply, state} 105 | else 106 | {:error, reason} -> 107 | Logger.error("failed to connect, #{inspect(reason)}") 108 | {:reply, {:error, reason}, state} 109 | 110 | {:error, conn, reason} -> 111 | Logger.error("failed to connect, #{inspect(conn)}, #{inspect(reason)}") 112 | {:reply, {:error, reason}, put_in(state.conn, conn)} 113 | end 114 | end 115 | 116 | @impl GenServer 117 | def handle_info(message, state) do 118 | case Mint.WebSocket.stream(state.conn, message) do 119 | {:ok, conn, responses} -> 120 | state = put_in(state.conn, conn) |> handle_responses(responses) 121 | if state.closing?, do: do_close(state), else: {:noreply, state} 122 | 123 | {:error, conn, reason, _responses} -> 124 | state = put_in(state.conn, conn) |> reply({:error, reason}) 125 | {:noreply, state} 126 | 127 | :unknown -> 128 | {:noreply, state} 129 | end 130 | end 131 | 132 | defp handle_responses(state, responses) 133 | 134 | defp handle_responses(%{request_ref: ref} = state, [{:status, ref, status} | rest]) do 135 | put_in(state.status, status) 136 | |> handle_responses(rest) 137 | end 138 | 139 | defp handle_responses(%{request_ref: ref} = state, [{:headers, ref, resp_headers} | rest]) do 140 | put_in(state.resp_headers, resp_headers) 141 | |> handle_responses(rest) 142 | end 143 | 144 | defp handle_responses(%{request_ref: ref} = state, [{:done, ref} | rest]) do 145 | case Mint.WebSocket.new(state.conn, ref, state.status, state.resp_headers) do 146 | {:ok, conn, websocket} -> 147 | %{state | conn: conn, websocket: websocket, status: nil, resp_headers: nil} 148 | |> reply({:ok, :connected}) 149 | |> handle_responses(rest) 150 | 151 | {:error, conn, reason} -> 152 | put_in(state.conn, conn) 153 | |> reply({:error, reason}) 154 | end 155 | end 156 | 157 | defp handle_responses(%{request_ref: ref, websocket: websocket} = state, [ 158 | {:data, ref, data} | rest 159 | ]) 160 | when websocket != nil do 161 | case Mint.WebSocket.decode(websocket, data) do 162 | {:ok, websocket, frames} -> 163 | put_in(state.websocket, websocket) 164 | |> handle_frames(frames) 165 | |> handle_responses(rest) 166 | 167 | {:error, websocket, reason} -> 168 | put_in(state.websocket, websocket) 169 | |> reply({:error, reason}) 170 | end 171 | end 172 | 173 | defp handle_responses(state, [_response | rest]) do 174 | handle_responses(state, rest) 175 | end 176 | 177 | defp handle_responses(state, []), do: state 178 | 179 | defp send_frame(state, frame) do 180 | with {:ok, websocket, data} <- Mint.WebSocket.encode(state.websocket, frame), 181 | state = put_in(state.websocket, websocket), 182 | {:ok, conn} <- Mint.WebSocket.stream_request_body(state.conn, state.request_ref, data) do 183 | {:ok, put_in(state.conn, conn)} 184 | else 185 | {:error, %Mint.WebSocket{} = websocket, reason} -> 186 | {:error, put_in(state.websocket, websocket), reason} 187 | 188 | {:error, conn, reason} -> 189 | {:error, put_in(state.conn, conn), reason} 190 | end 191 | end 192 | 193 | def handle_frames(state, frames) do 194 | Enum.reduce(frames, state, fn 195 | # reply to pings with pongs 196 | {:ping, data}, state -> 197 | 
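# this ping clause and the ones below forward everything to caller_pid (the
# Ingestor in this app): pings get a pong back, pongs become {:websocket_pong, data},
# text/binary frames become {:websocket_text, ts, text} / {:websocket_binary, ts, data},
# and a close frame only flips closing?, letting handle_info/2 call do_close/1 afterwards.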
{:ok, state} = send_frame(state, {:pong, data}) 198 | state 199 | 200 | {:pong, data}, state -> 201 | send(state.caller_pid, {:websocket_pong, data}) 202 | state 203 | 204 | {:close, _code, reason}, state -> 205 | Logger.debug("Closing connection: #{inspect(reason)}") 206 | %{state | closing?: true} 207 | 208 | {:text, text}, state -> 209 | timestamp = DateTime.utc_now() 210 | send(state.caller_pid, {:websocket_text, timestamp, text}) 211 | state 212 | 213 | {:binary, text}, state -> 214 | timestamp = DateTime.utc_now() 215 | send(state.caller_pid, {:websocket_binary, timestamp, text}) 216 | state 217 | 218 | frame, state -> 219 | Logger.warning("Unexpected frame received: #{inspect(frame)}") 220 | state 221 | end) 222 | end 223 | 224 | defp do_close(state) do 225 | # Streaming a close frame may fail if the server has already closed 226 | # for writing. 227 | Logger.info("closing #{inspect(state)}") 228 | _ = send_frame(state, {:close, 1000, nil}) 229 | Mint.HTTP.close(state.conn) 230 | {:stop, :normal, state} 231 | end 232 | 233 | defp reply(state, response) do 234 | if state.caller, do: GenServer.reply(state.caller, response) 235 | put_in(state.caller, nil) 236 | end 237 | end 238 | -------------------------------------------------------------------------------- /lib/vrhose/timeliner_storage.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const ring = @import("ring_buffer.zig"); 3 | const beam = @import("beam"); 4 | 5 | const MAX_POST_BUFFER_SIZE = if (DEBUG) 400 else 35000; // assuming firehose goes bananas and is 500posts/sec, and we want to hold 70sec of history, that's 35000 posts 6 | const MAX_POST_RETURN_SIZE = if (DEBUG) 100 else 10000; 7 | 8 | const IncomingPost = struct { 9 | timestamp: f64, 10 | text: []const u8, 11 | languages: []const u8, 12 | author_name: []const u8, 13 | author_handle: []const u8, 14 | author_did: []const u8, 15 | flags: []const u8, 16 | world_id: ?[]const u8, 17 | micro_id: []const u8, 18 | hash: i64, 19 | }; 20 | 21 | const Post = struct { 22 | init: bool, 23 | timestamp: f64, 24 | text: []const u8, 25 | languages: []const u8, 26 | author_name: []const u8, 27 | author_handle: []const u8, 28 | author_did: []const u8, 29 | flags: []const u8, 30 | world_id: ?[]const u8, 31 | micro_id: []const u8, 32 | hash: i64, 33 | 34 | const Self = @This(); 35 | pub fn createFromIncoming(post: IncomingPost, allocator: std.mem.Allocator) !Self { 36 | return Self{ 37 | .init = true, 38 | .timestamp = post.timestamp, 39 | .hash = post.hash, 40 | .text = try allocator.dupe(u8, post.text), 41 | .languages = try allocator.dupe(u8, post.languages), 42 | .author_name = try allocator.dupe(u8, post.author_name), 43 | .author_handle = try allocator.dupe(u8, post.author_handle), 44 | .author_did = try allocator.dupe(u8, post.author_did), 45 | .flags = try allocator.dupe(u8, post.flags), 46 | .world_id = if (post.world_id) |wrld_id| try allocator.dupe(u8, wrld_id) else null, 47 | .micro_id = try allocator.dupe(u8, post.micro_id), 48 | }; 49 | } 50 | 51 | pub fn deinitVia(self: *Self, allocator: std.mem.Allocator) void { 52 | if (self.init) { 53 | allocator.free(self.text); 54 | allocator.free(self.languages); 55 | allocator.free(self.author_name); 56 | allocator.free(self.author_handle); 57 | allocator.free(self.author_did); 58 | allocator.free(self.micro_id); 59 | allocator.free(self.flags); 60 | if (self.world_id) |id| allocator.free(id); 61 | self.init = false; 62 | } 63 | } 64 | }; 65 | const PostBuffer = 
ring.RingBuffer(Post); 66 | const Storage = struct { 67 | allocator: std.mem.Allocator, 68 | posts: PostBuffer, 69 | 70 | const Self = @This(); 71 | pub fn init(allocator: std.mem.Allocator) Self { 72 | debug("initializing a storage", .{}); 73 | const buf = PostBuffer.init(allocator, MAX_POST_BUFFER_SIZE) catch @panic("out of memory for post buffer init"); 74 | const self = Self{ 75 | .posts = buf, 76 | .allocator = allocator, 77 | }; 78 | return self; 79 | } 80 | 81 | pub fn deinit(self: *Self) void { 82 | for (self.posts.buffer) |*post| { 83 | post.deinitVia(self.posts.allocator); 84 | } 85 | self.posts.deinit(); 86 | } 87 | }; 88 | 89 | var mutex = std.Thread.Mutex{}; 90 | var debug_log_mutex = std.Thread.Mutex{}; 91 | 92 | // this approach (one core accesses only one "storage" out of the array) 93 | // lets us access those handles without requiring a global mutex, as long as fetching them stays in the same handle 94 | var storages: []Storage = undefined; 95 | var last_handle: usize = 0; 96 | 97 | fn debug(comptime fmt: []const u8, args: anytype) void { 98 | debug_log_mutex.lock(); 99 | defer debug_log_mutex.unlock(); 100 | std.debug.print(fmt ++ "\n", args); 101 | } 102 | 103 | pub fn init(num_cores: usize) void { 104 | return initBeam(num_cores); 105 | } 106 | 107 | fn initA(allocator: std.mem.Allocator, num_cores: usize) void { 108 | debug("initializing for given amount of cores: {d}", .{num_cores}); 109 | mutex.lock(); 110 | defer mutex.unlock(); 111 | storages = allocator.alloc(Storage, num_cores) catch @panic("out of memory for initialization"); 112 | for (0..num_cores) |i| { 113 | debug("init core {d}", .{i}); 114 | const storage = Storage.init(allocator); 115 | debug("initted {d}", .{i}); 116 | storages[i] = storage; 117 | } 118 | } 119 | fn initBeam(num_cores: usize) void { 120 | debug("initializing for given amount of cores: {d}", .{num_cores}); 121 | mutex.lock(); 122 | defer mutex.unlock(); 123 | 124 | storages = beam.allocator.alloc(Storage, num_cores) catch @panic("out of memory for initialization"); 125 | for (0..num_cores) |i| { 126 | debug("init core {d}", .{i}); 127 | const storage = Storage.init(beam.allocator); 128 | debug("initted {d}", .{i}); 129 | storages[i] = storage; 130 | } 131 | } 132 | 133 | pub fn create() usize { 134 | const handle = last_handle; 135 | debug("creating for handle {d}", .{handle}); 136 | last_handle += 1; 137 | if (last_handle > storages.len) { 138 | // TODO: make this possible. a timeliner could die and we can attach to terminate(). release the storage then 139 | // reassign so that teimeliner crashes don't crash the entire app 140 | @panic("no more handles available. 
one of the timeliners crashed and wants to continue, but that is not possible."); 141 | } 142 | return handle; 143 | } 144 | 145 | pub fn insert_post(handle: usize, post: IncomingPost) void { 146 | return insertPost((&storages[handle]).allocator, handle, post); 147 | } 148 | 149 | const DEBUG = false; 150 | 151 | fn insertPost(allocator: std.mem.Allocator, handle: usize, post: IncomingPost) void { 152 | //debug("insert!", .{}); 153 | const storage = &storages[handle]; 154 | if (storage.posts.len == MAX_POST_BUFFER_SIZE) { 155 | const post_to_delete = storage.posts.pop().?; 156 | if (DEBUG and handle == 0) { 157 | debug("popped {}", .{post_to_delete.timestamp}); 158 | } 159 | post_to_delete.deinitVia(allocator); 160 | } 161 | std.debug.assert(storage.posts.len <= MAX_POST_BUFFER_SIZE); 162 | if (DEBUG and false) { 163 | debug( 164 | "[{d}] {d}/{d}, timestamp={}, text={s}, languages={s}, author_handle={s}, hash={}", 165 | .{ handle, storage.posts.readableLength(), MAX_POST_BUFFER_SIZE, post.timestamp, post.text, post.languages, post.author_handle, post.hash }, 166 | ); 167 | } 168 | const owned_post = Post.createFromIncoming(post, allocator) catch @panic("ran out of memory for string dupe"); 169 | storage.posts.push(owned_post) catch @panic("must not be out of memory here"); 170 | } 171 | 172 | //pub fn fetch(handle: usize, timestamp: f64) ![]*const Post { 173 | // return fetchA((&storages[handle]).allocator, handle, timestamp); 174 | //} 175 | 176 | pub fn fetch(handle: usize, timestamp: f64) !beam.term { 177 | const storage = &storages[handle]; 178 | 179 | var cnt: usize = 0; 180 | var it1 = storage.posts.iterator(); 181 | while (it1.next()) |post| { 182 | if (post.timestamp >= timestamp) { 183 | cnt += 1; 184 | } 185 | } 186 | if (cnt > MAX_POST_RETURN_SIZE) cnt = MAX_POST_RETURN_SIZE; 187 | var result = try storage.allocator.alloc(*const Post, cnt); 188 | 189 | var idx: usize = 0; 190 | var it = storage.posts.iterator(); 191 | while (it.next()) |post| { 192 | if (idx >= result.len) break; 193 | if (post.timestamp >= timestamp) { 194 | result[idx] = post; 195 | idx += 1; 196 | } 197 | } 198 | if (idx != cnt) @panic("must not be!"); 199 | if (DEBUG) debug("sending {d} posts", .{result.len}); 200 | const term = beam.make(result, .{}); 201 | storage.allocator.free(result); 202 | return term; 203 | 204 | //return result; 205 | } 206 | 207 | // test "it works" { 208 | // const allocator = std.testing.allocator; 209 | // initA(allocator, 1); 210 | // defer { 211 | // for (storages) |*storage| { 212 | // storage.deinit(); 213 | // allocator.destroy(storage); 214 | // } 215 | // } 216 | // const handle = create(); 217 | // const BASE_TIMESTAMP = 100377371; 218 | // for (0..MAX_POST_BUFFER_SIZE) |i| { 219 | // insertPost(allocator, handle, .{ 220 | // .timestamp = @floatFromInt(BASE_TIMESTAMP + i), 221 | // .text = "a", 222 | // .languages = "b", 223 | // .author_name = "c", 224 | // .author_handle = "c", 225 | // .hash = @intCast(19327 + i), 226 | // .flags = "f", 227 | // .world_id = "w", 228 | // }); 229 | // } 230 | // const storage = &storages[handle]; 231 | // defer { 232 | // for (storage.posts.buffer) |*post| { 233 | // post.deinitVia(allocator); 234 | // } 235 | // } 236 | 237 | // const posts = try fetchA(allocator, handle, BASE_TIMESTAMP); 238 | // defer deinitGivenList(allocator, posts); 239 | // try std.testing.expectEqual(MAX_POST_BUFFER_SIZE, posts.len); 240 | // insertPost(allocator, handle, .{ 241 | // .timestamp = @floatFromInt(BASE_TIMESTAMP + MAX_POST_BUFFER_SIZE + 1), 242 | // 
.text = "a", 243 | // .languages = "b", 244 | // .author_name = "c", 245 | // .author_handle = "c", 246 | // .hash = @intCast(88567376), 247 | // .flags = "f", 248 | // .world_id = "w", 249 | // }); 250 | 251 | // const posts2 = try fetchA(allocator, handle, BASE_TIMESTAMP); 252 | // defer deinitGivenList(allocator, posts2); 253 | // try std.testing.expectEqual(MAX_POST_BUFFER_SIZE, posts2.len); 254 | // insertPost(allocator, handle, .{ 255 | // .timestamp = @floatFromInt(BASE_TIMESTAMP + MAX_POST_BUFFER_SIZE + 2), 256 | // .text = "a", 257 | // .languages = "b", 258 | // .author_name = "c", 259 | // .author_handle = "c", 260 | // .hash = @intCast(88567376), 261 | // .flags = "f", 262 | // .world_id = "w", 263 | // }); 264 | // const posts3 = try fetchA(allocator, handle, BASE_TIMESTAMP + MAX_POST_BUFFER_SIZE); 265 | // defer deinitGivenList(allocator, posts3); 266 | // try std.testing.expectEqual(2, posts3.len); 267 | // } 268 | 269 | // fn deinitGivenList(allocator: std.mem.Allocator, list: []*Post) void { 270 | // for (list) |post| post.deinitVia(allocator); 271 | // allocator.free(list); 272 | // } 273 | -------------------------------------------------------------------------------- /lib/vrhose/timeliner.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Timeliner do 2 | use GenServer 3 | require Logger 4 | @worlds_in_timeline 10 5 | 6 | defmodule Counters do 7 | defstruct posts: 0, 8 | likes: 0, 9 | reposts: 0, 10 | follows: 0, 11 | blocks: 0, 12 | signups: 0 13 | end 14 | 15 | def start_link(opts \\ []) do 16 | IO.inspect(opts) 17 | GenServer.start_link(__MODULE__, opts, opts) 18 | end 19 | 20 | def fetch_all(pid) do 21 | now = DateTime.utc_now() |> DateTime.to_unix(:second) 22 | GenServer.call(pid, {:fetch, now - 30, false}) 23 | end 24 | 25 | def fetch(pid, timestamp) do 26 | GenServer.call(pid, {:fetch, timestamp, true}) 27 | end 28 | 29 | defmodule Metrics do 30 | use Prometheus.Metric 31 | 32 | def setup() do 33 | Histogram.declare( 34 | name: :vrhose_timeliner_events, 35 | help: "sent posts from timeliner processes to users", 36 | labels: [:call_type], 37 | buckets: 38 | [ 39 | 10..100//10, 40 | 100..1000//100, 41 | 1000..2000//100, 42 | 2000..4000//500, 43 | 4000..10000//1000, 44 | 10000..20000//1500, 45 | 20000..40000//2000 46 | ] 47 | |> Enum.flat_map(&Enum.to_list/1) 48 | |> Enum.uniq() 49 | ) 50 | end 51 | 52 | def sent_events(call_type, amount) do 53 | Histogram.observe( 54 | [ 55 | name: :vrhose_timeliner_events, 56 | labels: [call_type] 57 | ], 58 | amount 59 | ) 60 | end 61 | end 62 | 63 | @impl true 64 | def init(opts) do 65 | Registry.register(Registry.Timeliners, "timeliner", :awoo) 66 | 67 | register_with = 68 | opts 69 | |> Keyword.get(:register_with) 70 | 71 | worker_id = 72 | opts 73 | |> Keyword.get(:worker_id) 74 | 75 | ingestor_pid = GenServer.whereis(register_with) 76 | monitor_ref = Process.monitor(ingestor_pid) 77 | :ok = VRHose.Ingestor.subscribe(register_with) 78 | handle = VRHose.TimelinerStorage.create() 79 | 80 | Process.send_after(self(), :compute_rates, 1000) 81 | 82 | worlds = VRHose.World.last_worlds(@worlds_in_timeline) 83 | 84 | resolved_worlds = 85 | worlds 86 | |> Enum.map(fn world -> 87 | identity = VRHose.Identity.one(world.poster_did) 88 | 89 | if identity == nil do 90 | %{ 91 | id: world.vrchat_id, 92 | author_handle: "@" <> world.poster_did, 93 | author_name: "" 94 | } 95 | else 96 | %{ 97 | id: world.vrchat_id, 98 | author_handle: VRHose.Identity.to_handle(identity), 99 | 
author_name: identity.name 100 | } 101 | end 102 | end) 103 | |> Enum.reverse() 104 | 105 | {:ok, 106 | %{ 107 | registered_with: register_with, 108 | worker_id: worker_id, 109 | storage: handle, 110 | debug_counters: %__MODULE__.Counters{}, 111 | start_time: System.os_time(:second), 112 | counters: %__MODULE__.Counters{}, 113 | rates: [], 114 | monitor_ref: monitor_ref, 115 | world_ids: %{ 116 | time: System.os_time(:millisecond) / 1000, 117 | ids: resolved_worlds 118 | } 119 | }} 120 | end 121 | 122 | @impl true 123 | def handle_continue(:reconnect, state) do 124 | register_with = state.registered_with 125 | Logger.info("#{state.worker_id}: reconnecting to #{inspect(register_with)}") 126 | ingestor_pid = GenServer.whereis(register_with) 127 | monitor_ref = Process.monitor(ingestor_pid) 128 | :ok = VRHose.Ingestor.subscribe(register_with) 129 | {:noreply, put_in(state.monitor_ref, monitor_ref)} 130 | end 131 | 132 | defp persist_world(world_id, author_did) do 133 | case VRHose.QuickLeader.acquire() do 134 | :leader -> 135 | {:ok, _} = VRHose.World.insert(world_id, author_did) 136 | :ok 137 | 138 | :not_leader -> 139 | :ok 140 | end 141 | end 142 | 143 | # last 120 seconds worth of counters 144 | @rate_max_storage 120 145 | 146 | @impl true 147 | def handle_info(:compute_rates, state) do 148 | state = 149 | put_in( 150 | state.rates, 151 | if length(state.rates) >= @rate_max_storage do 152 | state.rates 153 | |> Enum.drop(-1) 154 | |> List.insert_at(0, state.counters) 155 | else 156 | state.rates 157 | |> List.insert_at(0, state.counters) 158 | end 159 | ) 160 | 161 | if state.storage == 0 do 162 | Logger.info("counters: #{inspect(state.counters)}") 163 | end 164 | 165 | state = put_in(state.counters, %__MODULE__.Counters{}) 166 | Process.send_after(self(), :compute_rates, 1000) 167 | {:noreply, state} 168 | end 169 | 170 | @impl true 171 | def handle_info({:post, post}, state) do 172 | state = 173 | if post.world_id != nil do 174 | world_ids = state.world_ids.ids |> Enum.map(fn wrld -> wrld.id end) 175 | IO.inspect(world_ids, label: "world_ids from #{state.storage}") 176 | 177 | put_in(state.world_ids, %{ 178 | time: System.os_time(:millisecond) / 1000, 179 | ids: 180 | unless Enum.member?(world_ids, post.world_id) do 181 | # one of the timeliners must become a leader so it can send this 182 | # world id to the database 183 | :ok = persist_world(post.world_id, post.author_did) 184 | 185 | wrld = %{ 186 | id: post.world_id, 187 | author_handle: post.author_handle, 188 | author_name: post.author_name 189 | } 190 | 191 | if length(state.world_ids.ids) > @worlds_in_timeline do 192 | state.world_ids.ids 193 | # pop oldest 194 | |> Enum.drop(1) 195 | # insert into earliest 196 | |> List.insert_at(-1, wrld) 197 | else 198 | # append 199 | state.world_ids.ids 200 | |> List.insert_at(-1, wrld) 201 | end 202 | |> IO.inspect(label: "AFTER world_ids from #{state.storage}") 203 | else 204 | IO.puts("already a wrld id, not inserting #{state.storage}") 205 | state.world_ids.ids 206 | end 207 | }) 208 | else 209 | state 210 | end 211 | 212 | VRHose.TimelinerStorage.insert_post(state.storage, post) 213 | 214 | state = 215 | put_in(state.counters, %{state.counters | posts: state.counters.posts + 1}) 216 | 217 | state = 218 | put_in(state.debug_counters, %{state.debug_counters | posts: state.debug_counters.posts + 1}) 219 | 220 | {:noreply, state} 221 | end 222 | 223 | @impl true 224 | def handle_info(entity, state) when entity in [:like, :follow, :block, :repost, :signup] do 225 | key = 226 | case entity 
do 227 | :like -> :likes 228 | :follow -> :follows 229 | :block -> :blocks 230 | :repost -> :reposts 231 | :signup -> :signups 232 | end 233 | 234 | state = 235 | put_in( 236 | state.counters, 237 | Map.put(state.counters, key, Map.get(state.counters, key) + 1) 238 | ) 239 | 240 | state = 241 | put_in( 242 | state.debug_counters, 243 | Map.put(state.debug_counters, key, Map.get(state.debug_counters, key) + 1) 244 | ) 245 | 246 | {:noreply, state} 247 | end 248 | 249 | @impl true 250 | def handle_info({:DOWN, down_ref, :process, _pid, reason}, state) do 251 | if state.monitor_ref == down_ref do 252 | Logger.warning("ingestor process died, reason: #{inspect(reason)}") 253 | Logger.warning("waiting 1 second then reconnecting...") 254 | Process.sleep(1000) 255 | {:noreply, state, {:continue, :reconnect}} 256 | else 257 | Logger.warning( 258 | "received unknown ref #{inspect(down_ref)}, expected #{inspect(state.monitor_ref)}" 259 | ) 260 | 261 | {:noreply, state} 262 | end 263 | end 264 | 265 | @impl true 266 | def handle_info(unhandled_message, state) do 267 | Logger.warning("timeliner received unhandled message: #{inspect(unhandled_message)}") 268 | {:noreply, state} 269 | end 270 | 271 | @impl true 272 | def handle_call({:fetch, timestamp, is_delta?}, _, state) do 273 | timeline = 274 | VRHose.TimelinerStorage.fetch(state.storage, timestamp * 1.0) 275 | |> Enum.map(fn post -> 276 | %{ 277 | t: "p", 278 | a: post.author_name, 279 | b: post.author_handle, 280 | c: post.text |> to_string, 281 | d: post.timestamp, 282 | l: post.languages |> to_string, 283 | h: post.hash |> to_string, 284 | f: post.flags |> to_string, 285 | i: post.micro_id |> to_string 286 | } 287 | end) 288 | 289 | timeline_length = length(timeline) 290 | 291 | __MODULE__.Metrics.sent_events( 292 | if is_delta? do 293 | "delta" 294 | else 295 | "init" 296 | end, 297 | timeline_length 298 | ) 299 | 300 | {:reply, 301 | {:ok, 302 | %{ 303 | time: System.os_time(:millisecond) / 1000, 304 | batch: timeline, 305 | worlds: state.world_ids, 306 | rates: rates(state, timestamp) 307 | }}, state} 308 | end 309 | 310 | defp rates(state, timestamp) do 311 | # calculate how many seconds back this timestamp is 312 | now = System.os_time(:second) 313 | seconds_from_now = now - timestamp 314 | 315 | Logger.info("seconds_from_now = #{seconds_from_now}, rate array=#{length(state.rates)}") 316 | 317 | cond do 318 | Enum.empty?(state.rates) -> 319 | Logger.warning("sending inexact rates due to no data!") 320 | 321 | state.counters 322 | |> Map.from_struct() 323 | |> Map.to_list() 324 | |> Enum.map(fn {key, counter} -> 325 | {key, 326 | %{ 327 | rate: counter, 328 | inexact: true 329 | }} 330 | end) 331 | |> Map.new() 332 | 333 | seconds_from_now < 1 -> 334 | Logger.warning("delta too low! 
reusing last counters delta=#{seconds_from_now}") 335 | 336 | state.rates 337 | |> Enum.at(-1) 338 | |> Map.from_struct() 339 | |> Map.to_list() 340 | |> Enum.map(fn {key, counter} -> 341 | {key, 342 | %{ 343 | rate: counter, 344 | inexact: true 345 | }} 346 | end) 347 | |> Map.new() 348 | 349 | true -> 350 | Logger.debug("computing rates from #{seconds_from_now}sec ago") 351 | 352 | rates = 353 | state.rates 354 | |> Enum.slice(0..seconds_from_now) 355 | |> Enum.map(fn counters -> 356 | Map.from_struct(counters) 357 | end) 358 | 359 | sums = 360 | rates 361 | |> Enum.reduce(%{}, fn counters, acc -> 362 | Map.merge(acc, counters, fn _k, v1, v2 -> 363 | v1 + v2 364 | end) 365 | end) 366 | 367 | sums 368 | |> Enum.map(fn {k, v} -> 369 | {k, 370 | %{ 371 | rate: v / length(rates), 372 | inexact: false 373 | }} 374 | end) 375 | |> Map.new() 376 | end 377 | end 378 | end 379 | -------------------------------------------------------------------------------- /lib/vrhose/ingestor.ex: -------------------------------------------------------------------------------- 1 | defmodule VRHose.Ingestor do 2 | use GenServer 3 | require Logger 4 | 5 | # @host "jetstream2.us-west.bsky.network" 6 | # @path "/subscribe" 7 | 8 | # @jetstream "wss://jetstream2.us-west.bsky.network/subscribe?wantedCollections=app.bsky.feed.post" 9 | # Client API 10 | 11 | def start_link(opts \\ []) do 12 | GenServer.start_link(__MODULE__, opts, name: __MODULE__) 13 | end 14 | 15 | def subscribe(ingestor) do 16 | GenServer.call(ingestor, :subscribe) 17 | end 18 | 19 | defmodule Metrics do 20 | use Prometheus.Metric 21 | 22 | def setup() do 23 | Counter.declare( 24 | name: :vrhose_firehose_event_count, 25 | help: "fire hose...... wrow", 26 | labels: [:kind, :operation, :type] 27 | ) 28 | end 29 | 30 | def commit(operation, type) do 31 | Counter.inc( 32 | name: :vrhose_firehose_event_count, 33 | labels: ["commit", to_string(operation), to_string(type)] 34 | ) 35 | end 36 | 37 | def identity() do 38 | Counter.inc( 39 | name: :vrhose_firehose_event_count, 40 | labels: ["identity", "", ""] 41 | ) 42 | end 43 | 44 | def account(active, status) do 45 | Counter.inc( 46 | name: :vrhose_firehose_event_count, 47 | labels: ["account", active, status] 48 | ) 49 | end 50 | end 51 | 52 | # Server Callbacks 53 | 54 | @impl true 55 | def init(_opts) do 56 | Logger.info("initializing ingestor") 57 | Process.send_after(self(), :print_stats, 1000) 58 | Process.send_after(self(), :ping_ws, 20000) 59 | 60 | zstd_ctx = :ezstd.create_decompression_context(8192) 61 | dd = :ezstd.create_ddict(File.read!(Path.join([:code.priv_dir(:vrhose), "/zstd_dictionary"]))) 62 | :ezstd.select_ddict(zstd_ctx, dd) 63 | 64 | {:ok, 65 | %{ 66 | subscribers: [], 67 | handles: %{}, 68 | counter: 0, 69 | message_counter: 0, 70 | unfiltered_post_counter: 0, 71 | filtered_post_counter: 0, 72 | zero_counter: 0, 73 | conn_pid: nil, 74 | pong: true, 75 | zstd_ctx: zstd_ctx 76 | }, {:continue, :connect}} 77 | end 78 | 79 | @impl true 80 | def handle_continue(:connect, state) do 81 | # Logger.info("opening connection to #{@jetstream}...") 82 | 83 | # {:ok, pid} = VRHose.Websocket.connect(@jetstream) 84 | # {:noreply, put_in(state.conn_pid, pid)} 85 | {:noreply, state} 86 | end 87 | 88 | @impl true 89 | def handle_call(:subscribe, {pid, _}, state) do 90 | {:reply, :ok, put_in(state.subscribers, state.subscribers ++ [pid])} 91 | end 92 | 93 | defp kill_websocket(state, reason) do 94 | ws_pid = 95 | Supervisor.which_children(VRHose.Supervisor) 96 | |> Enum.filter(fn {name, _, _, _} -> 97 
| name == "websocket" 98 | end) 99 | |> Enum.at(0) 100 | |> then(fn {_, pid, _, _} -> pid end) 101 | 102 | Logger.warning( 103 | "killing #{inspect(ws_pid)} due to reason=#{inspect(reason)}.. ws should restart afterwards" 104 | ) 105 | 106 | if ws_pid != :restarting do 107 | :erlang.exit(ws_pid, reason) 108 | end 109 | 110 | put_in(state.conn_pid, nil) 111 | end 112 | 113 | @impl true 114 | def handle_info({:ws_connected, pid}, state) do 115 | {:noreply, put_in(state.conn_pid, pid)} 116 | end 117 | 118 | @impl true 119 | def handle_info(:print_stats, state) do 120 | Logger.info( 121 | "#{DateTime.utc_now()} - message counter: #{state.message_counter}, unfiltered posts: #{state.unfiltered_post_counter}, filtered posts: #{state.filtered_post_counter}" 122 | ) 123 | 124 | if state.zero_counter > 0 do 125 | Logger.warning("got zero messages for the #{state.zero_counter} time") 126 | end 127 | 128 | state = 129 | if state.zero_counter > 20 do 130 | Logger.error("must restart") 131 | kill_websocket(state, :zero_msgs) 132 | else 133 | state 134 | end 135 | 136 | Process.send_after(self(), :print_stats, 1000) 137 | 138 | zero_counter = 139 | if state.message_counter > 0 do 140 | 0 141 | else 142 | state.zero_counter + 1 143 | end 144 | 145 | state = put_in(state.zero_counter, zero_counter) 146 | state = put_in(state.message_counter, 0) 147 | state = put_in(state.unfiltered_post_counter, 0) 148 | state = put_in(state.filtered_post_counter, 0) 149 | {:noreply, state} 150 | end 151 | 152 | @impl true 153 | def handle_info(:ping_ws, state) do 154 | Logger.info("#{DateTime.utc_now()} - pinging websocket") 155 | 156 | Process.send_after(self(), :check_websocket_pong, 10000) 157 | 158 | if state.conn_pid do 159 | Process.send_after(self(), :ping_ws, 20000) 160 | :ok = VRHose.Websocket.send_ping(state.conn_pid) 161 | {:noreply, put_in(state.pong, false)} 162 | else 163 | # restart the websocket immediately (its been 20sec) 164 | Logger.warning( 165 | "no connection available to ping, this should not happen, finding process to kill.." 
166 | ) 167 | 168 | state = kill_websocket(state, :no_pid) 169 | 170 | {:noreply, put_in(state.pong, true)} 171 | end 172 | end 173 | 174 | @impl true 175 | def handle_info({:websocket_binary, timestamp, compressed}, state) do 176 | decompressed = :ezstd.decompress_streaming(state.zstd_ctx, compressed) 177 | 178 | case decompressed do 179 | {:error, v} -> 180 | Logger.error("Decompression error: #{inspect(v)}") 181 | 182 | {:noreply, state} 183 | 184 | decompressed -> 185 | send(self(), {:websocket_text, timestamp, decompressed}) 186 | {:noreply, state} 187 | end 188 | end 189 | 190 | @impl true 191 | def handle_info({:websocket_text, timestamp, text}, state) do 192 | msg = 193 | text 194 | |> Jason.decode!() 195 | 196 | state = put_in(state.message_counter, state.message_counter + 1) 197 | 198 | case msg["kind"] do 199 | "commit" -> 200 | case msg["commit"]["operation"] do 201 | "create" -> 202 | event_type = msg["commit"]["record"]["$type"] 203 | __MODULE__.Metrics.commit(:create, event_type) 204 | 205 | case event_type do 206 | "app.bsky.feed.post" -> 207 | state = put_in(state.unfiltered_post_counter, state.unfiltered_post_counter + 1) 208 | state = fanout_post(state, timestamp, msg) 209 | {:noreply, state} 210 | 211 | "app.bsky.feed.like" -> 212 | fanout(state, :like) 213 | {:noreply, state} 214 | 215 | "app.bsky.graph.follow" -> 216 | fanout(state, :follow) 217 | {:noreply, state} 218 | 219 | "app.bsky.graph.block" -> 220 | fanout(state, :block) 221 | {:noreply, state} 222 | 223 | "app.bsky.feed.repost" -> 224 | fanout(state, :repost) 225 | {:noreply, state} 226 | 227 | "app.bsky.actor.profile" -> 228 | fanout(state, :signup) 229 | {:noreply, state} 230 | end 231 | 232 | "delete" -> 233 | event_type = msg["commit"]["collection"] 234 | __MODULE__.Metrics.commit(:delete, event_type) 235 | {:noreply, state} 236 | 237 | "update" -> 238 | event_type = msg["commit"]["record"]["$type"] 239 | __MODULE__.Metrics.commit(:update, event_type) 240 | {:noreply, state} 241 | 242 | v -> 243 | Logger.warning("Unsupported commit type: #{inspect(v)} from #{inspect(msg)}") 244 | {:noreply, state} 245 | end 246 | 247 | "identity" -> 248 | __MODULE__.Metrics.identity() 249 | did = msg["identity"]["did"] 250 | handle = msg["identity"]["handle"] 251 | {:noreply, put_in(state.handles, Map.put(state.handles, did, handle))} 252 | 253 | "account" -> 254 | active? = msg["account"]["active"] 255 | status = msg["account"]["status"] 256 | 257 | __MODULE__.Metrics.account( 258 | if active? do 259 | "active" 260 | else 261 | "inactive" 262 | end, 263 | status 264 | ) 265 | 266 | {:noreply, state} 267 | 268 | v -> 269 | Logger.warning("Unsupported message from jetstream: #{inspect(v)}: #{inspect(msg)}") 270 | {:noreply, state} 271 | end 272 | end 273 | 274 | @impl true 275 | def handle_info({:websocket_pong, _data}, state) do 276 | Logger.info("pong") 277 | {:noreply, put_in(state.pong, true)} 278 | end 279 | 280 | @impl true 281 | def handle_info(:check_websocket_pong, state) do 282 | if state.pong do 283 | {:noreply, put_in(state.pong, true)} 284 | else 285 | Logger.warning("no pong... killing the connection") 286 | state = kill_websocket(state, :timeout_ping) 287 | {:noreply, put_in(state.pong, true)} 288 | end 289 | end 290 | 291 | defp maybe_reply_flag(rec) do 292 | if rec["reply"] != nil do 293 | ["r"] 294 | else 295 | [] 296 | end 297 | end 298 | 299 | defp maybe_quote_flag(rec) do 300 | has_bsky_link_facet? 
= 301 | Enum.any?( 302 | (rec["facets"] || []) 303 | |> Enum.filter(fn facet -> 304 | facet["features"] 305 | |> Enum.filter(fn feature -> 306 | is_link = feature["$type"] == "app.bsky.richtext.facet#link" 307 | is_bsky = String.starts_with?(feature["uri"] || "", "https://bsky.app") 308 | is_link and is_bsky 309 | end) 310 | |> Enum.any?() 311 | end) 312 | ) 313 | 314 | has_post_embed? = 315 | (rec["embed"] || %{})["$type"] == "app.bsky.embed.record" and 316 | String.contains?(((rec["embed"] || %{})["record"] || %{})["uri"], "app.bsky.feed.post") 317 | 318 | if has_bsky_link_facet? || has_post_embed? do 319 | ["q"] 320 | else 321 | [] 322 | end 323 | end 324 | 325 | defp maybe_mention_flag(rec) do 326 | unless Enum.empty?( 327 | (rec["facets"] || []) 328 | |> Enum.filter(fn facet -> 329 | facet["features"] 330 | |> Enum.filter(fn feature -> 331 | is_mention = feature["$type"] == "app.bsky.richtext.facet#mention" 332 | has_did = feature["did"] != nil 333 | is_mention and has_did 334 | end) 335 | |> Enum.any?() 336 | end) 337 | ) do 338 | ["m"] 339 | else 340 | [] 341 | end 342 | end 343 | 344 | @media_embed_types [ 345 | "app.bsky.embed.images", 346 | "app.bsky.embed.video" 347 | ] 348 | 349 | defp maybe_media_flag(rec) do 350 | maybe_embed = rec["embed"] || %{} 351 | embed_type = maybe_embed["$type"] 352 | 353 | if embed_type in @media_embed_types do 354 | ["M"] 355 | else 356 | [] 357 | end 358 | end 359 | 360 | # test posts: 361 | # https://pdsls.dev/at/did:plc:iw5dbzqr3hbt4qrsqv5bsv2n/app.bsky.feed.post/3lbwdzpzu722e 362 | # https://pdsls.dev/at/did:plc:ghmhveudel6es5chzycsi2hi/app.bsky.feed.post/3lb2ed5bl7222 363 | @wrld_id_regex ~r/wrld_[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ 364 | 365 | defp is_vrchat_feature(feature) do 366 | maybe_uri = feature["uri"] || "" 367 | 368 | feature["$type"] == "app.bsky.richtext.facet#link" and 369 | (String.starts_with?(maybe_uri, "https://vrchat.com/home/world/wrld_") or 370 | String.starts_with?(maybe_uri, "https://vrchat.com/home/launch?worldId=wrld_")) 371 | end 372 | 373 | defp extract_world_id(rec) do 374 | first_wrld_link = 375 | (rec["facets"] || []) 376 | |> Enum.filter(fn facet -> 377 | facet["features"] 378 | |> Enum.filter(&is_vrchat_feature/1) 379 | |> Enum.any?() 380 | end) 381 | |> Enum.at(0) 382 | |> then(fn 383 | nil -> 384 | nil 385 | 386 | facet -> 387 | feature = 388 | facet["features"] 389 | |> Enum.filter(&is_vrchat_feature/1) 390 | |> Enum.at(0) 391 | 392 | feature["uri"] 393 | end) 394 | 395 | if first_wrld_link == nil do 396 | # fallback to post text, maybe they posted the wrld id directly 397 | @wrld_id_regex 398 | |> Regex.run(rec["text"] || "") 399 | |> then(fn 400 | nil -> [] 401 | v -> v 402 | end) 403 | |> Enum.at(0) 404 | else 405 | # regex so it extracts wrld_ id (could do String.trim too if im up for optimizing lol) 406 | @wrld_id_regex 407 | |> Regex.run(first_wrld_link) 408 | |> then(fn 409 | nil -> 410 | Logger.error("no wrld id found in first wrld link: #{inspect(first_wrld_link)}") 411 | 412 | v -> 413 | v 414 | |> Enum.at(0) 415 | end) 416 | end 417 | end 418 | 419 | @wordfilter [ 420 | "nsfw", 421 | "cock", 422 | "dick", 423 | "penis", 424 | "nude", 425 | "findom", 426 | "pussy", 427 | "porn", 428 | "p0rn", 429 | "2dfd", 430 | "onlyfans", 431 | "fansly", 432 | "bbw", 433 | "paypig", 434 | "horny", 435 | "sissy", 436 | "boobs", 437 | "kinks", 438 | "orgy", 439 | "threesome", 440 | "gangbang", 441 | "fetish", 442 | "blacked", 443 | "semen", 444 | "sperm", 445 | "stroking", 446 | "#cum", 447 | 
"selfcest", 448 | "incest", 449 | "naked", 450 | "cumming", 451 | "titties", 452 | "boobies", 453 | "gooning", 454 | "cuckold", 455 | "jacking off", 456 | "masturbating", 457 | "masturbate", 458 | "fucktoy", 459 | "fuck toy", 460 | "abdl", 461 | "bondage", 462 | "shxta", 463 | "irlshota", 464 | "#shota", 465 | "ageplay" 466 | ] 467 | def run_filters(post) do 468 | text = (post["text"] || "") |> String.trim() 469 | 470 | # TODO faster filter chain (via regex) 471 | unless String.length(text) == 0 do 472 | {text, 473 | @wordfilter 474 | |> Enum.map(fn word -> 475 | text 476 | |> String.downcase() 477 | |> String.contains?(word) 478 | end) 479 | |> Enum.any?()} 480 | else 481 | # filter out posts without any text 482 | {text, true} 483 | end 484 | end 485 | 486 | defp fanout_post(state, timestamp, msg) do 487 | post = msg["commit"]["record"] 488 | {text, filtered?} = run_filters(post) 489 | 490 | unless filtered? do 491 | fanout_filtered_post(state, timestamp, msg, text) 492 | state 493 | else 494 | put_in(state.filtered_post_counter, state.filtered_post_counter + 1) 495 | end 496 | end 497 | 498 | def post_flags_for(post_record) do 499 | (maybe_reply_flag(post_record) ++ 500 | maybe_quote_flag(post_record) ++ 501 | maybe_mention_flag(post_record) ++ 502 | maybe_media_flag(post_record)) 503 | |> Enum.join("") 504 | end 505 | 506 | defp fanout_filtered_post(state, timestamp, msg, text) do 507 | post_record = msg["commit"]["record"] 508 | post_flags = post_flags_for(post_record) 509 | 510 | # NOTE: the hydrator may overwrite fields set here (currently author_name, author_handle, timestamp) 511 | # so updates to formats here should also be carried out there 512 | post_data = %{ 513 | timestamp: (timestamp |> DateTime.to_unix(:millisecond)) / 1000, 514 | text: text, 515 | languages: (post_record["langs"] || []) |> Enum.at(0) || "", 516 | author_name: "<...processing...>", 517 | author_handle: "@" <> (Map.get(state.handles, msg["did"]) || msg["did"]), 518 | author_did: msg["did"], 519 | hash: :erlang.phash2(text <> msg["did"]), 520 | flags: post_flags, 521 | world_id: extract_world_id(post_record), 522 | micro_id: msg["commit"]["rkey"] 523 | } 524 | 525 | {:ok, worker} = ExHashRing.Ring.find_node(VRHose.Hydrator.Ring, msg["did"]) 526 | worker_pid = worker |> to_charlist() |> :erlang.list_to_pid() 527 | VRHose.Hydrator.submit_post(worker_pid, {msg["did"], post_data, state.subscribers}) 528 | end 529 | 530 | defp fanout(state, anything) do 531 | state.subscribers 532 | |> Enum.each(fn pid -> 533 | send(pid, anything) 534 | end) 535 | end 536 | 537 | @impl true 538 | def terminate(reason, state) do 539 | if state.conn_pid != nil do 540 | VRHose.Websocket.close(state.conn_pid, 1000, "uwaa") 541 | end 542 | 543 | Logger.error("terminating #{inspect(reason)} #{inspect(state)}") 544 | end 545 | end 546 | -------------------------------------------------------------------------------- /mix.lock: -------------------------------------------------------------------------------- 1 | %{ 2 | "accept": {:hex, :accept, "0.3.5", "b33b127abca7cc948bbe6caa4c263369abf1347cfa9d8e699c6d214660f10cd1", [:rebar3], [], "hexpm", "11b18c220bcc2eab63b5470c038ef10eb6783bcb1fcdb11aa4137defa5ac1bb8"}, 3 | "bandit": {:hex, :bandit, "1.5.7", "6856b1e1df4f2b0cb3df1377eab7891bec2da6a7fd69dc78594ad3e152363a50", [:mix], [{:hpax, "~> 1.0.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, 
{:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "f2dd92ae87d2cbea2fa9aa1652db157b6cba6c405cb44d4f6dd87abba41371cd"}, 4 | "castore": {:hex, :castore, "1.0.10", "43bbeeac820f16c89f79721af1b3e092399b3a1ecc8df1a472738fd853574911", [:mix], [], "hexpm", "1b0b7ea14d889d9ea21202c43a4fa015eb913021cb535e8ed91946f4b77a8848"}, 5 | "cc_precompiler": {:hex, :cc_precompiler, "0.1.10", "47c9c08d8869cf09b41da36538f62bc1abd3e19e41701c2cea2675b53c704258", [:mix], [{:elixir_make, "~> 0.7", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "f6e046254e53cd6b41c6bacd70ae728011aa82b2742a80d6e2214855c6e06b22"}, 6 | "certifi": {:hex, :certifi, "2.13.0", "e52be248590050b2dd33b0bb274b56678f9068e67805dca8aa8b1ccdb016bbf6", [:rebar3], [], "hexpm", "8f3d9533a0f06070afdfd5d596b32e21c6580667a492891851b0e2737bc507a1"}, 7 | "cowlib": {:hex, :cowlib, "2.13.0", "db8f7505d8332d98ef50a3ef34b34c1afddec7506e4ee4dd4a3a266285d282ca", [:make, :rebar3], [], "hexpm", "e1e1284dc3fc030a64b1ad0d8382ae7e99da46c3246b815318a4b848873800a4"}, 8 | "db_connection": {:hex, :db_connection, "2.7.0", "b99faa9291bb09892c7da373bb82cba59aefa9b36300f6145c5f201c7adf48ec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "dcf08f31b2701f857dfc787fbad78223d61a32204f217f15e881dd93e4bdd3ff"}, 9 | "decimal": {:hex, :decimal, "2.2.0", "df3d06bb9517e302b1bd265c1e7f16cda51547ad9d99892049340841f3e15836", [:mix], [], "hexpm", "af8daf87384b51b7e611fb1a1f2c4d4876b65ef968fa8bd3adf44cff401c7f21"}, 10 | "dns_cluster": {:hex, :dns_cluster, "0.1.3", "0bc20a2c88ed6cc494f2964075c359f8c2d00e1bf25518a6a6c7fd277c9b0c66", [:mix], [], "hexpm", "46cb7c4a1b3e52c7ad4cbe33ca5079fbde4840dedeafca2baf77996c2da1bc33"}, 11 | "earmark_parser": {:hex, :earmark_parser, "1.4.41", "ab34711c9dc6212dda44fcd20ecb87ac3f3fce6f0ca2f28d4a00e4154f8cd599", [:mix], [], "hexpm", "a81a04c7e34b6617c2792e291b5a2e57ab316365c2644ddc553bb9ed863ebefa"}, 12 | "ecto": {:hex, :ecto, "3.12.4", "267c94d9f2969e6acc4dd5e3e3af5b05cdae89a4d549925f3008b2b7eb0b93c3", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ef04e4101688a67d061e1b10d7bc1fbf00d1d13c17eef08b71d070ff9188f747"}, 13 | "ecto_sql": {:hex, :ecto_sql, "3.12.1", "c0d0d60e85d9ff4631f12bafa454bc392ce8b9ec83531a412c12a0d415a3a4d0", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "aff5b958a899762c5f09028c847569f7dfb9cc9d63bdb8133bff8a5546de6bf5"}, 14 | "ecto_sqlite3": {:hex, :ecto_sqlite3, "0.17.5", "fbee5c17ff6afd8e9ded519b0abb363926c65d30b27577232bb066b2a79957b8", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:ecto_sql, "~> 3.12", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:exqlite, "~> 0.22", [hex: :exqlite, repo: "hexpm", 
optional: false]}], "hexpm", "3b54734d998cbd032ac59403c36acf4e019670e8b6ceef9c6c33d8986c4e9704"}, 15 | "elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"}, 16 | "ex_doc": {:hex, :ex_doc, "0.35.0", "14dcaac6ee0091d1e6938a7ddaf62a4a8c6c0d0b0002e6a9252997a08df719a0", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.0", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14 or ~> 1.0", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1 or ~> 1.0", [hex: :makeup_erlang, repo: "hexpm", optional: false]}, {:makeup_html, ">= 0.1.0", [hex: :makeup_html, repo: "hexpm", optional: true]}], "hexpm", "d69a789ea0248a108c80eef509ec88ffe277f74828169c33f6f7ddaef89c98a5"}, 17 | "ex_hash_ring": {:hex, :ex_hash_ring, "6.0.4", "bef9d2d796afbbe25ab5b5a7ed746e06b99c76604f558113c273466d52fa6d6b", [:mix], [], "hexpm", "89adabf31f7d3dfaa36802ce598ce918e9b5b33bae8909ac1a4d052e1e567d18"}, 18 | "ex_zstd": {:hex, :ex_zstd, "0.1.0", "4b1b5ebd7c0417e69308db8cdd478b9adb3e2d1a03b6e7366cf0a9aadeae11af", [:make, :mix], [{:ex_doc, ">= 0.0.0", [hex: :ex_doc, repo: "hexpm", optional: false]}], "hexpm", "2c9542a5c088e0eab14aa9b10d18bc084a6060ecf09025bbfc5b08684568bc67"}, 19 | "exqlite": {:hex, :exqlite, "0.27.0", "2ef6021862e74c6253d1fb1f5701bd47e4e779b035d34daf2a13ec83945a05ba", [:make, :mix], [{:cc_precompiler, "~> 0.1", [hex: :cc_precompiler, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.8", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "b947b9db15bb7aad11da6cd18a0d8b78f7fcce89508a27a5b9be18350fe12c59"}, 20 | "ezstd": {:hex, :ezstd, "1.1.0", "d3b483d6acfadfb65dba4015371e6d54526dbf3d9ef0941b5add8bf5890731f4", [:rebar3], [], "hexpm", "28cfa0ed6cc3922095ad5ba0f23392a1664273358b17184baa909868361184e7"}, 21 | "finch": {:hex, :finch, "0.19.0", "c644641491ea854fc5c1bbaef36bfc764e3f08e7185e1f084e35e0672241b76d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "fc5324ce209125d1e2fa0fcd2634601c52a787aff1cd33ee833664a5af4ea2b6"}, 22 | "gen_stage": {:hex, :gen_stage, "1.2.1", "19d8b5e9a5996d813b8245338a28246307fd8b9c99d1237de199d21efc4c76a1", [:mix], [], "hexpm", "83e8be657fa05b992ffa6ac1e3af6d57aa50aace8f691fcf696ff02f8335b001"}, 23 | "gun": {:hex, :gun, "2.1.0", "b4e4cbbf3026d21981c447e9e7ca856766046eff693720ba43114d7f5de36e87", [:make, :rebar3], [{:cowlib, "2.13.0", [hex: :cowlib, repo: "hexpm", optional: false]}], "hexpm", "52fc7fc246bfc3b00e01aea1c2854c70a366348574ab50c57dfe796d24a0101d"}, 24 | "hpax": {:hex, :hpax, "1.0.0", "28dcf54509fe2152a3d040e4e3df5b265dcb6cb532029ecbacf4ce52caea3fd2", [:mix], [], "hexpm", "7f1314731d711e2ca5fdc7fd361296593fc2542570b3105595bb0bc6d0fad601"}, 25 | "inet64_tcp": {:git, "https://github.com/skunkwerks/inet64_tcp", "56e1cd568dc6329dd8f29270c8561bbab0e65dbf", []}, 26 | "jason": {:hex, 
:jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, 27 | "libzstd": {:git, "https://github.com/facebook/zstd.git", "b0a179d469680276adbd4007435989a6b7fd8b4f", []}, 28 | "makeup": {:hex, :makeup, "1.2.1", "e90ac1c65589ef354378def3ba19d401e739ee7ee06fb47f94c687016e3713d1", [:mix], [{:nimble_parsec, "~> 1.4", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "d36484867b0bae0fea568d10131197a4c2e47056a6fbe84922bf6ba71c8d17ce"}, 29 | "makeup_elixir": {:hex, :makeup_elixir, "1.0.0", "74bb8348c9b3a51d5c589bf5aebb0466a84b33274150e3b6ece1da45584afc82", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "49159b7d7d999e836bedaf09dcf35ca18b312230cf901b725a64f3f42e407983"}, 30 | "makeup_erlang": {:hex, :makeup_erlang, "1.0.1", "c7f58c120b2b5aa5fd80d540a89fdf866ed42f1f3994e4fe189abebeab610839", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "8a89a1eeccc2d798d6ea15496a6e4870b75e014d1af514b1b71fa33134f57814"}, 31 | "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"}, 32 | "mint": {:hex, :mint, "1.6.2", "af6d97a4051eee4f05b5500671d47c3a67dac7386045d87a904126fd4bbcea2e", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "5ee441dffc1892f1ae59127f74afe8fd82fda6587794278d924e4d90ea3d63f9"}, 33 | "mint_web_socket": {:hex, :mint_web_socket, "1.0.4", "0b539116dbb3d3f861cdf5e15e269a933cb501c113a14db7001a3157d96ffafd", [:mix], [{:mint, ">= 1.4.1 and < 2.0.0-0", [hex: :mint, repo: "hexpm", optional: false]}], "hexpm", "027d4c5529c45a4ba0ce27a01c0f35f284a5468519c045ca15f43decb360a991"}, 34 | "nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"}, 35 | "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"}, 36 | "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"}, 37 | "pegasus": {:hex, :pegasus, "0.2.5", "38123461fe41add54f715ce41f89137a31cd217d353005b057f88b9b67c39b6f", [:mix], [{:nimble_parsec, "~> 1.2", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "ee80708608807f4447f1da1e6e0ebd9604f5bda4fbe2d4bdd9aa6dd67afde020"}, 38 | "phoenix": {:hex, :phoenix, "1.7.14", "a7d0b3f1bc95987044ddada111e77bd7f75646a08518942c72a8440278ae7825", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: 
"hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "c7859bc56cc5dfef19ecfc240775dae358cbaa530231118a9e014df392ace61a"}, 39 | "phoenix_ecto": {:hex, :phoenix_ecto, "4.6.3", "f686701b0499a07f2e3b122d84d52ff8a31f5def386e03706c916f6feddf69ef", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.1", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.16 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "909502956916a657a197f94cc1206d9a65247538de8a5e186f7537c895d95764"}, 40 | "phoenix_html": {:hex, :phoenix_html, "4.1.1", "4c064fd3873d12ebb1388425a8f2a19348cef56e7289e1998e2d2fa758aa982e", [:mix], [], "hexpm", "f2f2df5a72bc9a2f510b21497fd7d2b86d932ec0598f0210fed4114adc546c6f"}, 41 | "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.4", "4508e481f791ce62ec6a096e13b061387158cbeefacca68c6c1928e1305e23ed", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "2984aae96994fbc5c61795a73b8fb58153b41ff934019cfb522343d2d3817d59"}, 42 | "phoenix_live_view": {:hex, :phoenix_live_view, "0.20.17", "f396bbdaf4ba227b82251eb75ac0afa6b3da5e509bc0d030206374237dfc9450", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a61d741ffb78c85fdbca0de084da6a48f8ceb5261a79165b5a0b59e5f65ce98b"}, 43 | "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"}, 44 | "phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"}, 45 | "plug": {:hex, :plug, "1.16.1", 
"40c74619c12f82736d2214557dedec2e9762029b2438d6d175c5074c933edc9d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a13ff6b9006b03d7e33874945b2755253841b238c34071ed85b0e86057f8cddc"}, 46 | "plug_crypto": {:hex, :plug_crypto, "2.1.0", "f44309c2b06d249c27c8d3f65cfe08158ade08418cf540fd4f72d4d6863abb7b", [:mix], [], "hexpm", "131216a4b030b8f8ce0f26038bc4421ae60e4bb95c5cf5395e1421437824c4fa"}, 47 | "prometheus": {:hex, :prometheus, "4.11.0", "b95f8de8530f541bd95951e18e355a840003672e5eda4788c5fa6183406ba29a", [:mix, :rebar3], [{:quantile_estimator, "~> 0.2.1", [hex: :quantile_estimator, repo: "hexpm", optional: false]}], "hexpm", "719862351aabf4df7079b05dc085d2bbcbe3ac0ac3009e956671b1d5ab88247d"}, 48 | "prometheus_ecto": {:hex, :prometheus_ecto, "1.4.3", "3dd4da1812b8e0dbee81ea58bb3b62ed7588f2eae0c9e97e434c46807ff82311", [:mix], [{:ecto, "~> 2.0 or ~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:prometheus_ex, "~> 1.1 or ~> 2.0 or ~> 3.0", [hex: :prometheus_ex, repo: "hexpm", optional: false]}], "hexpm", "8d66289f77f913b37eda81fd287340c17e61a447549deb28efc254532b2bed82"}, 49 | "prometheus_ex": {:git, "https://github.com/lanodan/prometheus.ex.git", "31f7fbe4b71b79ba27efc2a5085746c4011ceb8f", [branch: "fix/elixir-1.14"]}, 50 | "prometheus_phoenix": {:hex, :prometheus_phoenix, "1.3.0", "c4b527e0b3a9ef1af26bdcfbfad3998f37795b9185d475ca610fe4388fdd3bb5", [:mix], [{:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}, {:prometheus_ex, "~> 1.3 or ~> 2.0 or ~> 3.0", [hex: :prometheus_ex, repo: "hexpm", optional: false]}], "hexpm", "c4d1404ac4e9d3d963da601db2a7d8ea31194f0017057fabf0cfb9bf5a6c8c75"}, 51 | "prometheus_phx": {:git, "https://git.pleroma.social/pleroma/elixir-libraries/prometheus-phx.git", "9cd8f248c9381ffedc799905050abce194a97514", [branch: "no-logging"]}, 52 | "prometheus_plugs": {:hex, :prometheus_plugs, "1.1.5", "25933d48f8af3a5941dd7b621c889749894d8a1082a6ff7c67cc99dec26377c5", [:mix], [{:accept, "~> 0.1", [hex: :accept, repo: "hexpm", optional: false]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}, {:prometheus_ex, "~> 1.1 or ~> 2.0 or ~> 3.0", [hex: :prometheus_ex, repo: "hexpm", optional: false]}, {:prometheus_process_collector, "~> 1.1", [hex: :prometheus_process_collector, repo: "hexpm", optional: true]}], "hexpm", "0273a6483ccb936d79ca19b0ab629aef0dba958697c94782bb728b920dfc6a79"}, 53 | "protoss": {:hex, :protoss, "0.2.1", "fcf437ed65178d6cbf9a600886e3da9f7173697223972f062ee593941c2588b1", [:mix], [], "hexpm", "2261dbdc4d5913ce1e88d1410108d97f21140a118f45f6acc3edc4ecdb952052"}, 54 | "quantile_estimator": {:hex, :quantile_estimator, "0.2.1", "ef50a361f11b5f26b5f16d0696e46a9e4661756492c981f7b2229ef42ff1cd15", [:rebar3], [], "hexpm", "282a8a323ca2a845c9e6f787d166348f776c1d4a41ede63046d72d422e3da946"}, 55 | "recon": {:hex, :recon, "2.5.6", "9052588e83bfedfd9b72e1034532aee2a5369d9d9343b61aeb7fbce761010741", [:mix, :rebar3], [], "hexpm", "96c6799792d735cc0f0fd0f86267e9d351e63339cbe03df9d162010cefc26bb0"}, 56 | "req": {:hex, :req, "0.5.7", "b722680e03d531a2947282adff474362a48a02aa54b131196fbf7acaff5e4cee", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: 
false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "c6035374615120a8923e8089d0c21a3496cf9eda2d287b806081b8f323ceee29"}, 57 | "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, 58 | "telemetry_metrics": {:hex, :telemetry_metrics, "1.0.0", "29f5f84991ca98b8eb02fc208b2e6de7c95f8bb2294ef244a176675adc7775df", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f23713b3847286a534e005126d4c959ebcca68ae9582118ce436b521d1d47d5d"}, 59 | "telemetry_poller": {:hex, :telemetry_poller, "1.1.0", "58fa7c216257291caaf8d05678c8d01bd45f4bdbc1286838a28c4bb62ef32999", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9eb9d9cbfd81cbd7cdd24682f8711b6e2b691289a0de6826e58452f28c103c8f"}, 60 | "thousand_island": {:hex, :thousand_island, "1.3.5", "6022b6338f1635b3d32406ff98d68b843ba73b3aa95cfc27154223244f3a6ca5", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "2be6954916fdfe4756af3239fb6b6d75d0b8063b5df03ba76fd8a4c87849e180"}, 61 | "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"}, 62 | "websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"}, 63 | "xrpc": {:git, "https://github.com/moomerman/xrpc", "3c4037a1f6715bcaa94a6939ccf26e1dd3a068a9", [branch: "main"]}, 64 | "zig_get": {:hex, :zig_get, "0.13.1", "0c5ba23e8ed9bfabb22ddee3f728fe382b72db057423956549e71c9a33aed090", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "bb05db6ed83a72e3100ab0110aad0f3c81ea005f9e85f22c63c3527a82257b4f"}, 65 | "zig_parser": {:hex, :zig_parser, "0.4.0", "5230576fcea30c061f08f6053448ad3dc5194a45485065564a7f8047bb351ce9", [:mix], [{:pegasus, "~> 0.2.4", [hex: :pegasus, repo: "hexpm", optional: false]}], "hexpm", "ec54cf14e80a1485e29a80b42756d0421426db81eb9e2630721fd46ab5c21bcb"}, 66 | "zigler": {:hex, :zigler, "0.13.3", "18f9a1b4d230154156b9955b209fbd85c4195d5b35e0d55dae5d3a48c646e8c8", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:protoss, "~> 0.2", [hex: :protoss, repo: "hexpm", optional: false]}, {:zig_get, "0.13.1", [hex: :zig_get, repo: "hexpm", optional: false]}, {:zig_parser, "~> 0.4.0", [hex: :zig_parser, repo: "hexpm", optional: false]}], "hexpm", "b83bfd7c8bfad275cc59a4816846b2c863f1dcf9842303323bf3110ac2597134"}, 67 | } 68 | --------------------------------------------------------------------------------