├── .env.sample ├── .github └── workflows │ └── test.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── README.md ├── diesel.toml ├── migrations ├── .keep ├── 00000000000000_diesel_initial_setup │ ├── down.sql │ └── up.sql ├── 2024-01-29-234154_baseline │ ├── down.sql │ └── up.sql ├── 2024-02-04-155233_zap-balances │ ├── down.sql │ └── up.sql ├── 2024-02-08-143851_scheduled │ ├── down.sql │ └── up.sql ├── 2024-02-08-162944_fix_trigger │ ├── down.sql │ └── up.sql └── 2024-02-09-110155_oracle │ ├── down.sql │ └── up.sql └── src ├── config.rs ├── invoice_subscriber.rs ├── job_listener.rs ├── main.rs ├── models ├── event.rs ├── event_job.rs ├── event_nonce.rs ├── job.rs ├── mod.rs ├── oracle_metadata.rs ├── schema.rs ├── zap.rs └── zap_balance.rs ├── routes.rs └── wasm_handler.rs /.env.sample: -------------------------------------------------------------------------------- 1 | DATABASE_URL=postgres://username:password@localhost:5432/wasm_dvm 2 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | rust_tests: 8 | name: Rust Checks 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v3 12 | - uses: actions-rs/toolchain@v1 13 | with: 14 | toolchain: nightly 15 | components: clippy, rustfmt 16 | override: true 17 | profile: minimal 18 | 19 | - name: Setup trunk 20 | uses: jetli/trunk-action@v0.1.0 21 | with: 22 | version: 'latest' 23 | 24 | - uses: actions/cache@v2 25 | with: 26 | path: | 27 | ~/.cargo/registry 28 | ~/.cargo/git 29 | target 30 | key: cargo-${{ runner.os }}-rust-tests-${{ hashFiles('**/Cargo.toml') }} 31 | restore-keys: | 32 | cargo-${{ runner.os }}-rust-tests- 33 | cargo-${{ runner.os }}- 34 | 35 | - name: Check formatting 36 | run: cargo fmt --check 37 | 38 | - name: Check clippy 39 | run: cargo clippy 40 | 41 | - name: Run tests 42 | 
run: cargo test 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | keys.json 3 | .env 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "wasm-dvm" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | anyhow = "1.0" 10 | axum = "0.6.20" 11 | bitcoin = "0.30.2" 12 | tower-http = { version = "0.4.4", features = ["cors"] } 13 | log = "0.4.20" 14 | pretty_env_logger = "0.5.0" 15 | clap = { version = "4.4.18", features = ["derive"] } 16 | chrono = { version = "0.4", features = ["serde"] } 17 | diesel = { version = "2.1", features = ["postgres", "r2d2", "chrono", "numeric", "serde_json"] } 18 | diesel_migrations = "2.1.0" 19 | home = "0.5.9" 20 | hex = "0.4.3" 21 | nostr = "=0.28.0" 22 | nostr-sdk = "0.28.0" 23 | serde = "1.0" 24 | serde_json = "1.0" 25 | lightning-invoice = { version = "0.29.0", features = ["serde"] } 26 | lnurl-rs = { version = "0.4.0", default-features = false } 27 | reqwest = { version = "0.11", features = ["blocking"] } 28 | tempfile = "3.2" 29 | tokio = { version = "1", features = ["full"] } 30 | extism = "1.0.3" 31 | tonic_openssl_lnd = "0.2.0" 32 | sha2 = "0.10.8" 33 | kormir = { version = "0.1.9", features = ["nostr"] } 34 | 35 | # needed until next release of nostr 36 | [patch.crates-io] 37 | nostr = { git = 'https://github.com/benthecarman/nostr.git', rev = "62f4832808d2995e4695e36169aa5e2b80dea1e2" } 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # WASM DVM 2 | 3 | ## Introduction 4 | 5 | WASM DVM 
is a WebAssembly-based [data vending machine](https://www.data-vending-machines.org/). 6 | 7 | Currently, this uses [extism](https://extism.org/) as the execution environment. The Wasm code is executed in a 8 | WebAssembly runtime environment. If you want to develop a wasm plugin, you can use the [extism](https://extism.org/) 9 | PDK for developing and testing your wasm code. 10 | 11 | ## Currently Supported Features 12 | 13 | - [x] Pay per time execution 14 | - [x] Pre-paid execution with zaps 15 | - [x] Encrypted input and output 16 | - [x] Scheduled execution 17 | - [x] DLC announcement based execution 18 | 19 | ## Nostr Events 20 | 21 | ### Input 22 | 23 | Clients must provide the Wasm code in the `i` tag field. The Wasm code must be directly accessible at the provided URL. 24 | It must also provide the input data and the provided function name to be executed. 25 | 26 | The input should be a stringified JSON object with the following fields: 27 | 28 | - `url` (string): The URL of the Wasm binary. 29 | - `function` (string): The name of the function to be executed. 30 | - `input` (string): The input data for the function. 31 | - `time` (number): The maximum time in milliseconds to execute the function. 32 | - `checksum` (string): The sha256 hash of the Wasm binary in hex. 33 | - `schedule` (object): Scheduling parameters for the execution. The object should have the following fields: 34 | - `run_date` (number): The date in seconds since the epoch to execute the function. 35 | - `name` (optional string): Name of the event. Only used for DLC announcement. 36 | - `expected_outputs` (optional string array): The list of expected outputs from the function. Only used for DLC 37 | announcement. 38 | 39 | ### Output 40 | 41 | The result of the execution is returned in the `content` field. 42 | 43 | ### Example 44 | 45 | Count number of vowels in a string. 
46 | 47 | #### Request 48 | 49 | ```json 50 | { 51 | "content": "", 52 | "kind": 5600, 53 | "tags": [ 54 | [ 55 | "i", 56 | "{\"url\":\"https://github.com/extism/plugins/releases/download/v0.5.0/count_vowels.wasm\",\"function\":\"count_vowels\",\"input\":\"Hello World\",\"time\": 1000, \"checksum\": \"93898457953d30d016f712ccf4336ce7e9971db5f7f3aff1edd252764f75d5d7\"}", 57 | "text" 58 | ] 59 | ] 60 | } 61 | ``` 62 | 63 | #### Response 64 | 65 | ```json 66 | { 67 | "content": "{\"count\":3,\"total\":3,\"vowels\":\"aeiouAEIOU\"}", 68 | "kind": 6600 69 | } 70 | ``` 71 | -------------------------------------------------------------------------------- /diesel.toml: -------------------------------------------------------------------------------- 1 | # For documentation on how to configure this file, 2 | # see https://diesel.rs/guides/configuring-diesel-cli 3 | 4 | [print_schema] 5 | file = "src/models/schema.rs" 6 | 7 | [migrations_directory] 8 | dir = "migrations" 9 | -------------------------------------------------------------------------------- /migrations/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/benthecarman/wasm-dvm/e5e2b0d14546b5eed7c4d8cb628987fa4d5948c7/migrations/.keep -------------------------------------------------------------------------------- /migrations/00000000000000_diesel_initial_setup/down.sql: -------------------------------------------------------------------------------- 1 | -- This file was automatically created by Diesel to setup helper functions 2 | -- and other internal bookkeeping. This file is safe to edit, any future 3 | -- changes will be added to existing projects as new migrations. 
4 | 5 | DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); 6 | DROP FUNCTION IF EXISTS diesel_set_updated_at(); 7 | -------------------------------------------------------------------------------- /migrations/00000000000000_diesel_initial_setup/up.sql: -------------------------------------------------------------------------------- 1 | -- This file was automatically created by Diesel to setup helper functions 2 | -- and other internal bookkeeping. This file is safe to edit, any future 3 | -- changes will be added to existing projects as new migrations. 4 | 5 | 6 | 7 | 8 | -- Sets up a trigger for the given table to automatically set a column called 9 | -- `updated_at` whenever the row is modified (unless `updated_at` was included 10 | -- in the modified columns) 11 | -- 12 | -- # Example 13 | -- 14 | -- ```sql 15 | -- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); 16 | -- 17 | -- SELECT diesel_manage_updated_at('users'); 18 | -- ``` 19 | CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ 20 | BEGIN 21 | EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s 22 | FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); 23 | END; 24 | $$ LANGUAGE plpgsql; 25 | 26 | CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ 27 | BEGIN 28 | IF ( 29 | NEW IS DISTINCT FROM OLD AND 30 | NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at 31 | ) THEN 32 | NEW.updated_at := current_timestamp; 33 | END IF; 34 | RETURN NEW; 35 | END; 36 | $$ LANGUAGE plpgsql; 37 | -------------------------------------------------------------------------------- /migrations/2024-01-29-234154_baseline/down.sql: -------------------------------------------------------------------------------- 1 | drop table jobs; 2 | -------------------------------------------------------------------------------- /migrations/2024-01-29-234154_baseline/up.sql: 
-------------------------------------------------------------------------------- 1 | CREATE TABLE jobs 2 | ( 3 | id SERIAL PRIMARY KEY, 4 | payment_hash bytea NOT NULL UNIQUE, 5 | request jsonb NOT NULL UNIQUE, 6 | response_id bytea UNIQUE, 7 | created_at timestamp NOT NULL DEFAULT NOW(), 8 | updated_at timestamp NOT NULL DEFAULT NOW() 9 | ); 10 | 11 | create unique index jobs_payment_hash_idx on jobs (payment_hash); 12 | 13 | -- Function to set updated_at during UPDATE 14 | CREATE OR REPLACE FUNCTION set_updated_at() 15 | RETURNS TRIGGER AS 16 | $$ 17 | BEGIN 18 | NEW.updated_at := CURRENT_TIMESTAMP; 19 | RETURN NEW; 20 | END; 21 | $$ LANGUAGE plpgsql; 22 | 23 | CREATE TRIGGER tr_set_dates_after_update 24 | BEFORE UPDATE 25 | ON jobs 26 | FOR EACH ROW 27 | EXECUTE FUNCTION set_updated_at(); 28 | -------------------------------------------------------------------------------- /migrations/2024-02-04-155233_zap-balances/down.sql: -------------------------------------------------------------------------------- 1 | drop table zaps; 2 | drop table zap_balances; 3 | -------------------------------------------------------------------------------- /migrations/2024-02-04-155233_zap-balances/up.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE zap_balances 2 | ( 3 | npub bytea NOT NULL PRIMARY KEY, 4 | balance_msats INTEGER NOT NULL DEFAULT 0, 5 | created_at timestamp NOT NULL DEFAULT NOW() 6 | ); 7 | 8 | CREATE TABLE zaps 9 | ( 10 | payment_hash bytea PRIMARY KEY, 11 | invoice TEXT UNIQUE NOT NULL, 12 | amount_msats INTEGER NOT NULL, 13 | request jsonb NOT NULL, 14 | npub bytea NOT NULL REFERENCES zap_balances (npub), 15 | note_id bytea UNIQUE, 16 | created_at timestamp NOT NULL DEFAULT NOW() 17 | ); 18 | -------------------------------------------------------------------------------- /migrations/2024-02-08-143851_scheduled/down.sql: -------------------------------------------------------------------------------- 1 | DROP 
TRIGGER IF EXISTS jobs_scheduled_at_check ON jobs; 2 | DROP FUNCTION IF EXISTS jobs_scheduled_at_check(); 3 | 4 | ALTER TABLE jobs DROP COLUMN IF EXISTS scheduled_at; 5 | -------------------------------------------------------------------------------- /migrations/2024-02-08-143851_scheduled/up.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE jobs 2 | ADD COLUMN scheduled_at TIMESTAMP; 3 | 4 | -- add trigger to make sure scheduled_at is set in the future 5 | CREATE OR REPLACE FUNCTION jobs_scheduled_at_check() 6 | RETURNS TRIGGER AS $$ 7 | BEGIN 8 | IF NEW.scheduled_at < now() THEN 9 | RAISE EXCEPTION 'scheduled_at must be in the future'; 10 | END IF; 11 | RETURN NEW; 12 | END; 13 | $$ LANGUAGE plpgsql; 14 | CREATE TRIGGER jobs_scheduled_at_check 15 | BEFORE INSERT OR UPDATE 16 | ON jobs 17 | FOR EACH ROW 18 | EXECUTE FUNCTION jobs_scheduled_at_check(); 19 | -------------------------------------------------------------------------------- /migrations/2024-02-08-162944_fix_trigger/down.sql: -------------------------------------------------------------------------------- 1 | DROP FUNCTION IF EXISTS jobs_scheduled_at_check(); 2 | -------------------------------------------------------------------------------- /migrations/2024-02-08-162944_fix_trigger/up.sql: -------------------------------------------------------------------------------- 1 | -- add trigger to make sure scheduled_at is set in the future, only on insert 2 | CREATE OR REPLACE FUNCTION jobs_scheduled_at_check() 3 | RETURNS TRIGGER AS $$ 4 | BEGIN 5 | IF TG_OP = 'UPDATE' AND OLD.scheduled_at = NEW.scheduled_at THEN 6 | RETURN NEW; 7 | END IF; 8 | IF NEW.scheduled_at < now() THEN 9 | RAISE EXCEPTION 'scheduled_at must be in the future'; 10 | END IF; 11 | RETURN NEW; 12 | END; 13 | $$ LANGUAGE plpgsql; 14 | -------------------------------------------------------------------------------- /migrations/2024-02-09-110155_oracle/down.sql: 
-------------------------------------------------------------------------------- 1 | drop table event_jobs; 2 | drop table oracle_metadata; 3 | drop table event_nonces; 4 | drop table events; 5 | -------------------------------------------------------------------------------- /migrations/2024-02-09-110155_oracle/up.sql: -------------------------------------------------------------------------------- 1 | -- Table of information about the oracle, mostly to prevent multiple keys from being used with the same database 2 | -- singleton_constant is a dummy column to ensure there is only one row 3 | CREATE TABLE oracle_metadata 4 | ( 5 | pubkey bytea NOT NULL UNIQUE PRIMARY KEY, 6 | name TEXT NOT NULL UNIQUE, 7 | created_at timestamp NOT NULL DEFAULT NOW(), 8 | updated_at timestamp NOT NULL DEFAULT NOW(), 9 | singleton_constant BOOLEAN NOT NULL DEFAULT TRUE, -- make sure there is only one row 10 | CONSTRAINT one_row_check UNIQUE (singleton_constant) 11 | ); 12 | 13 | -- Primary table containing information about events, 14 | -- contains a broken up oracle announcement, excluding the oracle pubkey which is in memory 15 | -- also contains the name of the event, and whether it is an enum or not for faster lookups 16 | CREATE TABLE events 17 | ( 18 | id SERIAL PRIMARY KEY, 19 | announcement_signature bytea NOT NULL, 20 | oracle_event bytea NOT NULL, 21 | name TEXT NOT NULL UNIQUE, 22 | is_enum BOOLEAN NOT NULL, 23 | announcement_event_id bytea UNIQUE, 24 | attestation_event_id bytea UNIQUE, 25 | created_at timestamp NOT NULL DEFAULT NOW(), 26 | updated_at timestamp NOT NULL DEFAULT NOW() 27 | ); 28 | 29 | -- index for faster lookups by name 30 | CREATE UNIQUE INDEX event_name_index ON events (name); 31 | 32 | -- Table for storing the nonces for each event 33 | -- The signature and outcome are optional, and are only filled in when the event is completed 34 | CREATE TABLE event_nonces 35 | ( 36 | id INTEGER PRIMARY KEY, 37 | event_id INTEGER NOT NULL REFERENCES events (id), 38 
| index INTEGER NOT NULL, 39 | nonce bytea NOT NULL UNIQUE, 40 | signature bytea, 41 | outcome TEXT, 42 | created_at timestamp NOT NULL DEFAULT NOW(), 43 | updated_at timestamp NOT NULL DEFAULT NOW() 44 | ); 45 | 46 | -- index for faster lookups by event_id 47 | CREATE INDEX event_nonces_event_id_index ON event_nonces (event_id); 48 | 49 | -- Table for linking jobs to events 50 | 51 | CREATE TABLE event_jobs 52 | ( 53 | job_id INTEGER NOT NULL REFERENCES jobs (id) PRIMARY KEY, 54 | event_id INTEGER NOT NULL REFERENCES events (id) 55 | ); 56 | 57 | CREATE UNIQUE INDEX event_jobs_event_id_index ON event_jobs (event_id); -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use nostr::bitcoin::Network; 3 | use nostr::{Event, Keys}; 4 | use serde::{Deserialize, Serialize}; 5 | use std::fs::File; 6 | use std::io::{BufReader, Write}; 7 | use std::path::PathBuf; 8 | use std::str::FromStr; 9 | 10 | #[derive(Parser, Debug, Clone)] 11 | #[command(version, author, about)] 12 | /// A tool for zapping based on reactions to notes. 
13 | pub struct Config { 14 | /// Postgres connection string 15 | #[clap(long)] 16 | pub pg_url: String, 17 | /// Location keys files 18 | #[clap(default_value = ".", long)] 19 | pub data_dir: String, 20 | /// Relay to connect to, can be specified multiple times 21 | #[clap(short, long)] 22 | pub relay: Vec, 23 | /// Host of the GRPC server for lnd 24 | #[clap(default_value_t = String::from("127.0.0.1"), long)] 25 | pub lnd_host: String, 26 | /// Port of the GRPC server for lnd 27 | #[clap(default_value_t = 10009, long)] 28 | pub lnd_port: u32, 29 | /// Network lnd is running on ["bitcoin", "testnet", "signet, "regtest"] 30 | #[clap(default_value_t = Network::Bitcoin, short, long)] 31 | pub network: Network, 32 | /// Path to tls.cert file for lnd 33 | #[clap(long)] 34 | cert_file: Option, 35 | /// Path to admin.macaroon file for lnd 36 | #[clap(long)] 37 | macaroon_file: Option, 38 | /// The domain name you are running the lnurl server on 39 | #[clap(default_value_t = String::from("localhost:3000"), long)] 40 | pub domain: String, 41 | /// Bind address for webserver 42 | #[clap(default_value_t = String::from("0.0.0.0"), long)] 43 | pub bind: String, 44 | /// Port for webserver 45 | #[clap(default_value_t = 3000, long)] 46 | pub port: u16, 47 | /// How many millisats per millisecond of runtime 48 | #[clap(default_value_t = 1.0, long)] 49 | pub price: f64, 50 | } 51 | 52 | impl Config { 53 | pub fn macaroon_file(&self) -> String { 54 | self.macaroon_file 55 | .clone() 56 | .unwrap_or_else(|| default_macaroon_file(&self.network)) 57 | } 58 | 59 | pub fn cert_file(&self) -> String { 60 | self.cert_file.clone().unwrap_or_else(default_cert_file) 61 | } 62 | } 63 | 64 | fn home_directory() -> String { 65 | let buf = home::home_dir().expect("Failed to get home dir"); 66 | let str = format!("{}", buf.display()); 67 | 68 | // to be safe remove possible trailing '/' and 69 | // we can manually add it to paths 70 | match str.strip_suffix('/') { 71 | Some(stripped) => 
stripped.to_string(), 72 | None => str, 73 | } 74 | } 75 | 76 | pub fn default_cert_file() -> String { 77 | format!("{}/.lnd/tls.cert", home_directory()) 78 | } 79 | 80 | pub fn default_macaroon_file(network: &Network) -> String { 81 | let network_str = match network { 82 | Network::Bitcoin => "mainnet", 83 | Network::Testnet => "testnet", 84 | Network::Signet => "signet", 85 | Network::Regtest => "regtest", 86 | _ => panic!("Unsupported network"), 87 | }; 88 | 89 | format!( 90 | "{}/.lnd/data/chain/bitcoin/{}/admin.macaroon", 91 | home_directory(), 92 | network_str 93 | ) 94 | } 95 | 96 | #[derive(Debug, Clone, Deserialize, Serialize)] 97 | pub struct ServerKeys { 98 | server_key: String, 99 | pub kind0: Option, 100 | pub kind31990: Option, 101 | } 102 | 103 | impl ServerKeys { 104 | fn generate() -> Self { 105 | let server_key = Keys::generate(); 106 | 107 | ServerKeys { 108 | server_key: server_key.secret_key().unwrap().to_secret_hex(), 109 | kind0: None, 110 | kind31990: None, 111 | } 112 | } 113 | 114 | pub fn keys(&self) -> Keys { 115 | Keys::from_str(&self.server_key).unwrap() 116 | } 117 | 118 | pub fn get_keys(path: &PathBuf) -> ServerKeys { 119 | match File::open(path) { 120 | Ok(file) => { 121 | let reader = BufReader::new(file); 122 | serde_json::from_reader(reader).expect("Could not parse JSON") 123 | } 124 | Err(_) => { 125 | let keys = ServerKeys::generate(); 126 | keys.write(path); 127 | 128 | keys 129 | } 130 | } 131 | } 132 | 133 | pub fn write(&self, path: &PathBuf) { 134 | let json_str = serde_json::to_string(&self).expect("Could not serialize data"); 135 | 136 | let mut file = File::create(path).expect("Could not create file"); 137 | file.write_all(json_str.as_bytes()) 138 | .expect("Could not write to file"); 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /src/invoice_subscriber.rs: -------------------------------------------------------------------------------- 1 | use 
crate::job_listener::get_job_params; 2 | use crate::models::event_job::EventJob; 3 | use crate::models::job::Job; 4 | use crate::models::zap::Zap; 5 | use crate::models::{mark_zap_paid, PostgresStorage}; 6 | use crate::wasm_handler::{download_and_run_wasm, JobParams}; 7 | use bitcoin::hashes::sha256; 8 | use bitcoin::hashes::Hash; 9 | use bitcoin::key::Secp256k1; 10 | use bitcoin::secp256k1::rand::rngs::OsRng; 11 | use bitcoin::secp256k1::rand::RngCore; 12 | use bitcoin::secp256k1::SecretKey; 13 | use diesel::r2d2::{ConnectionManager, Pool}; 14 | use diesel::PgConnection; 15 | use kormir::Oracle; 16 | use lightning_invoice::{Currency, InvoiceBuilder, PaymentSecret}; 17 | use log::{error, info}; 18 | use nostr::nips::nip04; 19 | use nostr::prelude::DataVendingMachineStatus; 20 | use nostr::{Event, EventBuilder, Keys, Kind, Tag, TagKind, ToBech32}; 21 | use nostr_sdk::Client; 22 | use std::time::{SystemTime, UNIX_EPOCH}; 23 | use tonic_openssl_lnd::lnrpc::invoice::InvoiceState; 24 | use tonic_openssl_lnd::lnrpc::Invoice; 25 | use tonic_openssl_lnd::{lnrpc, LndLightningClient}; 26 | 27 | pub async fn start_invoice_subscription( 28 | mut lnd: LndLightningClient, 29 | relays: Vec, 30 | keys: Keys, 31 | http: reqwest::Client, 32 | db_pool: Pool>, 33 | oracle: Oracle, 34 | ) -> anyhow::Result<()> { 35 | info!("Starting invoice subscription"); 36 | 37 | let sub = lnrpc::InvoiceSubscription::default(); 38 | let mut invoice_stream = lnd 39 | .subscribe_invoices(sub) 40 | .await 41 | .expect("Failed to start invoice subscription") 42 | .into_inner(); 43 | 44 | let client = Client::new(&keys); 45 | client.add_relays(relays).await?; 46 | client.connect().await; 47 | 48 | while let Some(ln_invoice) = invoice_stream 49 | .message() 50 | .await 51 | .expect("Failed to receive invoices") 52 | { 53 | match InvoiceState::from_i32(ln_invoice.state) { 54 | Some(InvoiceState::Settled) => { 55 | let client = client.clone(); 56 | let http = http.clone(); 57 | let db_pool = 
db_pool.clone(); 58 | let keys = keys.clone(); 59 | let oracle = oracle.clone(); 60 | 61 | tokio::spawn(async move { 62 | if let Err(e) = 63 | handle_invoice(ln_invoice, http, client, &keys, db_pool, oracle).await 64 | { 65 | error!("handle invoice error: {e}"); 66 | } 67 | }); 68 | } 69 | None 70 | | Some(InvoiceState::Canceled) 71 | | Some(InvoiceState::Open) 72 | | Some(InvoiceState::Accepted) => {} 73 | } 74 | } 75 | 76 | client.disconnect().await?; 77 | 78 | Ok(()) 79 | } 80 | 81 | pub async fn handle_invoice( 82 | ln_invoice: Invoice, 83 | http: reqwest::Client, 84 | client: Client, 85 | keys: &Keys, 86 | db_pool: Pool>, 87 | oracle: Oracle, 88 | ) -> anyhow::Result<()> { 89 | let mut conn = db_pool.get()?; 90 | let job = Job::get_by_payment_hash(&mut conn, &ln_invoice.r_hash)?; 91 | 92 | if job.is_none() { 93 | // if it is not a job, try to handle it as a zap 94 | return handle_paid_zap(&mut conn, ln_invoice.r_hash, client).await; 95 | } 96 | let job = job.unwrap(); 97 | 98 | let event = job.request(); 99 | let (params, input) = get_job_params(&event, keys).expect("must have valid params"); 100 | let relays = client 101 | .relays() 102 | .await 103 | .keys() 104 | .map(|r| r.to_string()) 105 | .collect::>(); 106 | let job_result = handle_job_request( 107 | &mut conn, event, params, input, keys, &http, &oracle, relays, 108 | ) 109 | .await?; 110 | 111 | if let Some(builder) = job_result.reply_event { 112 | let event_id = client.send_event_builder(builder).await?; 113 | info!("Sent response: {event_id}"); 114 | 115 | Job::set_response_id(&mut conn, job.id, event_id)?; 116 | } 117 | 118 | if let Some(oracle_announcement) = job_result.oracle_announcement { 119 | let event_id = client.send_event(oracle_announcement).await?; 120 | info!("Sent oracle announcement: {event_id}"); 121 | } 122 | 123 | Ok(()) 124 | } 125 | 126 | pub struct HandleJobResult { 127 | pub reply_event: Option, 128 | pub oracle_announcement: Option, 129 | } 130 | 131 | pub async fn 
handle_job_request( 132 | conn: &mut PgConnection, 133 | event: Event, 134 | params: JobParams, 135 | input: String, 136 | keys: &Keys, 137 | http: &reqwest::Client, 138 | oracle: &Oracle, 139 | relays: Vec, 140 | ) -> anyhow::Result { 141 | match params.schedule.as_ref() { 142 | Some(schedule) => { 143 | if schedule.run_date <= SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() { 144 | anyhow::bail!("Schedule run date must be in the future"); 145 | } 146 | 147 | let oracle_data = if let Some(outcomes) = schedule.expected_outputs.clone() { 148 | let event_name = schedule.name.clone().unwrap_or(event.id.to_hex()); 149 | let (id, ann) = oracle 150 | .create_enum_event(event_name, outcomes, schedule.run_date as u32) 151 | .await?; 152 | 153 | let event = kormir::nostr_events::create_announcement_event( 154 | &oracle.nostr_keys(), 155 | &ann, 156 | &relays, 157 | )?; 158 | 159 | Some((id, event)) 160 | } else { 161 | None 162 | }; 163 | 164 | let job = Job::create_scheduled(conn, &event, schedule.run_date)?; 165 | if let Some((event_id, event)) = oracle_data { 166 | EventJob::create(conn, job.id, event_id as i32)?; 167 | oracle 168 | .storage 169 | .add_announcement_event_id(event_id, event.id) 170 | .await?; 171 | } 172 | Ok(HandleJobResult { 173 | reply_event: None, 174 | oracle_announcement: Some(event), 175 | }) 176 | } 177 | None => run_job_request(event, params, input, keys, http) 178 | .await 179 | .map(|reply_event| HandleJobResult { 180 | reply_event: Some(reply_event), 181 | oracle_announcement: None, 182 | }), 183 | } 184 | } 185 | 186 | pub async fn run_job_request( 187 | event: Event, 188 | params: JobParams, 189 | input: String, 190 | keys: &Keys, 191 | http: &reqwest::Client, 192 | ) -> anyhow::Result { 193 | match download_and_run_wasm(params, event.id, http).await { 194 | Ok(result) => { 195 | let mut tags = vec![ 196 | Tag::public_key(event.pubkey), 197 | Tag::event(event.id), 198 | Tag::Generic(TagKind::I, vec![input]), 199 | 
Tag::Request(event.clone()), 200 | ]; 201 | 202 | if event.tags.iter().any(|t| matches!(t, Tag::Encrypted)) { 203 | tags.push(Tag::Encrypted); 204 | let encrypted = nip04::encrypt(keys.secret_key()?, &event.pubkey, result)?; 205 | Ok(EventBuilder::new(Kind::JobResult(6600), encrypted, tags)) 206 | } else { 207 | Ok(EventBuilder::new(Kind::JobResult(6600), result, tags)) 208 | } 209 | } 210 | Err(e) => { 211 | error!("Error running event {}: {e}", event.id); 212 | Ok(EventBuilder::job_feedback( 213 | &event, 214 | DataVendingMachineStatus::Error, 215 | Some(e.to_string()), 216 | 0, 217 | None, 218 | None, 219 | )) 220 | } 221 | } 222 | } 223 | 224 | async fn handle_paid_zap( 225 | conn: &mut PgConnection, 226 | payment_hash: Vec, 227 | client: Client, 228 | ) -> anyhow::Result<()> { 229 | match Zap::find_by_payment_hash(conn, &payment_hash)? { 230 | None => Ok(()), 231 | Some(zap) => { 232 | if zap.note_id.is_some() { 233 | return Ok(()); 234 | } 235 | 236 | let invoice = zap.invoice(); 237 | 238 | let mut preimage = [0u8; 32]; 239 | OsRng.fill_bytes(&mut preimage); 240 | let invoice_hash = sha256::Hash::hash(&preimage); 241 | 242 | let mut payment_secret = [0u8; 32]; 243 | OsRng.fill_bytes(&mut payment_secret); 244 | 245 | let private_key = SecretKey::new(&mut OsRng); 246 | 247 | let amt_msats = invoice 248 | .amount_milli_satoshis() 249 | .expect("Invoice must have an amount"); 250 | 251 | let zap_request = zap.request(); 252 | 253 | info!( 254 | "Received zap for {amt_msats} msats from {}!", 255 | zap_request.pubkey.to_bech32()? 
256 | ); 257 | 258 | let fake_invoice = InvoiceBuilder::new(Currency::Bitcoin) 259 | .amount_milli_satoshis(amt_msats) 260 | .invoice_description(invoice.description()) 261 | .current_timestamp() 262 | .payment_hash(invoice_hash) 263 | .payment_secret(PaymentSecret(payment_secret)) 264 | .min_final_cltv_expiry_delta(144) 265 | .basic_mpp() 266 | .build_signed(|hash| { 267 | Secp256k1::signing_only().sign_ecdsa_recoverable(hash, &private_key) 268 | })?; 269 | 270 | let event = EventBuilder::zap_receipt( 271 | fake_invoice.to_string(), 272 | Some(hex::encode(preimage)), 273 | zap_request, 274 | ); 275 | 276 | let event_id = client.send_event_builder(event).await?; 277 | 278 | info!( 279 | "Broadcasted zap event id: {}!", 280 | event_id.to_bech32().expect("bech32") 281 | ); 282 | 283 | mark_zap_paid(conn, payment_hash, event_id)?; 284 | 285 | Ok(()) 286 | } 287 | } 288 | } 289 | -------------------------------------------------------------------------------- /src/job_listener.rs: -------------------------------------------------------------------------------- 1 | use crate::config::Config; 2 | use crate::invoice_subscriber::{handle_job_request, run_job_request}; 3 | use crate::models::event_job::EventJob; 4 | use crate::models::job::Job; 5 | use crate::models::zap_balance::ZapBalance; 6 | use crate::models::PostgresStorage; 7 | use crate::wasm_handler::JobParams; 8 | use anyhow::anyhow; 9 | use diesel::r2d2::{ConnectionManager, Pool}; 10 | use diesel::PgConnection; 11 | use kormir::nostr_events::create_attestation_event; 12 | use kormir::storage::Storage; 13 | use kormir::{EventDescriptor, Oracle}; 14 | use lightning_invoice::Bolt11Invoice; 15 | use log::{debug, error, info, warn}; 16 | use nostr::nips::nip04; 17 | use nostr::prelude::DataVendingMachineStatus; 18 | use nostr::secp256k1::ThirtyTwoByteHash; 19 | use nostr::{Event, EventBuilder, EventId, Filter, Keys, Kind, Tag, TagKind, Timestamp}; 20 | use nostr_sdk::{Client, RelayPoolNotification}; 21 | use 
std::collections::HashSet; 22 | use std::str::FromStr; 23 | use std::sync::Arc; 24 | use tokio::spawn; 25 | use tokio::sync::Mutex; 26 | use tonic_openssl_lnd::{lnrpc, LndLightningClient}; 27 | 28 | pub async fn listen_for_jobs( 29 | config: &Config, 30 | keys: Keys, 31 | lnd: LndLightningClient, 32 | db_pool: Pool>, 33 | http: reqwest::Client, 34 | oracle: Oracle, 35 | ) -> anyhow::Result<()> { 36 | let client = Client::new(&keys); 37 | client.add_relays(config.relay.clone()).await?; 38 | client.connect().await; 39 | 40 | let filter = Filter::new() 41 | .kind(Kind::JobRequest(5600)) 42 | .since(Timestamp::now()); 43 | 44 | client.subscribe(vec![filter]).await; 45 | 46 | let mut notifications = client.notifications(); 47 | 48 | while let Ok(msg) = notifications.recv().await { 49 | match msg { 50 | RelayPoolNotification::Event { event, .. } => { 51 | if event.kind == Kind::JobRequest(5600) { 52 | // spawn thread to handle event 53 | let client = client.clone(); 54 | let keys = keys.clone(); 55 | let lnd = lnd.clone(); 56 | let db = db_pool.clone(); 57 | let http = http.clone(); 58 | let price = config.price; 59 | let oracle = oracle.clone(); 60 | spawn(async move { 61 | if let Err(e) = 62 | handle_event(price, event, client, keys, lnd, db, &http, oracle).await 63 | { 64 | error!("Error handling event: {e}"); 65 | } 66 | }); 67 | } 68 | } 69 | RelayPoolNotification::Message { .. } => {} 70 | RelayPoolNotification::RelayStatus { .. 
} => {} 71 | RelayPoolNotification::Stop => {} 72 | RelayPoolNotification::Shutdown => {} 73 | } 74 | } 75 | 76 | client.disconnect().await?; 77 | 78 | Ok(()) 79 | } 80 | 81 | pub async fn handle_event( 82 | price: f64, 83 | event: Event, 84 | client: Client, 85 | keys: Keys, 86 | mut lnd: LndLightningClient, 87 | db_pool: Pool>, 88 | http: &reqwest::Client, 89 | oracle: Oracle, 90 | ) -> anyhow::Result<()> { 91 | let (params, input) = get_job_params(&event, &keys)?; 92 | 93 | if params.time > 60 * 10 * 1_000 { 94 | let builder = EventBuilder::job_feedback( 95 | &event, 96 | DataVendingMachineStatus::Error, 97 | Some("Time must be less than 10 minutes".to_string()), 98 | 0, 99 | None, 100 | None, 101 | ); 102 | let event_id = client.send_event_builder(builder).await?; 103 | info!("Sent error response: {event_id}"); 104 | return Ok(()); 105 | } else if params.time < 10 { 106 | let builder = EventBuilder::job_feedback( 107 | &event, 108 | DataVendingMachineStatus::Error, 109 | Some("Time must be greater than 10ms".to_string()), 110 | 0, 111 | None, 112 | None, 113 | ); 114 | let event_id = client.send_event_builder(builder).await?; 115 | info!("Sent error response: {event_id}"); 116 | return Ok(()); 117 | } 118 | 119 | let value_msat = (params.time as f64 * price) as u64; 120 | 121 | let mut conn = db_pool.get()?; 122 | let balance = ZapBalance::get(&mut conn, &event.pubkey)?; 123 | 124 | match balance { 125 | Some(mut b) if (b.balance_msats as u64) >= value_msat => { 126 | info!( 127 | "User has enough balance, deducting {value_msat}msats from balance and running job" 128 | ); 129 | // deduct balance 130 | let amt = value_msat as i32; 131 | b.update_balance(&mut conn, -amt)?; 132 | 133 | let relays = client 134 | .relays() 135 | .await 136 | .keys() 137 | .map(|r| r.to_string()) 138 | .collect::>(); 139 | 140 | // handle job 141 | let job_result = handle_job_request( 142 | &mut conn, 143 | event.clone(), 144 | params, 145 | input, 146 | &keys, 147 | http, 148 | 
&oracle, 149 | relays, 150 | ) 151 | .await?; 152 | 153 | if let Some(builder) = job_result.reply_event { 154 | let event_id = client.send_event_builder(builder).await?; 155 | info!("Sent response: {event_id}"); 156 | 157 | Job::create_completed(&mut conn, &event, &event_id)?; 158 | } 159 | 160 | if let Some(oracle_announcement) = job_result.oracle_announcement { 161 | let event_id = client.send_event(oracle_announcement).await?; 162 | info!("Sent oracle announcement: {event_id}"); 163 | } 164 | } 165 | _ => { 166 | let builder = create_job_feedback_invoice( 167 | &event, 168 | value_msat, 169 | params.schedule.map(|u| u.run_date), 170 | &mut lnd, 171 | &mut conn, 172 | ) 173 | .await?; 174 | let event_id = client.send_event_builder(builder).await?; 175 | info!("Sent response: {event_id}"); 176 | } 177 | } 178 | 179 | Ok(()) 180 | } 181 | 182 | async fn create_job_feedback_invoice( 183 | event: &Event, 184 | value_msat: u64, 185 | scheduled_at: Option, 186 | lnd: &mut LndLightningClient, 187 | conn: &mut PgConnection, 188 | ) -> anyhow::Result { 189 | let request = lnrpc::Invoice { 190 | value_msat: value_msat as i64, 191 | memo: "Wasm DVM Request".to_string(), 192 | expiry: 86_400, // one day 193 | ..Default::default() 194 | }; 195 | let resp = lnd.add_invoice(request).await?.into_inner(); 196 | let bolt11 = resp.payment_request; 197 | let invoice = Bolt11Invoice::from_str(&bolt11)?; 198 | 199 | debug!("Created invoice: {bolt11}"); 200 | 201 | Job::create(conn, invoice.payment_hash().into_32(), event, scheduled_at)?; 202 | 203 | let builder = EventBuilder::job_feedback( 204 | event, 205 | DataVendingMachineStatus::PaymentRequired, 206 | None, 207 | value_msat, 208 | Some(bolt11), 209 | None, 210 | ); 211 | Ok(builder) 212 | } 213 | 214 | pub fn get_job_params(event: &Event, keys: &Keys) -> anyhow::Result<(JobParams, String)> { 215 | // if it is encrypted, decrypt the content to a tags array 216 | let tags = if event.tags.iter().any(|t| matches!(t, Tag::Encrypted)) 
{ 217 | let p_tag = event 218 | .tags 219 | .iter() 220 | .find_map(|t| { 221 | if let Tag::PublicKey { 222 | public_key, 223 | uppercase: false, 224 | .. 225 | } = t 226 | { 227 | Some(*public_key) 228 | } else { 229 | None 230 | } 231 | }) 232 | .ok_or(anyhow!("Encrypted tag not found: {event:?}"))?; 233 | 234 | if p_tag != keys.public_key() { 235 | return Err(anyhow!("Params are not encrypted to us!")); 236 | } 237 | 238 | let cleartext = nip04::decrypt(keys.secret_key()?, &event.pubkey, &event.content)?; 239 | let tags: Vec = serde_json::from_str(&cleartext)?; 240 | 241 | tags 242 | } else { 243 | event.tags.clone() 244 | }; 245 | 246 | let string = tags 247 | .into_iter() 248 | .find_map(|t| { 249 | if t.kind() == TagKind::I { 250 | let vec = t.as_vec(); 251 | if vec.len() == 2 || (vec.len() == 3 && vec[2] == "text") { 252 | Some(vec[1].clone()) 253 | } else { 254 | None 255 | } 256 | } else { 257 | None 258 | } 259 | }) 260 | .ok_or(anyhow!("Valid input tag not found: {event:?}"))?; 261 | 262 | let params: JobParams = serde_json::from_str(&string)?; 263 | 264 | Ok((params, string)) 265 | } 266 | 267 | pub async fn process_schedule_jobs_round( 268 | client: &Client, 269 | keys: Keys, 270 | db_pool: Pool>, 271 | http: reqwest::Client, 272 | oracle: Oracle, 273 | active_jobs: Arc>>, 274 | ) -> anyhow::Result<()> { 275 | let mut conn = db_pool.get()?; 276 | let mut jobs = Job::get_ready_to_run_jobs(&mut conn)?; 277 | drop(conn); 278 | 279 | // update active jobs 280 | let mut active = active_jobs.lock().await; 281 | jobs.retain(|j| !active.contains(&j.id)); 282 | active.extend(jobs.iter().map(|j| j.id)); 283 | drop(active); 284 | 285 | if !jobs.is_empty() { 286 | info!("Running {} scheduled jobs", jobs.len()); 287 | } 288 | 289 | for job in jobs { 290 | let client = client.clone(); 291 | let keys = keys.clone(); 292 | let db_pool = db_pool.clone(); 293 | let http = http.clone(); 294 | let oracle = oracle.clone(); 295 | let active = active_jobs.clone(); 296 | 297 
| spawn(async move { 298 | if let Err(e) = 299 | run_scheduled_job(client, keys, db_pool, http, oracle, active, job).await 300 | { 301 | error!("Error running scheduled job: {e}"); 302 | } 303 | }); 304 | } 305 | 306 | Ok(()) 307 | } 308 | 309 | async fn run_scheduled_job( 310 | client: Client, 311 | keys: Keys, 312 | db_pool: Pool>, 313 | http: reqwest::Client, 314 | oracle: Oracle, 315 | active_jobs: Arc>>, 316 | job: Job, 317 | ) -> anyhow::Result<()> { 318 | let event = job.request(); 319 | let (params, input) = get_job_params(&event, &keys)?; 320 | 321 | let builder = run_job_request(event, params, input, &keys, &http).await?; 322 | let event = builder.to_event(&keys)?; 323 | let outcome = event.content.clone(); 324 | let event_id = client.send_event(event).await?; 325 | info!("Sent response: {event_id}"); 326 | 327 | let mut active = active_jobs.lock().await; 328 | active.remove(&job.id); 329 | 330 | let mut conn = db_pool.get()?; 331 | Job::set_response_id(&mut conn, job.id, event_id)?; 332 | // handle oracle stuff 333 | if let Some(event_job) = EventJob::get_by_job_id(&mut conn, job.id)? { 334 | if let Some(oracle_event) = oracle.storage.get_event(event_job.event_id as u32).await? 
{ 335 | let outcomes = match oracle_event.announcement.oracle_event.event_descriptor { 336 | EventDescriptor::EnumEvent(enum_event) => enum_event.outcomes, 337 | EventDescriptor::DigitDecompositionEvent(_) => { 338 | unimplemented!("Numeric events not implemented") 339 | } 340 | }; 341 | if oracle_event.announcement_event_id.is_none() { 342 | warn!("Oracle event not announced, skipping attestation"); 343 | return Ok(()); 344 | } 345 | 346 | if outcomes.contains(&outcome) { 347 | let att = oracle 348 | .sign_enum_event(event_job.event_id as u32, outcome) 349 | .await?; 350 | let att_event = create_attestation_event( 351 | &oracle.nostr_keys(), 352 | &att, 353 | EventId::from_str(&oracle_event.announcement_event_id.unwrap())?, 354 | )?; 355 | oracle 356 | .storage 357 | .add_attestation_event_id(event_job.event_id as u32, att_event.id) 358 | .await?; 359 | let att_id = client.send_event(att_event).await?; 360 | info!("Sent oracle event outcome: {att_id}"); 361 | } else { 362 | warn!("Outcome not valid for oracle event, got {outcome}, expected one of {outcomes:?}"); 363 | } 364 | } 365 | } 366 | 367 | Ok(()) 368 | } 369 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::too_many_arguments)] 2 | 3 | use crate::config::{Config, ServerKeys}; 4 | use crate::job_listener::{listen_for_jobs, process_schedule_jobs_round}; 5 | use crate::models::{PostgresStorage, MIGRATIONS}; 6 | use crate::routes::{get_invoice, get_lnurl_pay, get_nip05}; 7 | use axum::http::{Method, StatusCode, Uri}; 8 | use axum::routing::get; 9 | use axum::{http, Extension, Router}; 10 | use clap::Parser; 11 | use diesel::r2d2::{ConnectionManager, Pool}; 12 | use diesel::PgConnection; 13 | use diesel_migrations::MigrationHarness; 14 | use kormir::Oracle; 15 | use log::{error, info}; 16 | use nostr::{EventBuilder, Keys, Kind, Metadata, Tag, TagKind, ToBech32}; 17 
| use nostr_sdk::Client; 18 | use std::collections::HashSet; 19 | use std::path::PathBuf; 20 | use std::sync::Arc; 21 | use tokio::signal::unix::{signal, SignalKind}; 22 | use tokio::spawn; 23 | use tokio::sync::{oneshot, Mutex}; 24 | use tokio::time::sleep; 25 | use tonic_openssl_lnd::lnrpc::{GetInfoRequest, GetInfoResponse}; 26 | use tonic_openssl_lnd::LndLightningClient; 27 | use tower_http::cors::{Any, CorsLayer}; 28 | 29 | mod config; 30 | mod invoice_subscriber; 31 | mod job_listener; 32 | mod models; 33 | mod routes; 34 | mod wasm_handler; 35 | 36 | #[derive(Clone)] 37 | pub struct State { 38 | pub db_pool: Pool>, 39 | pub lnd: LndLightningClient, 40 | pub keys: Keys, 41 | pub relays: Vec, 42 | pub domain: String, 43 | } 44 | 45 | #[tokio::main] 46 | async fn main() -> anyhow::Result<()> { 47 | pretty_env_logger::try_init()?; 48 | let config: Config = Config::parse(); 49 | 50 | // DB management 51 | let manager = ConnectionManager::::new(&config.pg_url); 52 | let db_pool = Pool::builder() 53 | .max_size(16) 54 | .test_on_check_out(true) 55 | .build(manager) 56 | .expect("Could not build connection pool"); 57 | 58 | // run migrations 59 | let mut conn = db_pool.get()?; 60 | conn.run_pending_migrations(MIGRATIONS) 61 | .expect("migrations could not run"); 62 | drop(conn); 63 | 64 | // Create the datadir if it doesn't exist 65 | let mut path = PathBuf::from(&config.data_dir); 66 | std::fs::create_dir_all(path.clone())?; 67 | 68 | let keys_path = { 69 | path.push("keys.json"); 70 | path 71 | }; 72 | 73 | let mut server_keys = ServerKeys::get_keys(&keys_path); 74 | let keys = server_keys.keys(); 75 | 76 | let mut events = vec![]; 77 | if server_keys.kind0.is_none() { 78 | let metadata = Metadata { 79 | name: Some("wasm_dvm".to_string()), 80 | display_name: Some("Wasm DVM".to_string()), 81 | picture: 
Some("https://camo.githubusercontent.com/df088e16e0c36ae3804306bdf1ec1f27b0953dc5986bce126b59502a33d8072d/68747470733a2f2f692e696d6775722e636f6d2f6d58626c5233392e706e67".to_string()), 82 | nip05: Some(format!("_@{}", config.domain)), 83 | lud16: Some(format!("wasm-dvm@{}", config.domain)), 84 | ..Default::default() 85 | }; 86 | let event = EventBuilder::metadata(&metadata).to_event(&keys)?; 87 | server_keys.kind0 = Some(event.clone()); 88 | events.push(event) 89 | } 90 | if server_keys.kind31990.is_none() { 91 | let tags = vec![ 92 | Tag::Generic(TagKind::Custom("k".to_string()), vec!["5600".to_string()]), 93 | Tag::Generic( 94 | TagKind::D, 95 | vec!["9b38e816e53e412a934b0c8ff3135875".to_string()], 96 | ), 97 | ]; 98 | let event = EventBuilder::new( 99 | Kind::Custom(31990), 100 | server_keys.kind0.as_ref().unwrap().content.clone(), 101 | tags, 102 | ) 103 | .to_event(&keys)?; 104 | server_keys.kind31990 = Some(event.clone()); 105 | events.push(event) 106 | } 107 | 108 | if !events.is_empty() { 109 | // send to relays 110 | let client = Client::new(&keys); 111 | client.add_relays(config.relay.clone()).await?; 112 | client.connect().await; 113 | client.batch_event(events, Default::default()).await?; 114 | client.disconnect().await?; 115 | info!("Broadcasted metadata events"); 116 | // write to storage 117 | server_keys.write(&keys_path); 118 | } 119 | 120 | // connect to lnd 121 | let mut client = tonic_openssl_lnd::connect( 122 | config.lnd_host.clone(), 123 | config.lnd_port, 124 | config.cert_file(), 125 | config.macaroon_file(), 126 | ) 127 | .await 128 | .expect("failed to connect"); 129 | 130 | let mut ln_client = client.lightning().clone(); 131 | let lnd_info: GetInfoResponse = ln_client 132 | .get_info(GetInfoRequest {}) 133 | .await 134 | .expect("Failed to get lnd info") 135 | .into_inner(); 136 | 137 | let lnd = client.lightning().clone(); 138 | 139 | info!("Connected to LND: {}", lnd_info.identity_pubkey); 140 | 141 | // initialize oracle, need to 
convert to old rust-bitcoin types 142 | let oracle = { 143 | let signing_key = 144 | kormir::bitcoin::secp256k1::SecretKey::from_slice(&keys.secret_key()?.secret_bytes())?; 145 | let pubkey = kormir::bitcoin::XOnlyPublicKey::from_slice(&keys.public_key().serialize())?; 146 | Oracle::from_signing_key(PostgresStorage::new(db_pool.clone(), pubkey)?, signing_key)? 147 | }; 148 | 149 | let http = reqwest::Client::new(); 150 | 151 | let invoice_lnd = lnd.clone(); 152 | let invoice_relays = config.relay.clone(); 153 | let invoice_keys = keys.clone(); 154 | let invoice_db_pool = db_pool.clone(); 155 | let invoice_http = http.clone(); 156 | let invoice_oracle = oracle.clone(); 157 | spawn(async move { 158 | loop { 159 | if let Err(e) = invoice_subscriber::start_invoice_subscription( 160 | invoice_lnd.clone(), 161 | invoice_relays.clone(), 162 | invoice_keys.clone(), 163 | invoice_http.clone(), 164 | invoice_db_pool.clone(), 165 | invoice_oracle.clone(), 166 | ) 167 | .await 168 | { 169 | error!("Error in invoice loop: {e}"); 170 | } 171 | } 172 | }); 173 | 174 | let bech32 = keys.public_key().to_bech32()?; 175 | let jobs_config = config.clone(); 176 | let jobs_keys = keys.clone(); 177 | let jobs_lnd = lnd.clone(); 178 | let jobs_db_pool = db_pool.clone(); 179 | let jobs_http = http.clone(); 180 | let jobs_oracle = oracle.clone(); 181 | spawn(async move { 182 | loop { 183 | info!("Starting listen with key: {bech32}"); 184 | if let Err(e) = listen_for_jobs( 185 | &jobs_config, 186 | jobs_keys.clone(), 187 | jobs_lnd.clone(), 188 | jobs_db_pool.clone(), 189 | jobs_http.clone(), 190 | jobs_oracle.clone(), 191 | ) 192 | .await 193 | { 194 | error!("Error listening for jobs: {e}"); 195 | } 196 | } 197 | }); 198 | 199 | // listen for scheduled jobs 200 | let schedule_db_pool = db_pool.clone(); 201 | let schedule_keys = keys.clone(); 202 | let relays = config.relay.clone(); 203 | spawn(async move { 204 | // check for scheduled jobs every 10 seconds 205 | let duration = 
std::time::Duration::from_secs(10); 206 | 207 | let client = Client::new(&schedule_keys); 208 | client 209 | .add_relays(relays) 210 | .await 211 | .unwrap_or_else(|_| panic!("Failed to add relays for scheduled jobs")); 212 | client.connect().await; 213 | 214 | // Create a lock on the active jobs we are running so we don't run the same job twice 215 | let active_jobs: Arc>> = Arc::new(Mutex::new(HashSet::new())); 216 | 217 | info!("Starting scheduled job loop"); 218 | loop { 219 | if let Err(e) = process_schedule_jobs_round( 220 | &client, 221 | schedule_keys.clone(), 222 | schedule_db_pool.clone(), 223 | http.clone(), 224 | oracle.clone(), 225 | active_jobs.clone(), 226 | ) 227 | .await 228 | { 229 | error!("Error processing scheduled jobs: {e}"); 230 | } 231 | sleep(duration).await 232 | } 233 | }); 234 | 235 | let state = State { 236 | db_pool, 237 | lnd, 238 | keys, 239 | relays: config.relay.clone(), 240 | domain: config.domain.clone(), 241 | }; 242 | 243 | let addr: std::net::SocketAddr = format!("{}:{}", config.bind, config.port) 244 | .parse() 245 | .expect("Failed to parse bind/port for webserver"); 246 | 247 | info!("Webserver running on http://{}", addr); 248 | 249 | let server_router = Router::new() 250 | .route("/get-invoice/:hash", get(get_invoice)) 251 | .route("/.well-known/lnurlp/:name", get(get_lnurl_pay)) 252 | .route("/.well-known/nostr.json", get(get_nip05)) 253 | .fallback(fallback) 254 | .layer(Extension(state)) 255 | .layer( 256 | CorsLayer::new() 257 | .allow_origin(Any) 258 | .allow_headers(vec![http::header::CONTENT_TYPE]) 259 | .allow_methods([Method::GET, Method::POST]), 260 | ); 261 | 262 | let server = axum::Server::bind(&addr).serve(server_router.into_make_service()); 263 | 264 | // Set up a oneshot channel to handle shutdown signal 265 | let (tx, rx) = oneshot::channel(); 266 | 267 | // Spawn a task to listen for shutdown signals 268 | spawn(async move { 269 | let mut term_signal = signal(SignalKind::terminate()) 270 | .map_err(|e| 
error!("failed to install TERM signal handler: {e}")) 271 | .unwrap(); 272 | let mut int_signal = signal(SignalKind::interrupt()) 273 | .map_err(|e| { 274 | error!("failed to install INT signal handler: {e}"); 275 | }) 276 | .unwrap(); 277 | 278 | tokio::select! { 279 | _ = term_signal.recv() => { 280 | info!("Received SIGTERM"); 281 | }, 282 | _ = int_signal.recv() => { 283 | info!("Received SIGINT"); 284 | }, 285 | } 286 | 287 | let _ = tx.send(()); 288 | }); 289 | 290 | let graceful = server.with_graceful_shutdown(async { 291 | let _ = rx.await; 292 | }); 293 | 294 | // Await the server to receive the shutdown signal 295 | if let Err(e) = graceful.await { 296 | error!("shutdown error: {}", e); 297 | } 298 | 299 | info!("Graceful shutdown complete"); 300 | 301 | Ok(()) 302 | } 303 | 304 | async fn fallback(uri: Uri) -> (StatusCode, String) { 305 | (StatusCode::NOT_FOUND, format!("No route for {}", uri)) 306 | } 307 | -------------------------------------------------------------------------------- /src/models/event.rs: -------------------------------------------------------------------------------- 1 | use diesel::prelude::*; 2 | use kormir::OracleEvent; 3 | use kormir::{Readable, Signature}; 4 | use nostr::EventId; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | use super::schema::events; 8 | 9 | #[derive( 10 | Queryable, 11 | Insertable, 12 | Identifiable, 13 | AsChangeset, 14 | Serialize, 15 | Deserialize, 16 | Debug, 17 | Clone, 18 | PartialEq, 19 | )] 20 | #[diesel(primary_key(id))] 21 | #[diesel(check_for_backend(diesel::pg::Pg))] 22 | pub struct Event { 23 | pub id: i32, 24 | announcement_signature: Vec, 25 | oracle_event: Vec, 26 | pub name: String, 27 | pub is_enum: bool, 28 | pub announcement_event_id: Option>, 29 | pub attestation_event_id: Option>, 30 | created_at: chrono::NaiveDateTime, 31 | updated_at: chrono::NaiveDateTime, 32 | } 33 | 34 | #[derive(Insertable, AsChangeset)] 35 | #[diesel(table_name = events)] 36 | pub struct NewEvent<'a> { 37 | 
pub announcement_signature: Vec, 38 | pub oracle_event: Vec, 39 | pub name: &'a str, 40 | pub is_enum: bool, 41 | } 42 | 43 | impl Event { 44 | pub fn announcement_signature(&self) -> Signature { 45 | Signature::from_slice(&self.announcement_signature).expect("invalid signature") 46 | } 47 | 48 | pub fn announcement_event_id(&self) -> Option { 49 | self.announcement_event_id 50 | .as_ref() 51 | .map(|id| EventId::from_slice(id).expect("invalid even tid")) 52 | } 53 | 54 | pub fn attestation_event_id(&self) -> Option { 55 | self.attestation_event_id 56 | .as_ref() 57 | .map(|id| EventId::from_slice(id).expect("invalid event id")) 58 | } 59 | 60 | pub fn oracle_event(&self) -> OracleEvent { 61 | let mut cursor = std::io::Cursor::new(&self.oracle_event); 62 | OracleEvent::read(&mut cursor).expect("invalid oracle event") 63 | } 64 | 65 | pub fn get_event_count(conn: &mut PgConnection) -> anyhow::Result { 66 | let count = events::table.count().get_result(conn)?; 67 | Ok(count) 68 | } 69 | 70 | pub fn get_by_id(conn: &mut PgConnection, id: i32) -> anyhow::Result> { 71 | Ok(events::table.find(id).first::(conn).optional()?) 72 | } 73 | 74 | pub fn get_by_name(conn: &mut PgConnection, name: &str) -> anyhow::Result> { 75 | Ok(events::table 76 | .filter(events::name.eq(name)) 77 | .first::(conn) 78 | .optional()?) 79 | } 80 | 81 | pub fn list(conn: &mut PgConnection) -> anyhow::Result> { 82 | Ok(events::table.load::(conn)?) 
83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/models/event_job.rs: -------------------------------------------------------------------------------- 1 | use diesel::prelude::*; 2 | use serde::{Deserialize, Serialize}; 3 | 4 | use super::schema::event_jobs; 5 | 6 | #[derive( 7 | Queryable, 8 | Insertable, 9 | Identifiable, 10 | AsChangeset, 11 | Serialize, 12 | Deserialize, 13 | Debug, 14 | Clone, 15 | PartialEq, 16 | )] 17 | #[diesel(primary_key(job_id))] 18 | #[diesel(check_for_backend(diesel::pg::Pg))] 19 | pub struct EventJob { 20 | pub job_id: i32, 21 | pub event_id: i32, 22 | } 23 | 24 | impl EventJob { 25 | pub fn create(conn: &mut PgConnection, job_id: i32, event_id: i32) -> anyhow::Result { 26 | Ok(diesel::insert_into(event_jobs::table) 27 | .values(&EventJob { job_id, event_id }) 28 | .get_result(conn)?) 29 | } 30 | 31 | pub fn get_by_job_id(conn: &mut PgConnection, job_id: i32) -> anyhow::Result> { 32 | Ok(event_jobs::table 33 | .filter(event_jobs::job_id.eq(job_id)) 34 | .first::(conn) 35 | .optional()?) 36 | } 37 | 38 | pub fn get_by_event_id(conn: &mut PgConnection, event_id: i32) -> anyhow::Result> { 39 | Ok(event_jobs::table 40 | .filter(event_jobs::event_id.eq(event_id)) 41 | .first::(conn) 42 | .optional()?) 
43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/models/event_nonce.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::key::XOnlyPublicKey; 2 | use diesel::prelude::*; 3 | use kormir::Signature; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | use super::schema::event_nonces; 7 | 8 | #[derive( 9 | Queryable, 10 | Insertable, 11 | Identifiable, 12 | AsChangeset, 13 | Serialize, 14 | Deserialize, 15 | Debug, 16 | Clone, 17 | PartialEq, 18 | )] 19 | #[diesel(primary_key(id))] 20 | #[diesel(check_for_backend(diesel::pg::Pg))] 21 | pub struct EventNonce { 22 | pub id: i32, 23 | pub event_id: i32, 24 | pub index: i32, 25 | nonce: Vec, 26 | pub signature: Option>, 27 | pub outcome: Option, 28 | created_at: chrono::NaiveDateTime, 29 | updated_at: chrono::NaiveDateTime, 30 | } 31 | 32 | #[derive(Insertable, AsChangeset)] 33 | #[diesel(table_name = event_nonces)] 34 | pub struct NewEventNonce { 35 | pub id: i32, 36 | pub event_id: i32, 37 | pub index: i32, 38 | pub nonce: Vec, 39 | } 40 | 41 | impl EventNonce { 42 | pub fn nonce(&self) -> XOnlyPublicKey { 43 | XOnlyPublicKey::from_slice(&self.nonce).expect("invalid nonce") 44 | } 45 | 46 | pub fn signature(&self) -> Option { 47 | self.signature 48 | .as_ref() 49 | .map(|sig| Signature::from_slice(sig).expect("invalid signature")) 50 | } 51 | 52 | pub fn outcome_and_sig(&self) -> Option<(String, Signature)> { 53 | match (self.outcome.clone(), self.signature()) { 54 | (Some(outcome), Some(sig)) => Some((outcome, sig)), 55 | _ => None, 56 | } 57 | } 58 | 59 | pub fn get_next_id(conn: &mut PgConnection) -> anyhow::Result { 60 | let num = event_nonces::table 61 | .select(diesel::dsl::max(event_nonces::id)) 62 | .first::>(conn)? 
63 | .map(|id| id + 1) 64 | .unwrap_or(0); 65 | Ok(num) 66 | } 67 | 68 | pub fn get_by_id(conn: &mut PgConnection, id: i32) -> anyhow::Result> { 69 | Ok(event_nonces::table 70 | .find(id) 71 | .first::(conn) 72 | .optional()?) 73 | } 74 | 75 | pub fn get_by_event_id(conn: &mut PgConnection, event_id: i32) -> anyhow::Result> { 76 | Ok(event_nonces::table 77 | .filter(event_nonces::event_id.eq(event_id)) 78 | .order_by(event_nonces::index.asc()) 79 | .get_results(conn)?) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/models/job.rs: -------------------------------------------------------------------------------- 1 | use crate::models::schema::jobs; 2 | use diesel::{ 3 | AsChangeset, ExpressionMethods, Identifiable, Insertable, OptionalExtension, PgConnection, 4 | QueryDsl, Queryable, RunQueryDsl, 5 | }; 6 | use nostr::{Event, EventId}; 7 | use serde::{Deserialize, Serialize}; 8 | use serde_json::Value; 9 | 10 | #[derive( 11 | Queryable, 12 | Insertable, 13 | Identifiable, 14 | AsChangeset, 15 | Serialize, 16 | Deserialize, 17 | Debug, 18 | Clone, 19 | PartialEq, 20 | Eq, 21 | )] 22 | #[diesel(primary_key(id))] 23 | #[diesel(check_for_backend(diesel::pg::Pg))] 24 | pub struct Job { 25 | pub id: i32, 26 | payment_hash: Vec, 27 | request: Value, 28 | response_id: Option>, 29 | created_at: chrono::NaiveDateTime, 30 | updated_at: chrono::NaiveDateTime, 31 | scheduled_at: Option, 32 | } 33 | 34 | #[derive(Insertable, AsChangeset)] 35 | #[diesel(table_name = jobs)] 36 | struct NewJob { 37 | payment_hash: Vec, 38 | request: Value, 39 | scheduled_at: Option, 40 | } 41 | 42 | #[derive(Insertable, AsChangeset)] 43 | #[diesel(table_name = jobs)] 44 | struct CompletedJob { 45 | payment_hash: Vec, 46 | request: Value, 47 | response_id: Vec, 48 | } 49 | 50 | impl Job { 51 | pub fn request(&self) -> Event { 52 | serde_json::from_value(self.request.clone()).expect("invalid request") 53 | } 54 | 55 | pub fn response_id(&self) -> 
Option { 56 | self.response_id 57 | .as_ref() 58 | .map(|v| EventId::from_slice(v).expect("invalid response id")) 59 | } 60 | 61 | pub fn create( 62 | conn: &mut PgConnection, 63 | payment_hash: [u8; 32], 64 | request: &Event, 65 | scheduled_at: Option, 66 | ) -> anyhow::Result { 67 | let scheduled_at = scheduled_at 68 | .map(|t| { 69 | chrono::NaiveDateTime::from_timestamp_opt(t as i64, 0) 70 | .ok_or(anyhow::anyhow!("invalid timestamp")) 71 | }) 72 | .transpose()?; 73 | 74 | let new_job = NewJob { 75 | payment_hash: payment_hash.to_vec(), 76 | request: serde_json::to_value(request)?, 77 | scheduled_at, 78 | }; 79 | 80 | let res = diesel::insert_into(jobs::table) 81 | .values(new_job) 82 | .get_result::(conn)?; 83 | 84 | Ok(res) 85 | } 86 | 87 | pub fn create_scheduled( 88 | conn: &mut PgConnection, 89 | request: &Event, 90 | scheduled_at: u64, 91 | ) -> anyhow::Result { 92 | let scheduled_at = chrono::NaiveDateTime::from_timestamp_opt(scheduled_at as i64, 0) 93 | .ok_or(anyhow::anyhow!("invalid timestamp"))?; 94 | let new_job = NewJob { 95 | payment_hash: request.id.to_bytes().to_vec(), 96 | request: serde_json::to_value(request)?, 97 | scheduled_at: Some(scheduled_at), 98 | }; 99 | 100 | let res = diesel::insert_into(jobs::table) 101 | .values(new_job) 102 | .get_result::(conn)?; 103 | 104 | Ok(res) 105 | } 106 | 107 | pub fn create_completed( 108 | conn: &mut PgConnection, 109 | request: &Event, 110 | response_id: &EventId, 111 | ) -> anyhow::Result { 112 | let new_job = CompletedJob { 113 | payment_hash: request.id.to_bytes().to_vec(), 114 | request: serde_json::to_value(request)?, 115 | response_id: response_id.to_bytes().to_vec(), 116 | }; 117 | 118 | let res = diesel::insert_into(jobs::table) 119 | .values(new_job) 120 | .get_result::(conn)?; 121 | 122 | Ok(res) 123 | } 124 | 125 | pub fn get_by_payment_hash( 126 | conn: &mut PgConnection, 127 | payment_hash: &Vec, 128 | ) -> anyhow::Result> { 129 | let res = jobs::table 130 | 
.filter(jobs::payment_hash.eq(payment_hash)) 131 | .first::(conn) 132 | .optional()?; 133 | 134 | Ok(res) 135 | } 136 | 137 | pub fn set_response_id( 138 | conn: &mut PgConnection, 139 | id: i32, 140 | response_id: EventId, 141 | ) -> anyhow::Result { 142 | let job = diesel::update(jobs::table) 143 | .filter(jobs::id.eq(id)) 144 | .set(jobs::response_id.eq(response_id.as_bytes())) 145 | .get_result::(conn)?; 146 | 147 | Ok(job) 148 | } 149 | 150 | /// Get jobs that we haven't run and who's scheduled time is in the past 151 | pub fn get_ready_to_run_jobs(conn: &mut PgConnection) -> anyhow::Result> { 152 | let res = jobs::table 153 | .filter(jobs::response_id.is_null()) 154 | .filter(jobs::scheduled_at.lt(diesel::dsl::now)) 155 | .load::(conn)?; 156 | 157 | Ok(res) 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /src/models/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::models::event::NewEvent; 2 | use crate::models::event_nonce::{EventNonce, NewEventNonce}; 3 | use crate::models::zap::Zap; 4 | use crate::models::zap_balance::ZapBalance; 5 | use anyhow::anyhow; 6 | use diesel::r2d2::{ConnectionManager, Pool}; 7 | use diesel::{Connection, ExpressionMethods, PgConnection, RunQueryDsl}; 8 | use diesel_migrations::{embed_migrations, EmbeddedMigrations}; 9 | use kormir::bitcoin::XOnlyPublicKey; 10 | use kormir::error::Error; 11 | use kormir::storage::{OracleEventData, Storage}; 12 | use kormir::{EventDescriptor, OracleAnnouncement, Signature, Writeable}; 13 | use lightning_invoice::Bolt11Invoice; 14 | use log::info; 15 | use nostr::{Event, EventId, ToBech32}; 16 | use std::collections::HashMap; 17 | use std::sync::atomic::{AtomicU32, Ordering}; 18 | use std::sync::Arc; 19 | 20 | pub mod event; 21 | pub mod event_job; 22 | pub mod event_nonce; 23 | pub mod job; 24 | pub mod oracle_metadata; 25 | mod schema; 26 | pub mod zap; 27 | pub mod zap_balance; 28 | 29 | 
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!(); 30 | 31 | pub fn create_zap( 32 | conn: &mut PgConnection, 33 | invoice: &Bolt11Invoice, 34 | request: &Event, 35 | for_npub: nostr::PublicKey, 36 | ) -> anyhow::Result { 37 | conn.transaction(|conn| { 38 | let bal = ZapBalance::get(conn, &for_npub)?; 39 | if bal.is_none() { 40 | ZapBalance::create(conn, for_npub)?; 41 | } 42 | 43 | Zap::create(conn, invoice, request, &for_npub) 44 | }) 45 | } 46 | 47 | pub fn mark_zap_paid( 48 | conn: &mut PgConnection, 49 | payment_hash: Vec, 50 | note_id: EventId, 51 | ) -> anyhow::Result<()> { 52 | conn.transaction(|conn| { 53 | let zap = Zap::update_note_id(conn, payment_hash, note_id)?; 54 | let npub = zap.npub(); 55 | let bal = ZapBalance::get(conn, &npub)?; 56 | if let Some(mut bal) = bal { 57 | bal.update_balance(conn, zap.amount_msats)?; 58 | info!( 59 | "Updated balance for {}: {}msats", 60 | npub.to_bech32()?, 61 | bal.balance_msats 62 | ); 63 | } 64 | 65 | Ok(()) 66 | }) 67 | } 68 | 69 | #[derive(Clone)] 70 | pub struct PostgresStorage { 71 | db_pool: Pool>, 72 | oracle_public_key: XOnlyPublicKey, 73 | current_index: Arc, 74 | } 75 | 76 | impl PostgresStorage { 77 | pub fn new( 78 | db_pool: Pool>, 79 | oracle_public_key: XOnlyPublicKey, 80 | ) -> anyhow::Result { 81 | let mut conn = db_pool.get()?; 82 | let current_index = EventNonce::get_next_id(&mut conn)?; 83 | 84 | Ok(Self { 85 | db_pool, 86 | oracle_public_key, 87 | current_index: Arc::new(AtomicU32::new(current_index as u32)), 88 | }) 89 | } 90 | 91 | #[allow(unused)] 92 | pub async fn list_events(&self) -> Result, Error> { 93 | let mut conn = self.db_pool.get().map_err(|_| Error::StorageFailure)?; 94 | 95 | conn.transaction::<_, anyhow::Error, _>(|conn| { 96 | let events = event::Event::list(conn)?; 97 | 98 | let mut oracle_events = Vec::with_capacity(events.len()); 99 | for event in events { 100 | let mut event_nonces = EventNonce::get_by_event_id(conn, event.id)?; 101 | 
event_nonces.sort_by_key(|nonce| nonce.index); 102 | 103 | let indexes = event_nonces 104 | .iter() 105 | .map(|nonce| nonce.index as u32) 106 | .collect::>(); 107 | 108 | let signatures = event_nonces 109 | .into_iter() 110 | .flat_map(|nonce| nonce.outcome_and_sig()) 111 | .collect(); 112 | 113 | let data = OracleEventData { 114 | announcement: OracleAnnouncement { 115 | announcement_signature: event.announcement_signature(), 116 | oracle_public_key: self.oracle_public_key, 117 | oracle_event: event.oracle_event(), 118 | }, 119 | indexes, 120 | signatures, 121 | announcement_event_id: None, 122 | attestation_event_id: None, 123 | }; 124 | oracle_events.push(data); 125 | } 126 | 127 | Ok(oracle_events) 128 | }) 129 | .map_err(|_| Error::StorageFailure) 130 | } 131 | 132 | pub async fn add_announcement_event_id(&self, id: u32, event_id: EventId) -> Result<(), Error> { 133 | let mut conn = self.db_pool.get().map_err(|_| Error::StorageFailure)?; 134 | let id = id as i32; 135 | 136 | diesel::update(schema::events::table) 137 | .filter(schema::events::id.eq(id)) 138 | .set(schema::events::announcement_event_id.eq(Some(event_id.as_bytes().to_vec()))) 139 | .execute(&mut conn) 140 | .map_err(|e| { 141 | log::error!("Failed to add announcement event id: {}", e); 142 | Error::StorageFailure 143 | })?; 144 | 145 | Ok(()) 146 | } 147 | 148 | pub async fn add_attestation_event_id(&self, id: u32, event_id: EventId) -> Result<(), Error> { 149 | let mut conn = self.db_pool.get().map_err(|_| Error::StorageFailure)?; 150 | let id = id as i32; 151 | 152 | diesel::update(schema::events::table) 153 | .filter(schema::events::id.eq(id)) 154 | .set(schema::events::attestation_event_id.eq(Some(event_id.as_bytes().to_vec()))) 155 | .execute(&mut conn) 156 | .map_err(|e| { 157 | log::error!("Failed to add announcement event id: {}", e); 158 | Error::StorageFailure 159 | })?; 160 | 161 | Ok(()) 162 | } 163 | } 164 | 165 | impl Storage for PostgresStorage { 166 | async fn 
get_next_nonce_indexes(&self, num: usize) -> Result, Error> { 167 | let mut current_index = self.current_index.fetch_add(num as u32, Ordering::SeqCst); 168 | let mut indexes = Vec::with_capacity(num); 169 | for _ in 0..num { 170 | indexes.push(current_index); 171 | current_index += 1; 172 | } 173 | Ok(indexes) 174 | } 175 | 176 | async fn save_announcement( 177 | &self, 178 | announcement: OracleAnnouncement, 179 | indexes: Vec, 180 | ) -> Result { 181 | let is_enum = match announcement.oracle_event.event_descriptor { 182 | EventDescriptor::EnumEvent(_) => true, 183 | EventDescriptor::DigitDecompositionEvent(_) => false, 184 | }; 185 | let new_event = NewEvent { 186 | announcement_signature: announcement.announcement_signature.encode(), 187 | oracle_event: announcement.oracle_event.encode(), 188 | name: &announcement.oracle_event.event_id, 189 | is_enum, 190 | }; 191 | 192 | let mut conn = self.db_pool.get().map_err(|_| Error::StorageFailure)?; 193 | conn.transaction::<_, anyhow::Error, _>(|conn| { 194 | let event_id = diesel::insert_into(schema::events::table) 195 | .values(&new_event) 196 | .returning(schema::events::id) 197 | .get_result(conn)?; 198 | 199 | let new_event_nonces = indexes 200 | .into_iter() 201 | .zip(announcement.oracle_event.oracle_nonces) 202 | .enumerate() 203 | .map(|(index, (id, nonce))| NewEventNonce { 204 | id: id as i32, 205 | event_id, 206 | index: index as i32, 207 | nonce: nonce.serialize().to_vec(), 208 | }) 209 | .collect::>(); 210 | 211 | diesel::insert_into(schema::event_nonces::table) 212 | .values(&new_event_nonces) 213 | .execute(conn)?; 214 | 215 | Ok(event_id as u32) 216 | }) 217 | .map_err(|_| Error::StorageFailure) 218 | } 219 | 220 | async fn save_signatures( 221 | &self, 222 | id: u32, 223 | signatures: HashMap, 224 | ) -> Result { 225 | let id = id as i32; 226 | let mut conn = self.db_pool.get().map_err(|_| Error::StorageFailure)?; 227 | 228 | conn.transaction(|conn| { 229 | let event = event::Event::get_by_id(conn, 
id)?.ok_or(anyhow!("Not Found"))?; 230 | 231 | let mut event_nonces = EventNonce::get_by_event_id(conn, id)?; 232 | if event_nonces.len() != signatures.len() { 233 | return Err(anyhow!("Invalid number of signatures")); 234 | } 235 | event_nonces.sort_by_key(|nonce| nonce.index); 236 | let indexes = event_nonces 237 | .into_iter() 238 | .zip(signatures.clone()) 239 | .map(|(mut nonce, (outcome, sig))| { 240 | nonce.outcome = Some(outcome); 241 | nonce.signature = Some(sig.encode()); 242 | 243 | // set in db 244 | diesel::update(&nonce).set(&nonce).execute(conn)?; 245 | 246 | Ok(nonce.id as u32) 247 | }) 248 | .collect::>>()?; 249 | 250 | Ok(OracleEventData { 251 | announcement: OracleAnnouncement { 252 | announcement_signature: event.announcement_signature(), 253 | oracle_public_key: self.oracle_public_key, 254 | oracle_event: event.oracle_event(), 255 | }, 256 | indexes, 257 | signatures, 258 | announcement_event_id: event.announcement_event_id().map(|id| id.to_hex()), 259 | attestation_event_id: event.attestation_event_id().map(|id| id.to_hex()), 260 | }) 261 | }) 262 | .map_err(|_| Error::StorageFailure) 263 | } 264 | 265 | async fn get_event(&self, id: u32) -> Result, Error> { 266 | let id = id as i32; 267 | let mut conn = self.db_pool.get().map_err(|_| Error::StorageFailure)?; 268 | 269 | conn.transaction::<_, anyhow::Error, _>(|conn| { 270 | let Some(event) = event::Event::get_by_id(conn, id)? 
else { 271 | return Ok(None); 272 | }; 273 | 274 | let mut event_nonces = EventNonce::get_by_event_id(conn, id)?; 275 | event_nonces.sort_by_key(|nonce| nonce.index); 276 | 277 | let indexes = event_nonces 278 | .iter() 279 | .map(|nonce| nonce.index as u32) 280 | .collect::>(); 281 | 282 | let signatures = event_nonces 283 | .into_iter() 284 | .flat_map(|nonce| nonce.outcome_and_sig()) 285 | .collect(); 286 | 287 | Ok(Some(OracleEventData { 288 | announcement: OracleAnnouncement { 289 | announcement_signature: event.announcement_signature(), 290 | oracle_public_key: self.oracle_public_key, 291 | oracle_event: event.oracle_event(), 292 | }, 293 | indexes, 294 | signatures, 295 | announcement_event_id: event.announcement_event_id().map(|id| id.to_hex()), 296 | attestation_event_id: event.attestation_event_id().map(|id| id.to_hex()), 297 | })) 298 | }) 299 | .map_err(|_| Error::StorageFailure) 300 | } 301 | } 302 | -------------------------------------------------------------------------------- /src/models/oracle_metadata.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::key::XOnlyPublicKey; 2 | use diesel::prelude::*; 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use super::schema::oracle_metadata; 6 | 7 | #[derive( 8 | Queryable, 9 | Insertable, 10 | Identifiable, 11 | AsChangeset, 12 | Serialize, 13 | Deserialize, 14 | Debug, 15 | Clone, 16 | PartialEq, 17 | )] 18 | #[diesel(primary_key(pubkey))] 19 | #[diesel(check_for_backend(diesel::pg::Pg))] 20 | #[diesel(table_name = oracle_metadata)] 21 | pub struct OracleMetadata { 22 | pubkey: Vec, 23 | pub name: String, 24 | created_at: chrono::NaiveDateTime, 25 | updated_at: chrono::NaiveDateTime, 26 | singleton_constant: bool, 27 | } 28 | 29 | #[derive(Insertable, AsChangeset)] 30 | #[diesel(table_name = oracle_metadata)] 31 | struct NewOracleMetadata<'a> { 32 | pubkey: Vec, 33 | name: &'a str, 34 | } 35 | 36 | impl OracleMetadata { 37 | pub fn pubkey(&self) -> 
XOnlyPublicKey { 38 | XOnlyPublicKey::from_slice(&self.pubkey).expect("invalid pubkey") 39 | } 40 | 41 | pub fn get(conn: &mut PgConnection) -> anyhow::Result> { 42 | Ok(oracle_metadata::table 43 | .filter(oracle_metadata::singleton_constant.eq(true)) 44 | .first::(conn) 45 | .optional()?) 46 | } 47 | 48 | pub fn upsert(conn: &mut PgConnection, pubkey: XOnlyPublicKey) -> anyhow::Result<()> { 49 | let pubkey = pubkey.serialize().to_vec(); 50 | let name = "Kormir"; 51 | let new = NewOracleMetadata { pubkey, name }; 52 | diesel::insert_into(oracle_metadata::table) 53 | .values(&new) 54 | .on_conflict(oracle_metadata::pubkey) 55 | .do_update() 56 | .set(&new) 57 | .execute(conn)?; 58 | Ok(()) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/models/schema.rs: -------------------------------------------------------------------------------- 1 | // @generated automatically by Diesel CLI. 2 | 3 | diesel::table! { 4 | event_jobs (job_id) { 5 | job_id -> Int4, 6 | event_id -> Int4, 7 | } 8 | } 9 | 10 | diesel::table! { 11 | event_nonces (id) { 12 | id -> Int4, 13 | event_id -> Int4, 14 | index -> Int4, 15 | nonce -> Bytea, 16 | signature -> Nullable, 17 | outcome -> Nullable, 18 | created_at -> Timestamp, 19 | updated_at -> Timestamp, 20 | } 21 | } 22 | 23 | diesel::table! { 24 | events (id) { 25 | id -> Int4, 26 | announcement_signature -> Bytea, 27 | oracle_event -> Bytea, 28 | name -> Text, 29 | is_enum -> Bool, 30 | announcement_event_id -> Nullable, 31 | attestation_event_id -> Nullable, 32 | created_at -> Timestamp, 33 | updated_at -> Timestamp, 34 | } 35 | } 36 | 37 | diesel::table! { 38 | jobs (id) { 39 | id -> Int4, 40 | payment_hash -> Bytea, 41 | request -> Jsonb, 42 | response_id -> Nullable, 43 | created_at -> Timestamp, 44 | updated_at -> Timestamp, 45 | scheduled_at -> Nullable, 46 | } 47 | } 48 | 49 | diesel::table! 
{ 50 | oracle_metadata (pubkey) { 51 | pubkey -> Bytea, 52 | name -> Text, 53 | created_at -> Timestamp, 54 | updated_at -> Timestamp, 55 | singleton_constant -> Bool, 56 | } 57 | } 58 | 59 | diesel::table! { 60 | zap_balances (npub) { 61 | npub -> Bytea, 62 | balance_msats -> Int4, 63 | created_at -> Timestamp, 64 | } 65 | } 66 | 67 | diesel::table! { 68 | zaps (payment_hash) { 69 | payment_hash -> Bytea, 70 | invoice -> Text, 71 | amount_msats -> Int4, 72 | request -> Jsonb, 73 | npub -> Bytea, 74 | note_id -> Nullable, 75 | created_at -> Timestamp, 76 | } 77 | } 78 | 79 | diesel::joinable!(event_jobs -> events (event_id)); 80 | diesel::joinable!(event_jobs -> jobs (job_id)); 81 | diesel::joinable!(event_nonces -> events (event_id)); 82 | diesel::joinable!(zaps -> zap_balances (npub)); 83 | 84 | diesel::allow_tables_to_appear_in_same_query!( 85 | event_jobs, 86 | event_nonces, 87 | events, 88 | jobs, 89 | oracle_metadata, 90 | zap_balances, 91 | zaps, 92 | ); 93 | -------------------------------------------------------------------------------- /src/models/zap.rs: -------------------------------------------------------------------------------- 1 | use crate::models::schema::zaps; 2 | use bitcoin::secp256k1::ThirtyTwoByteHash; 3 | use diesel::{ 4 | AsChangeset, ExpressionMethods, Identifiable, Insertable, OptionalExtension, PgConnection, 5 | QueryDsl, Queryable, RunQueryDsl, 6 | }; 7 | use lightning_invoice::Bolt11Invoice; 8 | use nostr::{Event, EventId}; 9 | use serde::{Deserialize, Serialize}; 10 | use serde_json::Value; 11 | use std::str::FromStr; 12 | 13 | #[derive( 14 | Queryable, 15 | Insertable, 16 | Identifiable, 17 | AsChangeset, 18 | Serialize, 19 | Deserialize, 20 | Debug, 21 | Clone, 22 | PartialEq, 23 | Eq, 24 | )] 25 | #[diesel(primary_key(payment_hash))] 26 | #[diesel(check_for_backend(diesel::pg::Pg))] 27 | pub struct Zap { 28 | payment_hash: Vec, 29 | invoice: String, 30 | pub amount_msats: i32, 31 | request: Value, 32 | npub: Vec, 33 | pub 
note_id: Option>, 34 | created_at: chrono::NaiveDateTime, 35 | } 36 | 37 | #[derive(Insertable, AsChangeset)] 38 | #[diesel(table_name = zaps)] 39 | struct NewZap { 40 | payment_hash: Vec, 41 | invoice: String, 42 | amount_msats: i32, 43 | request: Value, 44 | npub: Vec, 45 | } 46 | 47 | impl Zap { 48 | pub fn payment_hash(&self) -> [u8; 32] { 49 | self.payment_hash 50 | .clone() 51 | .try_into() 52 | .expect("Invalid length") 53 | } 54 | 55 | pub fn invoice(&self) -> Bolt11Invoice { 56 | Bolt11Invoice::from_str(&self.invoice).expect("Invalid invoice") 57 | } 58 | 59 | pub fn request(&self) -> Event { 60 | serde_json::from_value(self.request.clone()).expect("Invalid event") 61 | } 62 | 63 | pub fn npub(&self) -> nostr::PublicKey { 64 | nostr::PublicKey::from_slice(&self.npub).expect("Invalid key") 65 | } 66 | 67 | pub fn note_id(&self) -> Option { 68 | self.note_id 69 | .as_ref() 70 | .map(|id| EventId::from_slice(id).expect("Invalid id")) 71 | } 72 | 73 | pub fn create( 74 | conn: &mut PgConnection, 75 | invoice: &Bolt11Invoice, 76 | request: &Event, 77 | for_npub: &nostr::PublicKey, 78 | ) -> anyhow::Result { 79 | let new = NewZap { 80 | payment_hash: invoice.payment_hash().into_32().to_vec(), 81 | invoice: invoice.to_string(), 82 | amount_msats: invoice.amount_milli_satoshis().expect("Invalid amount") as i32, 83 | request: serde_json::to_value(request)?, 84 | npub: for_npub.to_bytes().to_vec(), 85 | }; 86 | 87 | let res = diesel::insert_into(zaps::table) 88 | .values(new) 89 | .get_result::(conn)?; 90 | 91 | Ok(res) 92 | } 93 | 94 | pub fn find_by_payment_hash( 95 | conn: &mut PgConnection, 96 | payment_hash: &Vec, 97 | ) -> anyhow::Result> { 98 | let res = zaps::table 99 | .filter(zaps::payment_hash.eq(payment_hash)) 100 | .first::(conn) 101 | .optional()?; 102 | 103 | Ok(res) 104 | } 105 | 106 | pub fn update_note_id( 107 | conn: &mut PgConnection, 108 | payment_hash: Vec, 109 | note_id: EventId, 110 | ) -> anyhow::Result { 111 | let res = 
diesel::update(zaps::table) 112 | .filter(zaps::payment_hash.eq(payment_hash)) 113 | .set(zaps::note_id.eq(note_id.as_bytes())) 114 | .get_result::(conn)?; 115 | 116 | Ok(res) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/models/zap_balance.rs: -------------------------------------------------------------------------------- 1 | use crate::models::schema::zap_balances; 2 | use diesel::{ 3 | AsChangeset, ExpressionMethods, Identifiable, Insertable, OptionalExtension, PgConnection, 4 | QueryDsl, Queryable, RunQueryDsl, 5 | }; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | #[derive( 9 | Queryable, 10 | Insertable, 11 | Identifiable, 12 | AsChangeset, 13 | Serialize, 14 | Deserialize, 15 | Debug, 16 | Clone, 17 | PartialEq, 18 | Eq, 19 | )] 20 | #[diesel(primary_key(npub))] 21 | #[diesel(check_for_backend(diesel::pg::Pg))] 22 | pub struct ZapBalance { 23 | npub: Vec, 24 | pub balance_msats: i32, 25 | created_at: chrono::NaiveDateTime, 26 | } 27 | 28 | #[derive(Insertable, AsChangeset)] 29 | #[diesel(table_name = zap_balances)] 30 | struct NewZapBalance { 31 | npub: Vec, 32 | } 33 | 34 | impl ZapBalance { 35 | pub fn npub(&self) -> nostr::PublicKey { 36 | nostr::PublicKey::from_slice(&self.npub).expect("Invalid key") 37 | } 38 | 39 | pub fn create(conn: &mut PgConnection, npub: nostr::PublicKey) -> anyhow::Result { 40 | let new = NewZapBalance { 41 | npub: npub.to_bytes().to_vec(), 42 | }; 43 | 44 | let res = diesel::insert_into(zap_balances::table) 45 | .values(new) 46 | .get_result::(conn)?; 47 | 48 | Ok(res) 49 | } 50 | 51 | pub fn get(conn: &mut PgConnection, npub: &nostr::PublicKey) -> anyhow::Result> { 52 | let res = zap_balances::table 53 | .filter(zap_balances::npub.eq(npub.to_bytes().to_vec())) 54 | .first::(conn) 55 | .optional()?; 56 | 57 | Ok(res) 58 | } 59 | 60 | pub fn update_balance( 61 | &mut self, 62 | conn: &mut PgConnection, 63 | amount_msats: i32, 64 | ) -> anyhow::Result { 65 | 
self.balance_msats = self.balance_msats.saturating_add(amount_msats); 66 | 67 | if self.balance_msats < 0 { 68 | anyhow::bail!("Insufficient balance"); 69 | } 70 | 71 | let res = diesel::update(zap_balances::table) 72 | .filter(zap_balances::npub.eq(&self.npub)) 73 | .set(zap_balances::balance_msats.eq(self.balance_msats)) 74 | .get_result::(conn)?; 75 | 76 | Ok(res) 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /src/routes.rs: -------------------------------------------------------------------------------- 1 | use crate::models::create_zap; 2 | use crate::State; 3 | use anyhow::anyhow; 4 | use axum::extract::{Path, Query}; 5 | use axum::http::StatusCode; 6 | use axum::{Extension, Json}; 7 | use bitcoin::hashes::{sha256, Hash}; 8 | use bitcoin::secp256k1::ThirtyTwoByteHash; 9 | use lightning_invoice::Bolt11Invoice; 10 | use lnurl::pay::PayResponse; 11 | use lnurl::Tag; 12 | use nostr::nips::nip57; 13 | use nostr::{Event, JsonUtil}; 14 | use serde_json::{json, Value}; 15 | use std::collections::HashMap; 16 | use std::str::FromStr; 17 | use tonic_openssl_lnd::lnrpc; 18 | 19 | pub(crate) async fn get_invoice_impl( 20 | state: State, 21 | hash: String, 22 | amount_msats: u64, 23 | zap_request: Option, 24 | ) -> anyhow::Result { 25 | let mut lnd = state.lnd.clone(); 26 | let desc_hash = match zap_request.as_ref() { 27 | None => sha256::Hash::from_str(&hash)?, 28 | Some(event) => { 29 | if event.kind != nostr::Kind::ZapRequest { 30 | return Err(anyhow!("Invalid zap request")); 31 | } 32 | sha256::Hash::hash(event.as_json().as_bytes()) 33 | } 34 | }; 35 | 36 | let request = lnrpc::Invoice { 37 | value_msat: amount_msats as i64, 38 | description_hash: desc_hash.into_32().to_vec(), 39 | expiry: 86_400, 40 | ..Default::default() 41 | }; 42 | 43 | let resp = lnd.add_invoice(request).await?.into_inner(); 44 | 45 | if let Some(zap_request) = zap_request { 46 | let invoice = Bolt11Invoice::from_str(&resp.payment_request)?; 47 
| 48 | // handle private zaps 49 | let private_zap = nip57::decrypt_received_private_zap_message( 50 | state.keys.secret_key().unwrap(), 51 | &zap_request, 52 | ) 53 | .ok() 54 | .map(|e| e.pubkey); 55 | 56 | // if it is a private zap, use that npub, otherwise use the pubkey from the zap request 57 | let for_npub = private_zap.unwrap_or(zap_request.pubkey); 58 | 59 | let mut conn = state.db_pool.get()?; 60 | create_zap(&mut conn, &invoice, &zap_request, for_npub)?; 61 | } 62 | 63 | Ok(resp.payment_request) 64 | } 65 | 66 | pub async fn get_invoice( 67 | Path(hash): Path, 68 | Query(params): Query>, 69 | Extension(state): Extension, 70 | ) -> Result, (StatusCode, Json)> { 71 | let (amount_msats, zap_request) = match params.get("amount").and_then(|a| a.parse::().ok()) 72 | { 73 | None => Err(( 74 | StatusCode::BAD_REQUEST, 75 | Json(json!({ 76 | "status": "ERROR", 77 | "reason": "Missing amount parameter", 78 | })), 79 | )), 80 | Some(amount_msats) => { 81 | let zap_request = params.get("nostr").map_or_else( 82 | || Ok(None), 83 | |event_str| { 84 | Event::from_json(event_str) 85 | .map_err(|_| { 86 | ( 87 | StatusCode::BAD_REQUEST, 88 | Json(json!({ 89 | "status": "ERROR", 90 | "reason": "Invalid zap request", 91 | })), 92 | ) 93 | }) 94 | .map(Some) 95 | }, 96 | )?; 97 | 98 | Ok((amount_msats, zap_request)) 99 | } 100 | }?; 101 | 102 | match get_invoice_impl(state, hash, amount_msats, zap_request).await { 103 | Ok(invoice) => Ok(Json(json!({ 104 | "pr": invoice, 105 | "routers": [] 106 | }))), 107 | Err(e) => Err(handle_anyhow_error(e)), 108 | } 109 | } 110 | 111 | pub async fn get_lnurl_pay( 112 | Path(name): Path, 113 | Extension(state): Extension, 114 | ) -> Result, (StatusCode, Json)> { 115 | let metadata = format!( 116 | "[[\"text/identifier\",\"{name}@{}\"],[\"text/plain\",\"Sats for {name}\"]]", 117 | state.domain, 118 | ); 119 | 120 | let hash = sha256::Hash::hash(metadata.as_bytes()); 121 | let callback = format!("https://{}/get-invoice/{hash}", 
state.domain); 122 | 123 | let resp = PayResponse { 124 | callback, 125 | min_sendable: 1_000, 126 | max_sendable: 11_000_000_000, 127 | tag: Tag::PayRequest, 128 | metadata, 129 | comment_allowed: None, 130 | allows_nostr: Some(true), 131 | nostr_pubkey: Some(*state.keys.public_key()), 132 | }; 133 | 134 | Ok(Json(resp)) 135 | } 136 | 137 | pub async fn get_nip05( 138 | Extension(state): Extension, 139 | ) -> Result, (StatusCode, Json)> { 140 | let npub = state.keys.public_key().to_hex(); 141 | let json = json!({ 142 | "names": { 143 | "_": npub, 144 | }, 145 | "relays": { 146 | npub: state.relays, 147 | }, 148 | }); 149 | 150 | Ok(Json(json)) 151 | } 152 | 153 | pub(crate) fn handle_anyhow_error(err: anyhow::Error) -> (StatusCode, Json) { 154 | let err = json!({ 155 | "status": "ERROR", 156 | "reason": format!("{err}"), 157 | }); 158 | (StatusCode::BAD_REQUEST, Json(err)) 159 | } 160 | -------------------------------------------------------------------------------- /src/wasm_handler.rs: -------------------------------------------------------------------------------- 1 | use anyhow::anyhow; 2 | use extism::{Manifest, Plugin, Wasm}; 3 | use log::{debug, info}; 4 | use nostr::EventId; 5 | use reqwest::Url; 6 | use serde::{Deserialize, Serialize}; 7 | use sha2::{Digest, Sha256}; 8 | use std::fs::File; 9 | use std::io; 10 | use std::io::{Cursor, Seek}; 11 | use std::path::PathBuf; 12 | use std::time::Duration; 13 | use tokio::select; 14 | use tokio::time::Instant; 15 | 16 | const MAX_WASM_FILE_SIZE: u64 = 25_000_000; // 25mb 17 | 18 | #[derive(Debug, Clone, Serialize, Deserialize)] 19 | pub struct ScheduledParams { 20 | /// Expected outputs, if provided, a DLC announcement will be made if the output matches 21 | pub expected_outputs: Option>, 22 | /// The time to run the wasm in seconds from epoch 23 | pub run_date: u64, 24 | /// What to name the announcement 25 | pub name: Option, 26 | } 27 | 28 | #[derive(Debug, Clone, Serialize, Deserialize)] 29 | pub struct 
JobParams { 30 | pub url: String, 31 | pub function: String, 32 | pub input: String, 33 | pub time: u64, 34 | pub checksum: String, 35 | pub schedule: Option, 36 | } 37 | 38 | pub async fn download_and_run_wasm( 39 | job_params: JobParams, 40 | event_id: EventId, 41 | http: &reqwest::Client, 42 | ) -> anyhow::Result { 43 | let url = Url::parse(&job_params.url)?; 44 | let temp_dir = tempfile::tempdir()?; 45 | let file_path = temp_dir.path().join(format!("{event_id}.wasm")); 46 | 47 | let response = http.get(url).send().await?; 48 | 49 | if response.status().is_success() { 50 | // if length larger than 25mb, error 51 | if response.content_length().unwrap_or(0) > MAX_WASM_FILE_SIZE { 52 | anyhow::bail!("File too large"); 53 | } 54 | 55 | let mut dest = File::create(&file_path)?; 56 | 57 | let bytes = response.bytes().await?; 58 | if bytes.len() as u64 > MAX_WASM_FILE_SIZE { 59 | anyhow::bail!("File too large"); 60 | } 61 | let mut content = Cursor::new(bytes); 62 | 63 | let mut hasher = Sha256::new(); 64 | io::copy(&mut content, &mut hasher)?; 65 | let result = hasher.finalize(); 66 | let hex_result = format!("{result:x}"); 67 | if job_params.checksum.to_lowercase() != hex_result { 68 | std::fs::remove_file(&file_path)?; 69 | anyhow::bail!( 70 | "Checksum mismatch expected: {} got: {hex_result}", 71 | job_params.checksum 72 | ); 73 | } 74 | 75 | // write the file to disk 76 | content.rewind()?; 77 | io::copy(&mut content, &mut dest)?; 78 | } else { 79 | anyhow::bail!("Failed to download file: HTTP {}", response.status()) 80 | }; 81 | 82 | info!("Running wasm for event: {event_id}"); 83 | run_wasm(file_path, job_params).await 84 | } 85 | 86 | pub async fn run_wasm(file_path: PathBuf, job_params: JobParams) -> anyhow::Result { 87 | let wasm = Wasm::file(file_path); 88 | let mut manifest = Manifest::new([wasm]); 89 | manifest.allowed_hosts = Some(vec!["*".to_string()]); 90 | let mut plugin = Plugin::new(manifest, [], true)?; 91 | let cancel_handle = 
plugin.cancel_handle(); 92 | let start = Instant::now(); 93 | let fut = tokio::task::spawn_blocking(move || { 94 | plugin 95 | .call::<&str, &str>(&job_params.function, &job_params.input) 96 | .map(|x| x.to_string()) 97 | }); 98 | 99 | // add a free second for initial start up stuff 100 | let sleep = tokio::time::sleep(Duration::from_millis(job_params.time + 1_000)); 101 | 102 | select! { 103 | result = fut => { 104 | let result = result?; 105 | debug!("Complete, time elapsed: {}ms", start.elapsed().as_millis()); 106 | result 107 | } 108 | _ = sleep => { 109 | cancel_handle.cancel()?; 110 | Err(anyhow!("Timeout")) 111 | } 112 | } 113 | } 114 | 115 | #[cfg(test)] 116 | mod test { 117 | use super::{download_and_run_wasm, JobParams}; 118 | use nostr::EventId; 119 | use serde_json::Value; 120 | 121 | #[tokio::test] 122 | async fn test_wasm_runner() { 123 | let params = JobParams { 124 | url: "https://github.com/extism/plugins/releases/download/v0.5.0/count_vowels.wasm" 125 | .to_string(), 126 | function: "count_vowels".to_string(), 127 | input: "Hello World".to_string(), 128 | time: 500, 129 | checksum: "93898457953d30d016f712ccf4336ce7e9971db5f7f3aff1edd252764f75d5d7" 130 | .to_string(), 131 | schedule: None, 132 | }; 133 | let result = download_and_run_wasm(params, EventId::all_zeros(), &reqwest::Client::new()) 134 | .await 135 | .unwrap(); 136 | 137 | assert_eq!( 138 | result, 139 | "{\"count\":3,\"total\":3,\"vowels\":\"aeiouAEIOU\"}" 140 | ); 141 | } 142 | 143 | #[tokio::test] 144 | async fn test_http_wasm() { 145 | let params = JobParams { 146 | url: "https://github.com/extism/plugins/releases/download/v0.5.0/http.wasm".to_string(), 147 | function: "http_get".to_string(), 148 | input: "{\"url\":\"https://benthecarman.com/.well-known/nostr.json\"}".to_string(), // get my nip05 149 | time: 5_000, 150 | checksum: "fe7ff8aaf45d67dd0d6b9fdfe3aa871e658a83adcf19c8f016013c29e8857f03" 151 | .to_string(), 152 | schedule: None, 153 | }; 154 | let result = 
download_and_run_wasm(params, EventId::all_zeros(), &reqwest::Client::new()) 155 | .await 156 | .unwrap(); 157 | 158 | let json = serde_json::from_str::(&result); 159 | 160 | assert!(json.is_ok()); 161 | } 162 | 163 | #[tokio::test] 164 | async fn test_timeout_infinite_loop() { 165 | let params = JobParams { 166 | url: "https://github.com/extism/plugins/releases/download/v0.5.0/loop_forever.wasm" 167 | .to_string(), 168 | function: "loop_forever".to_string(), 169 | input: "".to_string(), 170 | time: 1_000, 171 | checksum: "6e6386b9194f2298b5e55e88c25fe66dda454f0e2604da6964735ab1c554b513" 172 | .to_string(), 173 | schedule: None, 174 | }; 175 | let err = 176 | download_and_run_wasm(params, EventId::all_zeros(), &reqwest::Client::new()).await; 177 | 178 | assert!(err.is_err()); 179 | assert_eq!(err.unwrap_err().to_string(), "Timeout"); 180 | } 181 | } 182 | --------------------------------------------------------------------------------