├── rust-toolchain.toml ├── docs ├── SPEC.md ├── development.nomen.toml ├── release_notes │ └── v0.3.0.md ├── RELEASE.md ├── DEVELOPMENT.md ├── HOWTO.md ├── changelogs │ ├── v0.2.0.md │ └── v0.3.0.md ├── FAQ.md └── API.md ├── nomen ├── src │ ├── config │ │ ├── mod.rs │ │ ├── cli.rs │ │ ├── config_file.rs │ │ └── cfg.rs │ ├── subcommands │ │ ├── index │ │ │ ├── events │ │ │ │ ├── mod.rs │ │ │ │ ├── records.rs │ │ │ │ ├── relay_index.rs │ │ │ │ └── event_data.rs │ │ │ ├── mod.rs │ │ │ └── blockchain.rs │ │ ├── util.rs │ │ ├── mod.rs │ │ └── server │ │ │ ├── mod.rs │ │ │ ├── api.rs │ │ │ └── explorer.rs │ ├── db │ │ ├── queries │ │ │ ├── insert_raw_blockchain.sql │ │ │ ├── insert_transfer_cache.sql │ │ │ ├── insert_blockchain_index.sql │ │ │ └── insert_name_event.sql │ │ ├── event_log.rs │ │ ├── stats.rs │ │ ├── relay_index.rs │ │ ├── raw.rs │ │ ├── mod.rs │ │ ├── name.rs │ │ └── index.rs │ ├── util │ │ ├── mod.rs │ │ ├── keyval.rs │ │ ├── npub.rs │ │ └── nsec.rs │ └── main.rs ├── templates │ ├── error.html │ ├── pubkey.html │ ├── explorer.html │ ├── base.html │ ├── stats.html │ ├── transfer │ │ ├── initiate.html │ │ ├── sign.html │ │ └── complete.html │ ├── index.html │ ├── updaterecords.html │ ├── name.html │ ├── newname.html │ └── faqs.html ├── build.rs └── Cargo.toml ├── .gitignore ├── nomen-cli ├── Cargo.toml └── src │ ├── nostr.rs │ └── main.rs ├── example.nomen.toml ├── Cargo.toml ├── nomen_core ├── Cargo.toml └── src │ ├── nsid_builder.rs │ ├── name.rs │ ├── create.rs │ ├── nsid.rs │ ├── lib.rs │ ├── hash160.rs │ ├── transfer.rs │ ├── extractor.rs │ └── kind.rs ├── RELEASE_NOTES.md ├── LICENSE ├── Makefile ├── CHANGELOG.md ├── README.md ├── cliff.toml └── .github └── workflows └── release.yml /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.71" -------------------------------------------------------------------------------- /docs/SPEC.md: 
-------------------------------------------------------------------------------- 1 | This specification has been refactored into a different [repo](https://github.com/ursuscamp/noms). 2 | -------------------------------------------------------------------------------- /nomen/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | mod cfg; 2 | mod cli; 3 | mod config_file; 4 | 5 | pub use cfg::*; 6 | pub use cli::*; 7 | pub use config_file::*; 8 | -------------------------------------------------------------------------------- /nomen/src/subcommands/index/events/mod.rs: -------------------------------------------------------------------------------- 1 | mod event_data; 2 | mod records; 3 | pub mod relay_index; 4 | 5 | pub use event_data::*; 6 | pub use records::*; 7 | -------------------------------------------------------------------------------- /nomen/templates/error.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |

Error

6 | 7 |

8 | Error: {{ message }} 9 |

10 |
11 | {% endblock %} -------------------------------------------------------------------------------- /nomen/build.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use vergen::EmitBuilder; 3 | 4 | fn main() -> Result<(), Box> { 5 | // Emit the instructions 6 | EmitBuilder::builder().all_build().all_git().emit()?; 7 | Ok(()) 8 | } 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode/ 2 | scripts 3 | /target 4 | cookie 5 | .auth* 6 | .bitcoin 7 | nomen.toml 8 | create.json 9 | __pycache__ 10 | records.json 11 | nomen.db 12 | .DS_Store 13 | records1.json 14 | *.csv 15 | *.psbt 16 | release/ 17 | *.bak 18 | contrib/* 19 | backup/ 20 | -------------------------------------------------------------------------------- /docs/development.nomen.toml: -------------------------------------------------------------------------------- 1 | [rpc] 2 | 3 | user="regtest" 4 | password="regtest" 5 | host="localhost" 6 | port=18443 7 | network="regtest" 8 | 9 | [nostr] 10 | 11 | relays=["ws://127.0.0.1:8080"] 12 | 13 | [server] 14 | 15 | bind = "127.0.0.1:8888" 16 | confirmations = 3 -------------------------------------------------------------------------------- /nomen/src/db/queries/insert_raw_blockchain.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO 2 | raw_blockchain ( 3 | blockhash, 4 | txid, 5 | blocktime, 6 | blockheight, 7 | txheight, 8 | vout, 9 | data, 10 | indexed_at 11 | ) 12 | VALUES 13 | (?, ?, ?, ?, ?, ?, ?, unixepoch()); -------------------------------------------------------------------------------- /nomen/src/db/queries/insert_transfer_cache.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO 2 | transfer_cache ( 3 | protocol, 4 | fingerprint, 5 | nsid, 6 | name, 7 | 
pubkey, 8 | blockhash, 9 | txid, 10 | blocktime, 11 | blockheight, 12 | txheight, 13 | vout, 14 | indexed_at 15 | ) 16 | VALUES 17 | (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, unixepoch()); -------------------------------------------------------------------------------- /nomen/src/db/queries/insert_blockchain_index.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO 2 | blockchain_index ( 3 | protocol, 4 | fingerprint, 5 | nsid, 6 | name, 7 | pubkey, 8 | blockhash, 9 | txid, 10 | blocktime, 11 | blockheight, 12 | txheight, 13 | vout, 14 | indexed_at 15 | ) 16 | VALUES 17 | (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, unixepoch()); -------------------------------------------------------------------------------- /nomen/src/db/queries/insert_name_event.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO name_events (name, fingerprint, nsid, pubkey, created_at, event_id, records, indexed_at, raw_event) 2 | VALUES (?, ?, ?, ?, ?, ?, ?, unixepoch(), ?) 3 | ON CONFLICT (name, pubkey) DO UPDATE SET 4 | created_at = excluded.created_at, 5 | event_id = excluded.event_id, 6 | records = excluded.records, 7 | raw_event = excluded.raw_event 8 | where excluded.created_at > created_at; -------------------------------------------------------------------------------- /nomen/src/util/mod.rs: -------------------------------------------------------------------------------- 1 | mod keyval; 2 | mod npub; 3 | mod nsec; 4 | 5 | pub use keyval::*; 6 | pub use npub::*; 7 | pub use nsec::*; 8 | 9 | use time::{macros::format_description, OffsetDateTime}; 10 | 11 | pub fn format_time(timestamp: i64) -> anyhow::Result { 12 | let dt = OffsetDateTime::from_unix_timestamp(timestamp)?; 13 | let format = format_description!("[year]-[month]-[day] [hour]:[minute]:[second]"); 14 | Ok(dt.format(format)?) 
15 | } 16 | -------------------------------------------------------------------------------- /nomen/src/subcommands/index/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{config::Config, db}; 2 | 3 | mod blockchain; 4 | pub mod events; 5 | 6 | pub async fn index(config: &Config) -> anyhow::Result<()> { 7 | let pool = config.sqlite().await?; 8 | blockchain::index(config, &pool).await?; 9 | events::records(config, &pool).await?; 10 | events::relay_index::publish(config, &pool, true).await?; 11 | 12 | db::event_log::save(&pool, "index", "").await?; 13 | Ok(()) 14 | } 15 | -------------------------------------------------------------------------------- /nomen-cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nomen-cli" 3 | version = "0.4.0" 4 | edition = "2021" 5 | rust-version = "1.71" 6 | repository = "https://github.com/ursuscamp/nomen" 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [dependencies] 11 | nomen_core = { path = "../nomen_core" } 12 | anyhow = "1.0.75" 13 | clap = "4.4.4" 14 | nostr-sdk = "0.24.0" 15 | secp256k1 = { version = "0.27.0", features = ["rand-std", "bitcoin-hashes"] } 16 | hex = { version = "0.4.3", features = ["serde"] } 17 | derive_more = "0.99.17" 18 | -------------------------------------------------------------------------------- /nomen/src/util/keyval.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use anyhow::anyhow; 4 | 5 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] 6 | pub struct KeyVal(String, String); 7 | 8 | impl KeyVal { 9 | pub fn pair(self) -> (String, String) { 10 | (self.0, self.1) 11 | } 12 | } 13 | 14 | impl FromStr for KeyVal { 15 | type Err = anyhow::Error; 16 | 17 | fn from_str(s: &str) -> Result { 18 | let (key, val) = s.split_once('=').ok_or_else(|| 
anyhow!("Invalid key"))?; 19 | Ok(KeyVal(key.to_string().to_uppercase(), val.to_string())) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /nomen/templates/pubkey.html: -------------------------------------------------------------------------------- 1 |

2 | 3 | 4 |
5 | Use NIP-07 6 |

7 | 8 | -------------------------------------------------------------------------------- /example.nomen.toml: -------------------------------------------------------------------------------- 1 | data = "nomen.db" 2 | 3 | [nostr] 4 | relays = ["wss://relay.damus.io"] 5 | 6 | # Per NOM-04 spec: Publish indexes to relays 7 | secret = "nsec1..." 8 | publish = true 9 | well-known = true 10 | 11 | [server] 12 | bind = "0.0.0.0:8080" 13 | without_explorer = false 14 | without_api = false 15 | without_indexer = false 16 | indexer_delay = 30 17 | confirmations = 3 18 | 19 | [rpc] 20 | # Include either cookie or user/password (or none for no RPC auth) 21 | cookie = "path/to/cookie/file" 22 | user = "rpc username" 23 | password = "rpc password" 24 | host = "localhost" 25 | port = 8441 26 | network = "bitcoin" 27 | -------------------------------------------------------------------------------- /nomen/src/db/event_log.rs: -------------------------------------------------------------------------------- 1 | use sqlx::SqlitePool; 2 | 3 | pub async fn save(conn: &SqlitePool, evt_type: &str, evt_data: &str) -> anyhow::Result<()> { 4 | sqlx::query("INSERT INTO event_log (created_at, type, data) VALUES (unixepoch(), ?, ?);") 5 | .bind(evt_type) 6 | .bind(evt_data) 7 | .execute(conn) 8 | .await?; 9 | Ok(()) 10 | } 11 | 12 | pub async fn last_index_time(conn: &SqlitePool) -> anyhow::Result { 13 | let (created_at,) = sqlx::query_as::<_, (i64,)>( 14 | "SELECT created_at FROM event_log WHERE type = 'index' ORDER BY created_at DESC LIMIT 1;", 15 | ) 16 | .fetch_one(conn) 17 | .await?; 18 | 19 | Ok(created_at) 20 | } 21 | -------------------------------------------------------------------------------- /docs/release_notes/v0.3.0.md: -------------------------------------------------------------------------------- 1 | # Release Notes - 0.3.0 2 | 3 | ## Highlights 4 | 5 | Version 0.3.0 is a massive change. 
The banner feature of this new version is a protocol version bump to v1 which: 6 | 7 | - puts all ownership data on chain 8 | - backwards-compatible with v0 9 | - upgradeable from v0 10 | - enables transfer 11 | - long-term stable 12 | 13 | See the changelog for a full list of changes. 14 | 15 | ## Upgrading from 0.2 16 | 17 | This version involves a full database schema revamp, so backup your `nomen.db` file, then delete it. Upgrading will require a full blockchain rescan! 18 | 19 | Checkout the new [example.nomen.toml](example.nomen.toml) file and adjust your configuration accordingly for the new version. -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "nomen_core", 5 | "nomen", 6 | "nomen-cli" 7 | ] 8 | 9 | # Config for 'cargo dist' 10 | [workspace.metadata.dist] 11 | include = ["RELEASE_NOTES.md"] 12 | # The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax) 13 | cargo-dist-version = "0.4.2" 14 | # CI backends to support 15 | ci = ["github"] 16 | # The installers to generate for each app 17 | installers = [] 18 | # Target platforms to build apps for (Rust target-triple syntax) 19 | targets = ["x86_64-unknown-linux-gnu", "aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-pc-windows-msvc"] 20 | # Publish jobs to run in CI 21 | pr-run-mode = "plan" 22 | 23 | # The profile that 'cargo dist' will build with 24 | [profile.dist] 25 | inherits = "release" 26 | lto = "thin" 27 | -------------------------------------------------------------------------------- /docs/RELEASE.md: -------------------------------------------------------------------------------- 1 | # Preparing A Release 2 | 3 | 1. Crate release branch. 4 | 2. Archive current `CHANGELOG.md` to `docs/changelogs/vY.Y.Y.md`, replacing `Y.Y.Y` with the current version number. 5 | 3. 
Archive current `RELEASE_NOTES.md` to `docs/release_notes/vY.Y.Y.md`, replacing `Y.Y.Y` with the current version number. 6 | 4. Update version numbers in `Cargo.toml` files to new version. 7 | 5. Run `git cliff -o CHANGELOG.md --unreleased --tag vX.X.X`, replacing `X.X.X` with the new version. 8 | 6. Write new `RELEASE_NOTES.md`, containing highlights of the release, and special release instructions that must be done prior to upgrade. 9 | 7. Commit with a `release:` tag on the commit message. 10 | 8. Open PR to `master` and merge. 11 | 9. `git tag vY.Y.Y && git push --tags` to create the release and build the artifacts. -------------------------------------------------------------------------------- /nomen/templates/explorer.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |

Explorer

6 | 7 |

Last indexed at {{ last_index_time }}.

8 | 9 |

This is a list of verified claims. Click here for a list of claims uncorroborated with any events.

10 | 11 |
12 | 13 |
14 | 15 |

16 | Select a name to see its current record set. 17 |

18 | 19 |

20 | {% if names.len() == 0 %} 21 |

No names found!

22 | {% else %} 23 |
    24 | {% for name in names %} 25 |
  • {{ name.1 }}
  • 26 | {% endfor %} 27 |
28 | {% endif %} 29 |

30 |
31 | {% endblock %} -------------------------------------------------------------------------------- /nomen/src/db/stats.rs: -------------------------------------------------------------------------------- 1 | use sqlx::SqlitePool; 2 | 3 | pub async fn known_names(conn: &SqlitePool) -> anyhow::Result { 4 | let (count,) = sqlx::query_as::<_, (i64,)>("SELECT count(*) FROM valid_names_vw;") 5 | .fetch_one(conn) 6 | .await?; 7 | Ok(count) 8 | } 9 | 10 | pub async fn index_height(conn: &SqlitePool) -> anyhow::Result { 11 | let (count,) = sqlx::query_as::<_, (i64,)>("SELECT max(blockheight) FROM index_height;") 12 | .fetch_one(conn) 13 | .await?; 14 | Ok(count) 15 | } 16 | 17 | pub async fn nostr_events(conn: &SqlitePool) -> anyhow::Result { 18 | let (count,) = sqlx::query_as::<_, (i64,)>( 19 | " 20 | WITH events as ( 21 | SELECT count(*) as count FROM name_events 22 | ) 23 | SELECT SUM(count) FROM events; 24 | ", 25 | ) 26 | .fetch_one(conn) 27 | .await?; 28 | Ok(count) 29 | } 30 | -------------------------------------------------------------------------------- /nomen/templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | Nomen Explorer 9 | 10 | 11 | 12 | 17 | 18 | 19 | 20 |
21 |

Nomen Explorer

22 | 23 | 31 |
32 | 33 | {% block body %}{% endblock %} 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /nomen/templates/stats.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |

Build Information

6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 |
Version{{ version }}
Commit{{ commit }}
Build Date{{ build_date }}
23 | 24 |

Index Information

25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 |
Known Names{{ known_names }}
Index Blockheight{{ index_height }}
Nostr Events{{ nostr_events }}
42 |
43 | {% endblock %} -------------------------------------------------------------------------------- /nomen-cli/src/nostr.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use derive_more::{AsRef, From, Into}; 4 | use nostr_sdk::{ 5 | prelude::{FromPkStr, FromSkStr}, 6 | Keys, 7 | }; 8 | use secp256k1::{SecretKey, XOnlyPublicKey}; 9 | 10 | #[derive(Debug, Clone, PartialEq, Eq, From, Into, AsRef)] 11 | pub struct Nsec(SecretKey); 12 | 13 | impl FromStr for Nsec { 14 | type Err = anyhow::Error; 15 | 16 | fn from_str(s: &str) -> Result { 17 | let keys = Keys::from_sk_str(s)?; 18 | let sk = keys.secret_key()?; 19 | Ok(Nsec(sk)) 20 | } 21 | } 22 | 23 | #[derive(Debug, Clone, PartialEq, Eq, From, Into, AsRef)] 24 | pub struct Npub(XOnlyPublicKey); 25 | 26 | impl FromStr for Npub { 27 | type Err = anyhow::Error; 28 | 29 | fn from_str(s: &str) -> Result { 30 | let keys = Keys::from_pk_str(s)?; 31 | let pk = keys.public_key(); 32 | Ok(Npub(pk)) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /nomen_core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nomen_core" 3 | version = "0.4.0" 4 | edition = "2021" 5 | rust-version = "1.71" 6 | repository = "https://github.com/ursuscamp/nomen" 7 | 8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 9 | 10 | [dependencies] 11 | bitcoin = {version = "0.30.0", features = ["base64", "serde"] } 12 | bitcoin_hashes = { version = "0.12.0", features = ["serde"] } 13 | derive_more = "0.99.17" 14 | hex = { version = "0.4.3", features = ["serde"] } 15 | itertools = "0.10.5" 16 | tracing = "0.1.37" 17 | nostr-sdk = "0.24.0" 18 | rand = "0.8.5" 19 | regex = "1.7.1" 20 | ripemd = "0.1.3" 21 | secp256k1 = { version = "0.27.0", features = ["rand-std"] } 22 | serde = { version = "1.0.152", features = ["derive"] } 
23 | serde-hex = "0.1.0" 24 | serde_json = "1.0.94" 25 | sha2 = "0.10.6" 26 | time = { version = "0.3.20", features = ["formatting", "macros"] } 27 | thiserror = "1.0.49" 28 | serde_with = { version = "*", features = ["macros"] } 29 | -------------------------------------------------------------------------------- /docs/DEVELOPMENT.md: -------------------------------------------------------------------------------- 1 | # Setting up a dev environment 2 | 3 | 1. Clone the repo. 4 | 2. Create a branch from `develop`. 5 | 6 | ## Bitcoin 7 | 8 | 1. Start bitcoin in regtest: `make bitcoin-local`. This sets up a local Bitcoin regtest environment just for Nomen. 9 | - If you ever need to reset your local regtest: Stop `bitcoin` and run `make bitcoin-reset`. 10 | 2. Run `make bitcoin-wallet` to setup the default wallet for Bitcoin. 11 | 3. Create an alias like `regtest` to `bitcoin-cli -datadir=.bitcoin/ -chain=regtest`. 12 | 13 | ## Nostr Relay 14 | 15 | 1. In a separate folder, clone `https://github.com/scsibug/nostr-rs-relay`. 16 | 2. Run `cargo build --release`. 17 | 3. Run `RUST_LOG=info target/release/nostr-rs-relay`. 18 | - This will start a local Nostr relay for Nomen to use. 19 | - If you ever need to reset, just `rm nostr.db` and run the command again. 20 | 21 | ## Nomen 22 | 23 | Back in your Nomen folder: 24 | 25 | 1. Copy [development.nomen.toml](./development.nomen.toml) to `nomen.toml` in the root folder. 26 | 2. Run `cargo run -- server` to start the Nomen indexer. -------------------------------------------------------------------------------- /RELEASE_NOTES.md: -------------------------------------------------------------------------------- 1 | # Release Notes - 0.3.0 2 | 3 | ## Highlights 4 | 5 | Version 0.4.0 brings a few new features: 6 | 7 | - [NOM-04](https://github.com/ursuscamp/noms/blob/master/nom-04.md) support: indexes are now publised to relays! 
8 | - `rebroadcast` CLI command which rebroadcasts all known records events to relays (useful to keep indexer network healthy) 9 | - `publish` command will publish the full set of indexed names to relays 10 | 11 | ## Upgrading from 0.3 12 | 13 | 1. Backup your `nomen.db` file prior to upgrading. 14 | 2. Repalce your `nomen` executable. 15 | 3. Update your `nomen.toml` config file with the following new keys under the `[nostr]` section (if you wish to publish your index): 16 | 1. `secret = "nsec..."` is the `nsec` encoded private key that your indexer will use to publish events 17 | 2. `publish = true` will tell your node to publish index events to your Nostr relays 18 | 3. `well-known = true` will make sure the indexer serves the `.well-known/nomen.json` file per the NOM-04 specification 19 | 4. Run `nomen publish` to publish a full index (again, only if you wish to publish) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2023 ursuscamp 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the “Software”), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 7 | of the Software, and to permit persons to whom the Software is furnished to do 8 | so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 | DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /nomen/templates/transfer/initiate.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |

Transfer Name

6 | 7 |

8 | To transfer a name, two transfer transactions must be broadcast containing data for the new owner, and a signature 9 | authorizing the transfer, signed by the original owner. 10 |

11 | 12 |

13 | To initiate a transfer, enter the name you wish to transfer, and the pubkeys of the previous (current) owner and 14 | the new owner. 15 |

16 | 17 |
18 |

19 | 20 | 21 |

22 | 23 |

24 | 25 | 26 |

27 | 28 |

29 | 30 | 31 |

32 | 33 |

34 | 35 |

36 |
37 |
38 | {% endblock %} -------------------------------------------------------------------------------- /nomen/src/config/cli.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use clap::Parser; 4 | 5 | #[derive(Parser, Debug, Clone)] 6 | pub struct Cli { 7 | /// Location of config file: Default: nomen.toml 8 | #[arg(short, long, default_value = "nomen.toml")] 9 | pub config: PathBuf, 10 | 11 | #[command(subcommand)] 12 | pub subcommand: Subcommand, 13 | } 14 | 15 | #[derive(clap::Subcommand, Debug, Clone)] 16 | pub enum Subcommand { 17 | /// Output example config file. 18 | Init, 19 | 20 | /// Scan and index the blockchain. 21 | Index, 22 | 23 | /// Start the HTTP server. 24 | Server, 25 | 26 | /// Force the indexer to re-index, given an optional starting blockheight. This operation is fast, it does NOT force a blockchain rescan. 27 | Reindex { blockheight: Option }, 28 | 29 | /// Rescan the blockchain, given an optional starting blockheight. This operation is slow, it redownloads blocks. 
30 | Rescan { blockheight: Option }, 31 | 32 | /// Rebroadcast Nostr record events 33 | Rebroadcast, 34 | 35 | /// Publish full name index to relay servers 36 | Publish, 37 | 38 | /// Prints the current version of application 39 | Version, 40 | } 41 | -------------------------------------------------------------------------------- /nomen_core/src/nsid_builder.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::secp256k1::XOnlyPublicKey; 2 | 3 | use crate::Hash160; 4 | 5 | use super::Nsid; 6 | 7 | pub struct NsidBuilder { 8 | root_name: String, 9 | pk: XOnlyPublicKey, 10 | } 11 | 12 | impl NsidBuilder { 13 | pub fn new(root_name: &str, root_pk: &XOnlyPublicKey) -> NsidBuilder { 14 | NsidBuilder { 15 | root_name: root_name.to_owned(), 16 | pk: *root_pk, 17 | } 18 | } 19 | 20 | pub fn finalize(self) -> Nsid { 21 | let mut hasher = Hash160::default(); 22 | hasher.update(self.root_name.as_bytes()); 23 | hasher.update(&self.pk.serialize()); 24 | hasher.finalize().into() 25 | } 26 | } 27 | 28 | #[cfg(test)] 29 | mod tests { 30 | use super::*; 31 | 32 | #[test] 33 | fn test_nsid_builder() { 34 | let pk: XOnlyPublicKey = "60de6fbc4a78209942c62706d904ff9592c2e856f219793f7f73e62fc33bfc18" 35 | .parse() 36 | .unwrap(); 37 | let nsid = NsidBuilder::new("hello-world", &pk).finalize(); 38 | 39 | assert_eq!( 40 | nsid, 41 | "273968a1e7be2ef0acbcae6f61d53e73101e2983".parse().unwrap() 42 | ) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /nomen/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |
6 |

Nomen

7 | 8 |

9 | Nomen is an open protocol for global names, like a decentralized DNS, built with Bitcoin and Nostr. The goals of 10 | the Nomen protocol are decentralization and self-sovereignty. 11 |

12 | 13 |

14 | Nomen Explorer is an indexer. It catalogues the Bitcoin blockchain and associated Nomen events on Nostr. 15 |

16 | 17 |

More Information

18 | 19 |

20 | Check out some more information below, starting with the FAQ if you want details. Check out the Explorer if you 21 | want to see 22 | what names already exist out there. 23 |

24 | 25 |

26 |

27 | 28 | 29 | 30 | 31 |

32 |
33 |
34 | {% endblock %} -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BITCOINDIR=.bitcoin 2 | REGTESTCLI=bitcoin-cli -datadir=$(BITCOINDIR) -chain=regtest 3 | 4 | .PHONY: mac-aarch64 linux-amd64 windows-amd64 release 5 | 6 | mac-aarch64: 7 | cargo build --release --target aarch64-apple-darwin 8 | 9 | linux-amd64: 10 | TARGET_CC=x86_64-linux-musl-gcc cargo build --release --target x86_64-unknown-linux-musl 11 | 12 | # Setup: https://gist.github.com/Mefistophell/9787e1b6d2d9441c16d2ac79d6a505e6 13 | windows-amd64: 14 | TARGET_CC=x86_64-w64-mingw32-gcc cargo build --release --target x86_64-pc-windows-gnu 15 | 16 | release: mac-aarch64 linux-amd64 windows-amd64 17 | mkdir -p release 18 | zip release/nomen-mac-aarch64-$(VERSION).zip target/aarch64-apple-darwin/release/nomen 19 | zip release/nomen-linux-amd64-$(VERSION).zip target/x86_64-unknown-linux-musl/release/nomen 20 | zip release/nomen-windows-amd64-$(VERSION).zip target/x86_64-pc-windows-gnu/release/nomen.exe 21 | 22 | bitcoin-local: 23 | mkdir -p .bitcoin 24 | bitcoind -datadir=$(BITCOINDIR) -chain=regtest -fallbackfee=0.001 -txindex -rpcuser=regtest -rpcpassword=regtest 25 | 26 | bitcoin-wallet: 27 | $(REGTESTCLI) createwallet regtest 28 | $(REGTESTCLI) generatetoaddress 101 $$($(REGTESTCLI) getnewaddress) 29 | 30 | bitcoin-reset: 31 | rm -rf .bitcoin -------------------------------------------------------------------------------- /nomen_core/src/name.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use derive_more::{AsRef, Display, Into}; 4 | use regex::Regex; 5 | 6 | #[derive(Display, AsRef, Debug, Clone, PartialEq, Eq, Default)] 7 | pub struct Name(String); 8 | 9 | impl FromStr for Name { 10 | type Err = super::UtilError; 11 | 12 | fn from_str(s: &str) -> Result { 13 | let r = 
Regex::new(r#"\A[0-9a-z\-]{3,43}\z"#)?; 14 | if r.is_match(s) { 15 | return Ok(Name(s.into())); 16 | } 17 | 18 | Err(super::UtilError::NameValidation) 19 | } 20 | } 21 | 22 | #[cfg(test)] 23 | mod tests { 24 | use std::{any, collections::HashMap}; 25 | 26 | use crate::UtilError; 27 | 28 | use super::*; 29 | 30 | #[test] 31 | fn test_valid() { 32 | let r = ["hello-world", "123abc"] 33 | .into_iter() 34 | .map(Name::from_str) 35 | .all(|r| r.is_ok()); 36 | assert!(r); 37 | } 38 | 39 | #[test] 40 | fn test_invalid() { 41 | let r = [ 42 | "hello!", 43 | "ld", 44 | "abcdefghijklmnopqrztuvwxyzabcdefghijklmnopqrztuvwxyz", 45 | ] 46 | .into_iter() 47 | .map(Name::from_str) 48 | .all(|r| r.is_err()); 49 | assert!(r); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /nomen/src/util/npub.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, str::FromStr}; 2 | 3 | use nostr_sdk::{prelude::FromPkStr, Keys}; 4 | use secp256k1::XOnlyPublicKey; 5 | use serde::Serialize; 6 | 7 | #[derive(Debug, Clone, Copy, Serialize, serde_with::DeserializeFromStr)] 8 | 9 | pub struct Npub(XOnlyPublicKey); 10 | 11 | impl AsRef for Npub { 12 | fn as_ref(&self) -> &XOnlyPublicKey { 13 | &self.0 14 | } 15 | } 16 | 17 | impl FromStr for Npub { 18 | type Err = anyhow::Error; 19 | 20 | fn from_str(s: &str) -> Result { 21 | let keys = Keys::from_pk_str(s)?; 22 | Ok(Npub(keys.public_key())) 23 | } 24 | } 25 | 26 | impl Display for Npub { 27 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 28 | f.write_str(&self.0.to_string()) 29 | } 30 | } 31 | 32 | #[cfg(test)] 33 | mod tests { 34 | use super::*; 35 | 36 | #[test] 37 | fn test_npub() { 38 | let _pubkey: Npub = "npub1u50q2x85utgcgqrmv607crvmk8x3k2nvyun84dxlj6034kajje0s2cm3r0" 39 | .parse() 40 | .unwrap(); 41 | } 42 | 43 | #[test] 44 | fn test_hex() { 45 | let _pubkey: Npub = 
"e51e0518f4e2d184007b669fec0d9bb1cd1b2a6c27267ab4df969f1adbb2965f" 46 | .parse() 47 | .unwrap(); 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | ## [0.4.0] - 2023-11-24 6 | 7 | ### Bug Fixes 8 | 9 | - do not redownload last event every index 10 | 11 | - Re-download record events after reindex 12 | 13 | - Better relay handling 14 | 15 | - publis command should not use queue 16 | 17 | 18 | ### Features 19 | 20 | - Updated config file format for index publishing. 21 | 22 | - Validate config file on startup. 23 | 24 | - NOM-04 support, relay publishing + .well-known 25 | 26 | - Version subcomand #18 27 | 28 | - record v1 upgrade block info 29 | 30 | - UI will now warn users when attempting a transfer on a name that doesn't exist or shouldn't be transferred 31 | 32 | - Added relays key to .well-known/nomen.json, per NOM-04 addition. 33 | 34 | - "rebroadcast" command will rebroadcast known record events 35 | 36 | - publish command to publish full relay index 37 | 38 | 39 | ### Other 40 | 41 | - Preparing release v0.4.0 42 | 43 | ### Refactor 44 | 45 | - Refactor: Refactored db module to sub-modules (#25) 46 | 47 | - Refactor: Some tweaks to db submodule refactoring 48 | 49 | - Refactor: Additional factoring on the db module 50 | 51 | 52 | ### Testing 53 | 54 | - Test: Rewrote and refactored tests to base them on official test vectors (#24) 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /nomen/templates/transfer/sign.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 | {% if error.is_some() %} 6 | {{ error.clone().unwrap_or_default() }} 7 | {% else %} 8 |

Sign Transfer

9 | 10 |

This transfer must be authorized by the current owner. A signature will be generated by signing a dummy Nostr event 11 | with your NIP-07 extension.

12 | 13 |
14 | 15 | 16 | 17 | 18 | 19 |

20 | 21 |

22 |
23 | 24 | {% endif %} 25 |
26 | 27 | 40 | {% endblock %} -------------------------------------------------------------------------------- /nomen/src/subcommands/util.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use bitcoin::{ 4 | psbt::{Output, Psbt}, 5 | script::PushBytesBuf, 6 | ScriptBuf, TxOut, 7 | }; 8 | use nomen_core::{CreateBuilder, NameKind, NsidBuilder}; 9 | use nostr_sdk::{EventBuilder, Tag, TagKind, UnsignedEvent}; 10 | use secp256k1::XOnlyPublicKey; 11 | 12 | pub fn extend_psbt(psbt: &mut Psbt, name: &str, pubkey: &XOnlyPublicKey) { 13 | let data = CreateBuilder::new(pubkey, name).v1_op_return(); 14 | let mut pb = PushBytesBuf::new(); 15 | pb.extend_from_slice(&data).expect("OP_RETURN fail"); 16 | let data = ScriptBuf::new_op_return(&pb); 17 | psbt.outputs.push(Output { 18 | witness_script: Some(data.clone()), 19 | ..Default::default() 20 | }); 21 | psbt.unsigned_tx.output.push(TxOut { 22 | value: 0, 23 | script_pubkey: data, 24 | }); 25 | } 26 | 27 | pub fn name_event( 28 | pubkey: XOnlyPublicKey, 29 | records: &HashMap, 30 | name: &str, 31 | ) -> anyhow::Result { 32 | let records = serde_json::to_string(&records)?; 33 | let nsid = NsidBuilder::new(name, &pubkey).finalize(); 34 | let event = EventBuilder::new( 35 | NameKind::Name.into(), 36 | records, 37 | &[ 38 | Tag::Identifier(nsid.to_string()), 39 | Tag::Generic(TagKind::Custom("nom".to_owned()), vec![name.to_owned()]), 40 | ], 41 | ) 42 | .to_unsigned_event(pubkey); 43 | 44 | Ok(event) 45 | } 46 | -------------------------------------------------------------------------------- /docs/HOWTO.md: -------------------------------------------------------------------------------- 1 | # How To Get A Name 2 | 3 | What you will need: 4 | 5 | 1. A Bitcoin UTXO 6 | 2. A wallet to sign a PSBT 7 | 3. A keypair (any schnorr-compatible Bitcoin or Nostr keypair will work) 8 | * If you need one, use optional step below. 
9 | 10 | ## Using the Explorer 11 | 12 | 1. Construct an unsigned PSBT with your Bitcoin wallet. 13 | 2. Visit https://nomenexplorer.com 14 | 3. Click `New Name`. 15 | 3. Paste the base64-encoded PSBT into the form. 16 | 6. Enter the name you wish to reserve and the pubkey of the owner. 17 | * __Note:__ If you have a NIP-07 compatible browser extension, you can click "Use NIP-07" and it will obtain the public key from your browser extension. 18 | 7. Click `Submit` and it will build a new, unsigned transaction for you. Copy the transaction to sign and broadcast it with your wallet. 19 | 8. After broadcasting the transaction, click `setup your records` to build a new nostr records event. 20 | 9. Enter the records you wish to include. Each record must be on its own line and look like this `KEY=value`. 21 | 10. Enter your public key again, or use your NIP-07 extension. 22 | 11. Click `Create Event` and you will be presented with an unsigned Nostr event. 23 | 12. Clicking `Sign and Broadcast` will use your NIP-07 extension to sign the event and broadcast it to relays. 24 | 25 | Alternatively, if you don't want or have an unsigned PSBT, you can skip filling in the PSBT. If you don't fill it in, the form will just return a hex-encoded `OP_RETURN` script. You can paste this into a wallet that is compatible with `OP_RETURN` outputs like Bitcoin Core, Electrum, Trezor, etc. 
-------------------------------------------------------------------------------- /nomen/src/util/nsec.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, str::FromStr}; 2 | 3 | use nostr_sdk::{prelude::FromSkStr, Keys, ToBech32}; 4 | use secp256k1::SecretKey; 5 | use serde::Serialize; 6 | 7 | #[derive(Debug, Clone, Copy, Serialize, serde_with::DeserializeFromStr)] 8 | 9 | pub struct Nsec(SecretKey); 10 | 11 | impl AsRef for Nsec { 12 | fn as_ref(&self) -> &SecretKey { 13 | &self.0 14 | } 15 | } 16 | 17 | impl FromStr for Nsec { 18 | type Err = anyhow::Error; 19 | 20 | fn from_str(s: &str) -> Result { 21 | let keys = Keys::from_sk_str(s)?; 22 | Ok(Nsec(keys.secret_key().expect("Secret key required"))) 23 | } 24 | } 25 | 26 | impl Display for Nsec { 27 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 28 | f.write_str(&self.0.to_bech32().expect("Unable to format as bech32")) 29 | } 30 | } 31 | 32 | impl From for Nsec { 33 | fn from(value: SecretKey) -> Self { 34 | Nsec(value) 35 | } 36 | } 37 | 38 | impl From for SecretKey { 39 | fn from(value: Nsec) -> Self { 40 | value.0 41 | } 42 | } 43 | 44 | #[cfg(test)] 45 | mod tests { 46 | use super::*; 47 | 48 | #[test] 49 | fn test_nsec() { 50 | let _nsec: Nsec = "nsec18meshnlpsyl6qpq4jkwh9hks3v4uprp44las83akz6xfndc9tx2q646wuk" 51 | .parse() 52 | .unwrap(); 53 | } 54 | 55 | #[test] 56 | fn test_hex() { 57 | let _nsec: Nsec = "3ef30bcfe1813fa00415959d72ded08b2bc08c35affb03c7b6168c99b7055994" 58 | .parse() 59 | .unwrap(); 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /nomen/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "nomen" 3 | version = "0.4.0" 4 | edition = "2021" 5 | build = "build.rs" 6 | default-run = "nomen" 7 | rust-version = "1.71" 8 | repository = "https://github.com/ursuscamp/nomen" 9 | 10 | # See more keys 
and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 11 | 12 | [dependencies] 13 | anyhow = "1.0.75" 14 | askama = {version = "0.12.0", features = ["with-axum", "serde-json"]} 15 | askama_axum = "0.3.0" 16 | axum = {version = "0.6.11"} 17 | axum-extra = "0.7.4" 18 | clap = { version = "4.1.8", features = ["derive"] } 19 | hex = { version = "0.4.3", features = ["serde"] } 20 | nomen_core = { path = "../nomen_core" } 21 | nostr-sdk = "0.24.0" 22 | rand = { version = "0.8.5", features = ["serde"] } 23 | secp256k1 = { version = "0.27.0", features = ["rand-std", "bitcoin-hashes"] } 24 | serde = { version = "1.0.188", features = ["derive"] } 25 | serde_json = "1.0.107" 26 | tokio = { version = "1.32.0", features = ["full"] } 27 | toml = "0.8.0" 28 | yansi = "0.5.1" 29 | sqlx = { version = "0.6.2", features = ["runtime-tokio-rustls", "sqlite"] } 30 | bitcoin = { version = "0.30.1", features = ["base64", "rand", "serde"] } 31 | elegant-departure = { version = "0.2.1", features = ["tokio"] } 32 | itertools = "0.11.0" 33 | bitcoincore-rpc = "0.17.0" 34 | futures = "0.3.28" 35 | tracing = "0.1.37" 36 | tracing-subscriber = "0.3.17" 37 | time = { version = "0.3.20", features = ["formatting", "macros"] } 38 | tower-http = { version = "0.4.4", features = ["cors"] } 39 | serde_with = "3.4.0" 40 | 41 | 42 | [build-dependencies] 43 | vergen = { version = "8.0.0", features = ["build", "git", "gitcl"] } 44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nomen Explorer 2 | 3 | Nomen is a protocol for globally unique, decentralized "domain names". The Nomen Explorer is the first indexer (or name server) for this protocol. 4 | 5 | Try it [here](https://nomenexplorer.com)! You can explore existing names or create a new one. Note: You will need to sign and broadcast a Bitcoin transaction with your wallet to do it. 
6 | 7 | If you download the project yourself, you can build it and run the indexer for your own use, or use the CLI to experiment with Nomen. 8 | 9 | ## What is Nomen? 10 | 11 | Nomen is a protocol for globally unique names, like DNS, except decentralized and based on Bitcoin and Nostr. Instead of a central authority deciding who controls a name, the protocol provides simple rules to determine the owner. 12 | 13 | At a high level, claims to a name are published to the Bitcoin blockchain (think of this as registering a domain name). Bitcoin provides the ordering guarantees. The first to claim a name owns it. Published along with the name is the public key of the owner. 14 | 15 | Owners then publish Nostr events signed with the same key to update their records (like their domain DNS records). 16 | 17 | With Bitcoin, there is no need to create a new blockchain or have a trusted third party. With Nostr, there's no need to bootstrap a new P2P transport layer. 18 | 19 | Read [the specs](https://github.com/ursuscamp/noms) for more details about the protocol itself. It's very simple. 20 | 21 | ## Documentation 22 | 23 | - [Changelog](CHANGELOG.md) 24 | - [Release Notes](RELEASE_NOTES.md) 25 | 26 | ## Setting up a dev environment 27 | 28 | Follow the steps in [DEVELOPMENT.md](./docs/DEVELOPMENT.md). 
-------------------------------------------------------------------------------- /nomen/src/db/relay_index.rs: -------------------------------------------------------------------------------- 1 | use sqlx::Sqlite; 2 | 3 | pub async fn queue( 4 | conn: impl sqlx::Executor<'_, Database = Sqlite> + Copy, 5 | name: &str, 6 | ) -> anyhow::Result<()> { 7 | sqlx::query("INSERT OR IGNORE INTO relay_index_queue (name) VALUES (?)") 8 | .bind(name) 9 | .execute(conn) 10 | .await?; 11 | Ok(()) 12 | } 13 | 14 | #[derive(sqlx::FromRow, Debug)] 15 | pub struct Name { 16 | pub name: String, 17 | pub pubkey: String, 18 | pub records: String, 19 | } 20 | 21 | pub async fn fetch_all_queued( 22 | conn: impl sqlx::Executor<'_, Database = Sqlite> + Copy, 23 | ) -> anyhow::Result> { 24 | let results = sqlx::query_as::<_, Name>( 25 | "SELECT vnr.name, vnr.pubkey, COALESCE(vnr.records, '{}') as records 26 | FROM valid_names_records_vw vnr 27 | JOIN relay_index_queue riq ON vnr.name = riq.name;", 28 | ) 29 | .fetch_all(conn) 30 | .await?; 31 | Ok(results) 32 | } 33 | 34 | pub async fn fetch_all( 35 | conn: impl sqlx::Executor<'_, Database = Sqlite> + Copy, 36 | ) -> anyhow::Result> { 37 | let results = sqlx::query_as::<_, Name>( 38 | "SELECT vnr.name, vnr.pubkey, COALESCE(vnr.records, '{}') as records 39 | FROM valid_names_records_vw vnr;", 40 | ) 41 | .fetch_all(conn) 42 | .await?; 43 | Ok(results) 44 | } 45 | 46 | pub async fn delete( 47 | conn: impl sqlx::Executor<'_, Database = Sqlite> + Copy, 48 | name: &str, 49 | ) -> anyhow::Result<()> { 50 | sqlx::query("DELETE FROM relay_index_queue WHERE name = ?;") 51 | .bind(name) 52 | .execute(conn) 53 | .await?; 54 | Ok(()) 55 | } 56 | -------------------------------------------------------------------------------- /nomen_core/src/create.rs: -------------------------------------------------------------------------------- 1 | use crate::NomenKind; 2 | use nostr_sdk::{EventId, UnsignedEvent}; 3 | use secp256k1::XOnlyPublicKey; 4 | 5 | use 
super::{CreateV0, CreateV1, Hash160, NsidBuilder}; 6 | 7 | pub struct CreateBuilder<'a> { 8 | pub pubkey: &'a XOnlyPublicKey, 9 | pub name: &'a str, 10 | } 11 | 12 | impl<'a> CreateBuilder<'a> { 13 | pub fn new(pubkey: &'a XOnlyPublicKey, name: &'a str) -> CreateBuilder<'a> { 14 | CreateBuilder { pubkey, name } 15 | } 16 | 17 | pub fn v0_op_return(&self) -> Vec { 18 | let fingerprint = Hash160::default() 19 | .chain_update(self.name.as_bytes()) 20 | .fingerprint(); 21 | let nsid = NsidBuilder::new(self.name, self.pubkey).finalize(); 22 | CreateV0 { fingerprint, nsid }.serialize() 23 | } 24 | 25 | pub fn v1_op_return(&self) -> Vec { 26 | CreateV1 { 27 | pubkey: *self.pubkey, 28 | name: self.name.to_string(), 29 | } 30 | .serialize() 31 | } 32 | } 33 | 34 | #[cfg(test)] 35 | mod tests { 36 | use super::*; 37 | #[test] 38 | fn test_op_returns() { 39 | let pk = "60de6fbc4a78209942c62706d904ff9592c2e856f219793f7f73e62fc33bfc18" 40 | .parse() 41 | .unwrap(); 42 | let cb = CreateBuilder::new(&pk, "hello-world"); 43 | 44 | assert_eq!( 45 | hex::encode(cb.v0_op_return()), 46 | "4e4f4d0000e5401df4b4273968a1e7be2ef0acbcae6f61d53e73101e2983" 47 | ); 48 | 49 | assert_eq!(hex::encode(cb.v1_op_return()), "4e4f4d010060de6fbc4a78209942c62706d904ff9592c2e856f219793f7f73e62fc33bfc1868656c6c6f2d776f726c64"); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /nomen/templates/transfer/complete.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |

6 | Here are two OP_RETURNs; they must be included in Bitcoin blocks, in this order. The first 7 | OP_RETURN 8 | contains the information for the new owner, and the second contains the signature that authorizes the transfer. 9 | For now, Bitcoin standardness rules prevent multiple OP_RETURNs in a single transaction. Unless you 10 | have a miner connection, they will need to be broadcast in separate transactions. The best way to ensure that they 11 | are mined in the correct order is to include the first OP_RETURN in a transaction, then do a CPFP 12 | (Child-Pays-For-Parent) transaction from the new UTXO and include the second OP_RETURN 13 |

14 |

15 |

{{ data1 }}
16 | 17 |

18 | 19 |

20 |

{{ data2 }}
21 | 22 |

23 |
24 | 25 | 48 | {% endblock %} -------------------------------------------------------------------------------- /nomen/src/db/raw.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::module_name_repetitions)] 2 | 3 | use bitcoin::{BlockHash, Txid}; 4 | use sqlx::{sqlite::SqliteRow, Executor, FromRow, Row, Sqlite}; 5 | use std::str::FromStr; 6 | 7 | pub struct RawBlockchain { 8 | pub blockhash: BlockHash, 9 | pub txid: Txid, 10 | pub blocktime: usize, 11 | pub blockheight: usize, 12 | pub txheight: usize, 13 | pub vout: usize, 14 | pub data: Vec, 15 | } 16 | 17 | impl FromRow<'_, SqliteRow> for RawBlockchain { 18 | fn from_row(row: &'_ SqliteRow) -> Result { 19 | Ok(RawBlockchain { 20 | blockhash: BlockHash::from_str(row.try_get("blockhash")?) 21 | .map_err(|e| sqlx::Error::Decode(Box::new(e)))?, 22 | txid: Txid::from_str(row.try_get("txid")?) 23 | .map_err(|e| sqlx::Error::Decode(Box::new(e)))?, 24 | blocktime: row.try_get::("blocktime")? as usize, 25 | blockheight: row.try_get::("blockheight")? as usize, 26 | txheight: row.try_get::("txheight")? as usize, 27 | vout: row.try_get::("vout")? as usize, 28 | data: hex::decode(row.try_get::("data")?) 
29 | .map_err(|e| sqlx::Error::Decode(Box::new(e)))?, 30 | }) 31 | } 32 | } 33 | 34 | pub async fn insert_raw_blockchain( 35 | conn: impl Executor<'_, Database = Sqlite>, 36 | raw: &RawBlockchain, 37 | ) -> anyhow::Result<()> { 38 | sqlx::query(include_str!("./queries/insert_raw_blockchain.sql")) 39 | .bind(raw.blockhash.to_string()) 40 | .bind(raw.txid.to_string()) 41 | .bind(raw.blocktime as i64) 42 | .bind(raw.blockheight as i64) 43 | .bind(raw.txheight as i64) 44 | .bind(raw.vout as i64) 45 | .bind(hex::encode(&raw.data)) 46 | .execute(conn) 47 | .await?; 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /nomen_core/src/nsid.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt::{Debug, Display}, 3 | io::Read, 4 | str::FromStr, 5 | }; 6 | 7 | use bitcoin::secp256k1::XOnlyPublicKey; 8 | use derive_more::{AsMut, AsRef, Deref, DerefMut, From}; 9 | use nostr_sdk::Event; 10 | 11 | use super::{EventExtractor, NameKind, NsidBuilder}; 12 | 13 | #[derive( 14 | Clone, Copy, Deref, DerefMut, AsRef, AsMut, From, Eq, PartialEq, serde_with::DeserializeFromStr, 15 | )] 16 | pub struct Nsid([u8; 20]); 17 | 18 | impl Nsid { 19 | #[allow(dead_code)] 20 | pub fn from_slice(bytes: &[u8]) -> Result { 21 | Ok(Nsid(bytes.try_into()?)) 22 | } 23 | } 24 | 25 | impl TryFrom<&[u8]> for Nsid { 26 | type Error = super::UtilError; 27 | 28 | fn try_from(value: &[u8]) -> Result { 29 | Nsid::from_slice(value) 30 | } 31 | } 32 | 33 | impl TryFrom for Nsid { 34 | type Error = super::UtilError; 35 | 36 | fn try_from(event: Event) -> Result { 37 | let nk: NameKind = event.kind.try_into()?; 38 | let name = event.extract_name()?; 39 | let builder = match nk { 40 | NameKind::Name => NsidBuilder::new(&name, &event.pubkey), 41 | }; 42 | Ok(builder.finalize()) 43 | } 44 | } 45 | 46 | impl FromStr for Nsid { 47 | type Err = super::UtilError; 48 | 49 | fn from_str(s: &str) -> Result { 50 | let mut 
out = [0u8; 20]; 51 | hex::decode_to_slice(s, &mut out)?; 52 | Ok(Nsid(out)) 53 | } 54 | } 55 | 56 | impl Debug for Nsid { 57 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 58 | f.debug_tuple("Pubkey").field(&hex::encode(self.0)).finish() 59 | } 60 | } 61 | 62 | impl Display for Nsid { 63 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 64 | write!(f, "{}", hex::encode(self.0)) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /docs/changelogs/v0.2.0.md: -------------------------------------------------------------------------------- 1 | ## 0.2.0 2 | 3 | This release includes a database migration, so make sure to back up your index before upgrading. 4 | 5 | Features: 6 | - Transfers have been removed, and names have been limited to 43 characters for vesion `0x00`. They will be enabled in the next version with a better designed. 7 | - Primal.net is now used to npub links. 8 | - New page to list blockchain claims for which there are no indexed record events. 9 | - Index statistic page. 10 | 11 | Bugs: 12 | - Fixed a bug where a name was double-indexed because the same `OP_RETURN` was uploaded twice 13 | 14 | ## 0.2.0 15 | 16 | This release includes a database migration, so make sure to back up your index before upgrading. 17 | 18 | Features: 19 | - Transfers have been removed, and names have been limited to 43 characters for vesion `0x00`. They will be enabled in the next version with a better designed. 20 | - Primal.net is now used to npub links. 21 | - New page to list blockchain claims for which there are no indexed record events. 22 | - Index statistic page. 23 | 24 | Bugs: 25 | - Fixed a bug where a name was double-indexed because the same `OP_RETURN` was uploaded twice 26 | 27 | ## 0.1.1 28 | 29 | Features: 30 | - Explorer now links to a name instead of a NSID. This simply makes it easier for a something to be bookmarked, even after a transfer. 
31 | - Explorer web UI and CLI both automatically capitalizes the keys in records now. 32 | - Name page: Update Records link added, which automatically preloads data for user to update, including most recent record set. 33 | - Name page: Blockhash and Txid link to block explorer mempool.space. 34 | - Name page: Links for different record types. For example, `WEB` record links to actual webpage. 35 | - Name page: MOTD records now have a little but of decorative quoting. 36 | - The Search bar strips whitespace. 37 | 38 | Bugs: 39 | - Indexer will not longer stop randomly. 40 | 41 | Other: 42 | - Added `WEB` record type to spec. 43 | - Changes "New Records" to "Update Records" everywhere. 44 | - More detailed help instructions. 45 | 46 | ## 0.1.0 47 | 48 | - Initial release. -------------------------------------------------------------------------------- /nomen/src/main.rs: -------------------------------------------------------------------------------- 1 | #![warn( 2 | clippy::suspicious, 3 | clippy::complexity, 4 | clippy::perf, 5 | clippy::style, 6 | clippy::pedantic 7 | )] 8 | #![allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] 9 | 10 | mod config; 11 | mod db; 12 | mod subcommands; 13 | mod util; 14 | 15 | use anyhow::bail; 16 | use clap::Parser; 17 | 18 | use config::Config; 19 | 20 | #[tokio::main] 21 | async fn main() -> anyhow::Result<()> { 22 | // No log output by default 23 | if std::env::var("RUST_LOG").is_err() { 24 | std::env::set_var("RUST_LOG", "off"); 25 | } 26 | 27 | tracing_subscriber::fmt::init(); 28 | let config = parse_config()?; 29 | 30 | let pool = db::initialize(&config).await?; 31 | 32 | match &config.cli.subcommand { 33 | config::Subcommand::Init => subcommands::init()?, 34 | config::Subcommand::Index => subcommands::index(&config).await?, 35 | config::Subcommand::Server => subcommands::start(&config, &pool).await?, 36 | config::Subcommand::Reindex { blockheight } => { 37 | subcommands::reindex(&config, &pool, 
blockheight.unwrap_or_default()).await?; 38 | } 39 | config::Subcommand::Rescan { blockheight } => { 40 | subcommands::rescan(&config, &pool, blockheight.unwrap_or_default()).await?; 41 | } 42 | config::Subcommand::Rebroadcast => { 43 | subcommands::rebroadcast(&config, &pool).await?; 44 | } 45 | config::Subcommand::Publish => subcommands::publish(&config, &pool).await?, 46 | config::Subcommand::Version => { 47 | subcommands::version(); 48 | } 49 | } 50 | 51 | Ok(()) 52 | } 53 | 54 | fn parse_config() -> anyhow::Result { 55 | let cli = config::Cli::parse(); 56 | 57 | let file = if cli.config.is_file() { 58 | let config_str = std::fs::read_to_string(&cli.config)?; 59 | 60 | toml::from_str(&config_str)? 61 | } else { 62 | tracing::error!("Config file not found."); 63 | bail!("Missing config file.") 64 | }; 65 | 66 | let config = Config::new(cli, file); 67 | config.validate()?; 68 | 69 | tracing::debug!("Config loaded: {config:?}"); 70 | 71 | Ok(config) 72 | } 73 | -------------------------------------------------------------------------------- /nomen/src/subcommands/index/events/records.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use nomen_core::NameKind; 4 | use nostr_sdk::{Event, Filter}; 5 | use sqlx::SqlitePool; 6 | 7 | use crate::{config::Config, db, subcommands::index::events::EventData}; 8 | 9 | pub async fn records(config: &Config, pool: &SqlitePool) -> anyhow::Result<()> { 10 | tracing::info!("Beginning indexing record events."); 11 | let events = latest_events(config, pool).await?; 12 | for event in events { 13 | match EventData::from_event(&event) { 14 | Ok(ed) => save_event(pool, ed).await?, 15 | Err(err) => tracing::debug!("Invalid event: {err}"), 16 | } 17 | } 18 | 19 | tracing::info!("Records events indexing complete."); 20 | Ok(()) 21 | } 22 | 23 | async fn save_event(pool: &SqlitePool, ed: EventData) -> anyhow::Result<()> { 24 | tracing::info!("Saving valid event {}", 
ed.event_id); 25 | let EventData { 26 | event_id, 27 | fingerprint, 28 | nsid: _, 29 | calculated_nsid, 30 | pubkey, 31 | name, 32 | created_at, 33 | raw_content, 34 | records: _, 35 | raw_event, 36 | } = ed; 37 | db::name::insert_name_event( 38 | pool, 39 | name.clone(), 40 | fingerprint, 41 | calculated_nsid, 42 | pubkey, 43 | created_at, 44 | event_id, 45 | raw_content, 46 | raw_event, 47 | ) 48 | .await?; 49 | 50 | db::index::update_v0_index(pool, name.as_ref(), &pubkey, calculated_nsid).await?; 51 | 52 | db::relay_index::queue(pool, name.as_ref()).await?; 53 | 54 | Ok(()) 55 | } 56 | 57 | async fn latest_events( 58 | config: &Config, 59 | pool: &sqlx::Pool, 60 | ) -> anyhow::Result> { 61 | let records_time = db::name::last_records_time(pool).await? + 1; 62 | let filter = Filter::new() 63 | .kind(NameKind::Name.into()) 64 | .since(records_time.into()); 65 | 66 | let (_keys, client) = config.nostr_random_client().await?; 67 | let events = client 68 | .get_events_of(vec![filter], Some(Duration::from_secs(10))) 69 | .await?; 70 | client.disconnect().await?; 71 | Ok(events) 72 | } 73 | -------------------------------------------------------------------------------- /nomen_core/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | mod create; 4 | mod extractor; 5 | mod hash160; 6 | mod kind; 7 | mod name; 8 | mod nsid; 9 | mod nsid_builder; 10 | mod transfer; 11 | 12 | pub use create::*; 13 | pub use extractor::*; 14 | pub use hash160::*; 15 | pub use kind::*; 16 | pub use name::*; 17 | pub use nsid::*; 18 | pub use nsid_builder::*; 19 | pub use transfer::*; 20 | 21 | #[derive(thiserror::Error, Debug)] 22 | pub enum UtilError { 23 | #[error("not a nomen transaction")] 24 | NotNomenError, 25 | #[error("unsupported nomen version")] 26 | UnsupportedNomenVersion, 27 | #[error("unexpectex tx type")] 28 | UnexpectedNomenTxType, 29 | #[error("name validation")] 30 | NameValidation, 31 | #[error("unknown 
nomen kind: {:?}", .0)] 32 | NomenKind(String), 33 | #[error("invalid Key=Value")] 34 | InvalidKeyVal(String), 35 | #[error("invalid event kind")] 36 | InvalidEventKind(nostr_sdk::Kind), 37 | #[error("nostr event signing error")] 38 | UnsignedEventError(#[from] nostr_sdk::event::unsigned::Error), 39 | #[error("slice conversion")] 40 | TryFromSliceError(#[from] std::array::TryFromSliceError), 41 | #[error("hex conversion")] 42 | HexDecode(#[from] hex::FromHexError), 43 | #[error("nostr key")] 44 | NostrKeyError(#[from] nostr_sdk::key::Error), 45 | #[error("regex")] 46 | RegexError(#[from] regex::Error), 47 | #[error("secp256k1")] 48 | Secp256k1Error(#[from] secp256k1::Error), 49 | #[error("string error")] 50 | StringError(#[from] std::string::FromUtf8Error), 51 | #[error(transparent)] 52 | ExtractorError(#[from] ExtractorError), 53 | } 54 | 55 | pub enum NameKind { 56 | Name = 38300, 57 | } 58 | 59 | impl From for nostr_sdk::Kind { 60 | fn from(value: NameKind) -> Self { 61 | nostr_sdk::Kind::ParameterizedReplaceable(value as u16) 62 | } 63 | } 64 | 65 | impl TryFrom for NameKind { 66 | type Error = UtilError; 67 | 68 | fn try_from(value: nostr_sdk::Kind) -> Result { 69 | let nk = match value { 70 | nostr_sdk::Kind::ParameterizedReplaceable(38300) => NameKind::Name, 71 | _ => return Err(UtilError::InvalidEventKind(value)), 72 | }; 73 | Ok(nk) 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /nomen_core/src/hash160.rs: -------------------------------------------------------------------------------- 1 | use ripemd::{Digest, Ripemd160}; 2 | use sha2::Sha256; 3 | 4 | #[derive(Default)] 5 | pub struct Hash160 { 6 | hasher: Sha256, 7 | } 8 | 9 | #[allow(unused)] 10 | impl Hash160 { 11 | pub fn update(&mut self, data: &[u8]) { 12 | self.hasher.update(data); 13 | } 14 | 15 | pub fn chain_update(mut self, data: &[u8]) -> Hash160 { 16 | self.update(data); 17 | self 18 | } 19 | 20 | #[allow(dead_code)] 21 | pub fn 
chain_optional(mut self, data: &Option<&[u8]>) -> Hash160 { 22 | if let Some(data) = data { 23 | self.update(data); 24 | } 25 | self 26 | } 27 | 28 | pub fn finalize(self) -> [u8; 20] { 29 | let f = self.hasher.finalize(); 30 | Ripemd160::digest(f) 31 | .try_into() 32 | .expect("Hash160 struct should return 20 bytes") 33 | } 34 | 35 | pub fn fingerprint(self) -> [u8; 5] { 36 | let h = self.finalize(); 37 | h[..5].try_into().unwrap() 38 | } 39 | 40 | pub fn digest(data: &[u8]) -> [u8; 20] { 41 | Hash160::default().chain_update(data).finalize() 42 | } 43 | 44 | pub fn digest_slices(data: &[&[u8]]) -> [u8; 20] { 45 | data.iter() 46 | .fold(Hash160::default(), |acc, d| acc.chain_update(d)) 47 | .finalize() 48 | } 49 | } 50 | 51 | #[cfg(test)] 52 | mod tests { 53 | 54 | use super::*; 55 | 56 | #[test] 57 | fn test_update() { 58 | let mut h = Hash160::default(); 59 | h.update(b"hello"); 60 | let d = hex::encode(h.finalize()); 61 | assert_eq!(d, "b6a9c8c230722b7c748331a8b450f05566dc7d0f"); 62 | } 63 | 64 | #[test] 65 | fn test_fingerprint() { 66 | let mut h = Hash160::default(); 67 | h.update(b"hello"); 68 | let d = hex::encode(h.fingerprint()); 69 | assert_eq!(d, "b6a9c8c230"); 70 | } 71 | 72 | #[test] 73 | fn test_digest() { 74 | assert_eq!( 75 | hex::encode(Hash160::digest(b"hello")), 76 | "b6a9c8c230722b7c748331a8b450f05566dc7d0f" 77 | ); 78 | } 79 | 80 | #[test] 81 | fn test_digest_slices() { 82 | let hashed = hex::encode(Hash160::digest_slices(&[b"hello", b"world"])); 83 | assert_eq!(hashed, "b36c87f1c6d9182eb826d7d987f9081adf15b772"); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /nomen_core/src/transfer.rs: -------------------------------------------------------------------------------- 1 | use crate::NomenKind; 2 | use nostr_sdk::{EventId, UnsignedEvent}; 3 | use secp256k1::{schnorr::Signature, XOnlyPublicKey}; 4 | 5 | use super::{SignatureV1, TransferV1}; 6 | 7 | pub struct TransferBuilder<'a> { 8 | pub 
new_pubkey: &'a XOnlyPublicKey, 9 | pub name: &'a str, 10 | } 11 | 12 | impl<'a> TransferBuilder<'a> { 13 | pub fn transfer_op_return(&self) -> Vec { 14 | TransferV1 { 15 | pubkey: *self.new_pubkey, 16 | name: self.name.to_string(), 17 | } 18 | .serialize() 19 | } 20 | 21 | pub fn unsigned_event(&self, prev_owner: &XOnlyPublicKey) -> nostr_sdk::UnsignedEvent { 22 | let created_at = 1u64.into(); 23 | let kind: nostr_sdk::Kind = 1u64.into(); 24 | let content = format!("{}{}", hex::encode(prev_owner.serialize()), self.name); 25 | let id = EventId::new(prev_owner, created_at, &kind, &[], &content); 26 | 27 | UnsignedEvent { 28 | id, 29 | pubkey: *prev_owner, 30 | created_at, 31 | kind, 32 | tags: vec![], 33 | content, 34 | } 35 | } 36 | 37 | pub fn signature_op_return(&self, keys: nostr_sdk::Keys) -> Result, super::UtilError> { 38 | let unsigned_event = self.unsigned_event(&keys.public_key()); 39 | let event = unsigned_event.sign(&keys)?; 40 | Ok(SignatureV1 { 41 | signature: event.sig, 42 | } 43 | .serialize()) 44 | } 45 | 46 | pub fn signature_provided_op_return(&self, signature: Signature) -> Vec { 47 | SignatureV1 { signature }.serialize() 48 | } 49 | } 50 | 51 | #[cfg(test)] 52 | mod tests { 53 | use std::str::FromStr; 54 | 55 | use super::*; 56 | 57 | #[test] 58 | fn test_op_returns() { 59 | let new_pubkey = XOnlyPublicKey::from_str( 60 | "74301b9c5d30b764bca8d3eb4febb06862f558d292fde93b4a290d90850bac91", 61 | ) 62 | .unwrap(); 63 | let tb = TransferBuilder { 64 | new_pubkey: &new_pubkey, 65 | name: "hello-world", 66 | }; 67 | 68 | assert_eq!(hex::encode(tb.transfer_op_return()), "4e4f4d010174301b9c5d30b764bca8d3eb4febb06862f558d292fde93b4a290d90850bac9168656c6c6f2d776f726c64"); 69 | 70 | // Signatures are not consistent, so they can't really be tested here. 
71 | } 72 | } 73 | -------------------------------------------------------------------------------- /nomen/src/subcommands/mod.rs: -------------------------------------------------------------------------------- 1 | mod index; 2 | mod server; 3 | pub mod util; 4 | 5 | pub use index::*; 6 | use nostr_sdk::Event; 7 | pub use server::*; 8 | use sqlx::SqlitePool; 9 | 10 | use crate::{ 11 | config::{Config, ConfigFile}, 12 | db, 13 | }; 14 | 15 | pub(crate) fn init() -> anyhow::Result<()> { 16 | let config_file = ConfigFile::example(); 17 | let cfg = toml::to_string(&config_file)?; 18 | println!("{cfg} "); 19 | Ok(()) 20 | } 21 | 22 | pub(crate) async fn reindex( 23 | _config: &Config, 24 | pool: &SqlitePool, 25 | blockheight: i64, 26 | ) -> anyhow::Result<()> { 27 | println!("Re-indexing blockchain from blockheight {blockheight}."); 28 | db::index::reindex(pool, blockheight).await?; 29 | Ok(()) 30 | } 31 | 32 | pub(crate) async fn rescan( 33 | _config: &Config, 34 | pool: &SqlitePool, 35 | blockheight: i64, 36 | ) -> anyhow::Result<()> { 37 | println!("Re-scanning blockchain from blockheight {blockheight}."); 38 | db::index::reindex(pool, blockheight).await?; 39 | sqlx::query("DELETE FROM index_height WHERE blockheight >= ?;") 40 | .bind(blockheight) 41 | .execute(pool) 42 | .await?; 43 | sqlx::query("DELETE FROM raw_blockchain WHERE blockheight >= ?;") 44 | .bind(blockheight) 45 | .execute(pool) 46 | .await?; 47 | 48 | Ok(()) 49 | } 50 | 51 | pub(crate) fn version() { 52 | let version = env!("CARGO_PKG_VERSION"); 53 | println!("Current version is {version}"); 54 | } 55 | 56 | pub(crate) async fn rebroadcast(config: &Config, pool: &SqlitePool) -> anyhow::Result<()> { 57 | let events = sqlx::query_as::<_, (String,)>( 58 | "select ne.raw_event from valid_names_vw vn join name_events ne on vn.nsid = ne.nsid;", 59 | ) 60 | .fetch_all(pool) 61 | .await?; 62 | println!( 63 | "Rebroadcasing {} events to {} relays", 64 | events.len(), 65 | config.relays().len() 66 | ); 67 | 
let (_, client) = config.nostr_random_client().await?; 68 | for (event,) in events { 69 | let event = Event::from_json(event)?; 70 | client.send_event(event).await?; 71 | } 72 | 73 | Ok(()) 74 | } 75 | 76 | pub(crate) async fn publish(config: &Config, pool: &SqlitePool) -> anyhow::Result<()> { 77 | println!("Publishing full relay index"); 78 | index::events::relay_index::publish(config, pool, false).await 79 | } 80 | -------------------------------------------------------------------------------- /nomen-cli/src/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | mod nostr; 4 | 5 | use clap::Parser; 6 | use nomen_core::TransferBuilder; 7 | use nostr::{Npub, Nsec}; 8 | use nostr_sdk::{Keys, ToBech32, UnsignedEvent}; 9 | use secp256k1::{Secp256k1, SecretKey, XOnlyPublicKey}; 10 | 11 | pub fn main() -> anyhow::Result<()> { 12 | let ops = Ops::parse(); 13 | 14 | handle_ops(ops)?; 15 | 16 | Ok(()) 17 | } 18 | 19 | fn handle_ops(ops: Ops) -> anyhow::Result<()> { 20 | match ops.command { 21 | Commands::Keys { pubkey, nostr } => cmd_keys(pubkey, nostr)?, 22 | Commands::Transfer { old, new, name } => cmd_transfer(old, new, name)?, 23 | } 24 | 25 | Ok(()) 26 | } 27 | 28 | fn cmd_keys(pubkey: bool, nostr: bool) -> anyhow::Result<()> { 29 | let keys = nostr_sdk::Keys::generate(); 30 | let (sk, pk) = if nostr { 31 | ( 32 | keys.secret_key()?.to_bech32()?, 33 | keys.public_key().to_bech32()?, 34 | ) 35 | } else { 36 | ( 37 | keys.secret_key()?.display_secret().to_string(), 38 | keys.public_key().to_string(), 39 | ) 40 | }; 41 | println!("SK: {sk}"); 42 | if pubkey { 43 | println!("PK: {pk}"); 44 | } 45 | Ok(()) 46 | } 47 | 48 | fn cmd_transfer(old: Nsec, new: Npub, name: String) -> anyhow::Result<()> { 49 | let tb = TransferBuilder { 50 | new_pubkey: new.as_ref(), 51 | name: &name, 52 | }; 53 | let keys = nostr_sdk::Keys::new(*old.as_ref()); 54 | let or1 = tb.transfer_op_return(); 55 | let or2 = 
tb.signature_op_return(keys)?; 56 | println!("{}\n{}", hex::encode(or1), hex::encode(or2)); 57 | Ok(()) 58 | } 59 | 60 | #[derive(clap::Parser)] 61 | struct Ops { 62 | #[command(subcommand)] 63 | command: Commands, 64 | } 65 | 66 | #[derive(clap::Subcommand)] 67 | enum Commands { 68 | /// Generate Schnorr keypairs. 69 | Keys { 70 | #[arg(short, long)] 71 | pubkey: bool, 72 | 73 | #[arg(short, long)] 74 | nostr: bool, 75 | }, 76 | 77 | /// Generate properly formatted OP_RETURNs for a name transfer. 78 | Transfer { 79 | /// Hex-encoded or bech32 (nsec) secret key for the current (previous) owner 80 | old: Nsec, 81 | 82 | /// Hex-encodced or bech32 (npub) public key for the new owner 83 | new: Npub, 84 | 85 | /// Name to transfer 86 | name: String, 87 | }, 88 | } 89 | -------------------------------------------------------------------------------- /nomen/src/subcommands/index/events/relay_index.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use nostr_sdk::{EventBuilder, Keys, Tag}; 4 | use secp256k1::SecretKey; 5 | use serde::Serialize; 6 | use sqlx::SqlitePool; 7 | 8 | use crate::{ 9 | config::Config, 10 | db::{self, relay_index::Name}, 11 | }; 12 | 13 | pub async fn publish(config: &Config, pool: &SqlitePool, use_queue: bool) -> anyhow::Result<()> { 14 | if !config.publish_index() { 15 | return Ok(()); 16 | } 17 | let sk: SecretKey = config 18 | .secret_key() 19 | .expect("Missing config validation for secret") 20 | .into(); 21 | let keys = Keys::new(sk); 22 | let (_, client) = config.nostr_random_client().await?; 23 | 24 | tracing::info!("Publishing relay index."); 25 | let names = if use_queue { 26 | db::relay_index::fetch_all_queued(pool).await? 27 | } else { 28 | db::relay_index::fetch_all(pool).await? 
29 | }; 30 | send_events(pool, names, keys, &client).await?; 31 | tracing::info!("Publishing relay index complete."); 32 | 33 | client.disconnect().await.ok(); 34 | Ok(()) 35 | } 36 | 37 | async fn send_events( 38 | conn: &SqlitePool, 39 | names: Vec, 40 | keys: Keys, 41 | client: &nostr_sdk::Client, 42 | ) -> Result<(), anyhow::Error> { 43 | for name in names { 44 | let records: HashMap = serde_json::from_str(&name.records)?; 45 | let content = Content { 46 | name: name.name.clone(), 47 | pubkey: name.pubkey, 48 | records, 49 | }; 50 | let content_serialize = serde_json::to_string(&content)?; 51 | let event = EventBuilder::new( 52 | nostr_sdk::Kind::ParameterizedReplaceable(38301), 53 | content_serialize, 54 | &[Tag::Identifier(name.name.clone())], 55 | ) 56 | .to_event(&keys)?; 57 | 58 | match client.send_event(event.clone()).await { 59 | Ok(s) => { 60 | tracing::info!("Broadcast event id {s}"); 61 | db::relay_index::delete(conn, &name.name).await?; 62 | } 63 | Err(e) => { 64 | tracing::error!( 65 | "Unable to broadcast event {} during relay index publish: {e}", 66 | event.id 67 | ); 68 | } 69 | } 70 | } 71 | Ok(()) 72 | } 73 | 74 | #[derive(Serialize)] 75 | struct Content { 76 | name: String, 77 | pubkey: String, 78 | records: HashMap, 79 | } 80 | -------------------------------------------------------------------------------- /nomen/templates/updaterecords.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 | 5 | 6 |
7 |

Update Records

8 | {% if !unsigned_event.is_empty() %} 9 | 10 |

The following event was created. You can use a NIP-07 browser extension to sign and broadcast this event, using the 11 | same keypair that was used to register the name on the blockchain.

12 | 13 |
{{ unsigned_event }}
14 | 15 |

16 | 17 |

18 | 19 |
20 |
21 | 22 | 52 | 53 | {% else %} 54 | 55 |
56 |

57 | 58 | 59 |

60 | 61 |

62 | 65 | 66 |

67 | 68 | {% include "pubkey.html" %} 69 | 70 |

71 | 72 |

73 |
74 | 75 | {% endif %} 76 |
77 | {% endblock %} -------------------------------------------------------------------------------- /nomen/src/subcommands/index/events/event_data.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use anyhow::bail; 4 | use nomen_core::{EventExtractor, Hash160, Name, Nsid}; 5 | use nostr_sdk::{Event, EventId}; 6 | use secp256k1::XOnlyPublicKey; 7 | 8 | #[derive(Debug, Clone)] 9 | pub struct EventData { 10 | pub event_id: EventId, 11 | pub fingerprint: [u8; 5], 12 | pub nsid: Nsid, 13 | pub calculated_nsid: Nsid, 14 | pub pubkey: XOnlyPublicKey, 15 | pub name: Name, 16 | pub created_at: i64, 17 | pub raw_content: String, 18 | pub records: Option>, 19 | pub raw_event: String, 20 | } 21 | 22 | impl EventData { 23 | pub fn from_event(event: &Event) -> anyhow::Result { 24 | let nsid = event.extract_nsid()?; 25 | let calculated_nsid = event.clone().try_into()?; 26 | let name = event.extract_name()?; 27 | let fingerprint = Hash160::default() 28 | .chain_update(name.as_bytes()) 29 | .fingerprint(); 30 | let records = event.extract_records().ok(); 31 | let raw_event = serde_json::to_string(event)?; 32 | 33 | Ok(EventData { 34 | event_id: event.id, 35 | fingerprint, 36 | nsid, 37 | calculated_nsid, 38 | pubkey: event.pubkey, 39 | name: name.parse()?, 40 | created_at: event.created_at.as_i64(), 41 | raw_content: event.content.clone(), 42 | records, 43 | raw_event, 44 | }) 45 | } 46 | 47 | #[allow(unused)] 48 | pub fn validate(&self) -> anyhow::Result<()> { 49 | if self.nsid != self.calculated_nsid { 50 | bail!("Invalid nsid") 51 | } 52 | Ok(()) 53 | } 54 | } 55 | 56 | #[cfg(test)] 57 | mod tests { 58 | use nomen_core::Nsid; 59 | 60 | use super::*; 61 | 62 | #[test] 63 | fn test_event_data() { 64 | let event = 
r#"{"id":"4fb5485ad12706f3ddbde1cdeab3199fcbef01b4c2456a7420ef5acb400d29e5","pubkey":"d57b873363d2233d3cd54453416deff9546df50d963bb1208da37f10a4c23d6f","created_at":1682476154,"kind":38300,"tags":[["d","28d63a9a61c6c5ce6be37a830105c92cf7a8f365"],["nom","smith"]],"content":"{\"IP4\":\"127.0.0.1\",\"NPUB\":\"npub1234\"}","sig":"53a629c8169c29abc971653b71ebf8ceb185735170b702dd48377a3336819680577ef28a257b8e4db5e8101531232e1c886a35721b5af1399c32cb526fd61bb6"}"#; 65 | let event = Event::from_json(event).unwrap(); 66 | let mut ed: EventData = EventData::from_event(&event).unwrap(); 67 | assert!(ed.validate().is_ok()); 68 | 69 | ed.nsid = Nsid::from_slice(&[0; 20]).unwrap(); 70 | assert!(ed.validate().is_err()); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /nomen/src/config/config_file.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use bitcoin::Network; 4 | use nostr_sdk::Keys; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | use crate::util::Nsec; 8 | 9 | #[derive(Debug, Serialize, Deserialize, Clone, Default)] 10 | pub struct ServerConfig { 11 | pub bind: Option, 12 | pub explorer: Option, 13 | pub api: Option, 14 | pub indexer: Option, 15 | pub indexer_delay: Option, 16 | pub confirmations: Option, 17 | } 18 | impl ServerConfig { 19 | fn example() -> ServerConfig { 20 | ServerConfig { 21 | bind: Some("0.0.0.0:8080".into()), 22 | explorer: Some(true), 23 | api: Some(true), 24 | indexer: Some(true), 25 | indexer_delay: Some(30), 26 | confirmations: Some(3), 27 | } 28 | } 29 | } 30 | 31 | #[derive(Debug, Serialize, Deserialize, Clone, Default)] 32 | pub struct RpcConfig { 33 | pub cookie: Option, 34 | pub user: Option, 35 | pub password: Option, 36 | pub host: Option, 37 | pub port: Option, 38 | pub network: Option, 39 | } 40 | impl RpcConfig { 41 | fn example() -> RpcConfig { 42 | RpcConfig { 43 | cookie: Some("path/to/cookie/file".into()), 44 | 
user: Some("rpc username".into()), 45 | password: Some("rpc password".into()), 46 | host: Some("localhost".into()), 47 | port: Some(8441), 48 | network: Some(Network::Bitcoin), 49 | } 50 | } 51 | } 52 | 53 | #[derive(Debug, Serialize, Deserialize, Clone, Default)] 54 | pub struct NostrConfig { 55 | pub relays: Option>, 56 | pub secret: Option, 57 | pub publish: Option, 58 | pub well_known: Option, 59 | } 60 | impl NostrConfig { 61 | fn example() -> NostrConfig { 62 | NostrConfig { 63 | relays: Some(vec!["wss://relay.damus.io".into()]), 64 | secret: Keys::generate() 65 | .secret_key() 66 | .ok() 67 | .map(std::convert::Into::into), 68 | publish: Some(true), 69 | well_known: Some(true), 70 | } 71 | } 72 | } 73 | 74 | #[derive(Debug, Serialize, Deserialize, Clone, Default)] 75 | pub struct ConfigFile { 76 | pub data: Option, 77 | pub nostr: NostrConfig, 78 | pub server: ServerConfig, 79 | pub rpc: RpcConfig, 80 | } 81 | 82 | impl ConfigFile { 83 | pub fn example() -> ConfigFile { 84 | ConfigFile { 85 | data: Some("nomen.db".into()), 86 | nostr: NostrConfig::example(), 87 | server: ServerConfig::example(), 88 | rpc: RpcConfig::example(), 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /nomen/templates/name.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |

{{ name }}

6 | 7 |

Blockchain Info

8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 44 | 45 | 46 | {% if v1_upgrade_blockheight.is_some() %} 47 | 48 | 49 | 50 | 51 | {% endif %} 52 | 53 | {% if v1_upgrade_txid.is_some() %} 54 | {% let txid = v1_upgrade_txid.clone().unwrap() %} 55 | 56 | 57 | 58 | 59 | {% endif %} 60 | 61 |
Blockhash{{ blockhash }}
Block Height{{ height }}
Txid{{ txid }}
Vout{{ vout }}
Blocktime{{ blocktime }}
Owner (pubkey){{ pubkey }}
Protocol Version 41 | {{ protocol }} 42 | {% if protocol == 0 %}Upgrade to v1{% endif %} 43 |
Upgrade Blockheight{{ v1_upgrade_blockheight.unwrap() }}
Upgrade Txid{{ txid }}
62 | 63 |

Records

64 | 65 |

Update Records

66 | 67 | {% if records.is_empty() %} 68 |

No records found.

69 | {% else %} 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | {% for key in record_keys %} 80 | 81 | 82 | {% if key == "WEB" %} 83 | 84 | {% else if key == "NPUB" %} 85 | 86 | {% else if key == "TWITTER" %} 87 | 88 | {% else if key == "MOTD" %} 89 | 92 | {% else %} 93 | 94 | {% endif %} 95 | 96 | {% endfor %} 97 | 98 |
Record TypeValue
{{ key }}{{ records[key] }}{{ records[key] }}{{ records[key] }} 90 | "{{ records[key] }}" 91 | {{ records[key] }}
99 | {% endif %} 100 |
101 | {% endblock %} -------------------------------------------------------------------------------- /nomen/templates/newname.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |

{% if upgrade %}Upgrade{% else %}New{% endif %} Name

6 | 7 | {% if !data.is_empty() %} 8 |

{% if is_psbt %}Sign and broadcast this transaction with your Bitcoin wallet{% else %}Broadcast a transaction with 9 | this OP_RETURN data{% endif %}:

10 |
{{ data }}
11 | 12 | 13 |

After signing and transmitting the transaction, setup 14 | your records for the indexer to property index your new name.

15 | 16 | 28 | 29 | {% else %} 30 | 31 | {% if upgrade %} 32 |

33 | You can upgrade an old-style v0 name to v1 by simply recreating it. As long as the name and pubkey match, the 34 | protocol will treat it as an upgrade. 35 |

36 | {% endif %} 37 | 38 |

39 | You have two options: 40 |

    41 |
  1. Create an unsigned PSBT (partially signed Bitcoin transaction) and paste it below. This will modify the PSBT by 42 | adding an 43 | additional zero value OP_RETURN output. Make sure to slightly over-estimate the fee to account for 44 | the bit of extra data, and check the transaction before you sign and broadcast it!
  2. 45 |
  3. Leave the PSBT field blank, and you will be given a hex-encoded OP_RETURN value which you can use 46 | in a Bitcoin wallet of your choice which supports it (Bitcoin Core, Electrum, etc).
  4. 47 |
48 |

49 | 50 |

51 | Once it is mined and has {{ confirmations }} confirmations, it will be indexed. 52 |

53 | 54 |

55 | In order for the indexer to properly index your name, you also need to send your records after you broadcast your 56 | transaction! 57 | You can comeback anytime and click on Update Records in the navigation menu. 58 |

59 | 60 |
61 | 62 |

63 | 64 | 65 |

66 | 67 |

68 | 69 | 70 |

71 | 72 | {% include "pubkey.html" %} 73 | 74 |

75 | 76 |

77 |
78 | 79 | 80 | 81 | {% endif %} 82 |
83 | 84 | {% endblock %} -------------------------------------------------------------------------------- /nomen_core/src/extractor.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use itertools::Itertools; 4 | use nostr_sdk::Event; 5 | use secp256k1::XOnlyPublicKey; 6 | 7 | use super::Nsid; 8 | 9 | #[derive(thiserror::Error, Debug)] 10 | #[error("event extractor")] 11 | pub struct ExtractorError; 12 | 13 | pub trait EventExtractor { 14 | fn extract_children(&self, name: &str) 15 | -> Result, ExtractorError>; 16 | fn extract_records(&self) -> Result, ExtractorError>; 17 | fn extract_name(&self) -> Result; 18 | fn extract_nsid(&self) -> Result; 19 | fn extract_prev_nsid(&self) -> Result, ExtractorError>; 20 | } 21 | 22 | impl EventExtractor for Event { 23 | fn extract_children( 24 | &self, 25 | name: &str, 26 | ) -> Result, ExtractorError> { 27 | let s: Vec<(String, XOnlyPublicKey)> = 28 | serde_json::from_str(&self.content).or(Err(ExtractorError))?; 29 | let children = s 30 | .into_iter() 31 | .map(|(n, pk)| (format!("{n}.{name}"), pk)) 32 | .collect_vec(); 33 | Ok(children) 34 | } 35 | 36 | fn extract_records(&self) -> Result, ExtractorError> { 37 | serde_json::from_str(&self.content).or(Err(ExtractorError)) 38 | } 39 | 40 | fn extract_name(&self) -> Result { 41 | self.tags 42 | .iter() 43 | .filter_map(|t| match t { 44 | nostr_sdk::Tag::Generic(tk, values) => match tk { 45 | nostr_sdk::prelude::TagKind::Custom(tn) if tn == "nom" => { 46 | Some(values.iter().next()?.clone()) 47 | } 48 | _ => None, 49 | }, 50 | _ => None, 51 | }) 52 | .next() 53 | .ok_or(ExtractorError) 54 | } 55 | 56 | fn extract_nsid(&self) -> Result { 57 | self.tags 58 | .iter() 59 | .filter_map(|t| match t { 60 | nostr_sdk::Tag::Identifier(id) => Some(id.clone()), 61 | _ => None, 62 | }) 63 | .next() 64 | .ok_or(ExtractorError)? 
65 | .parse() 66 | .or(Err(ExtractorError)) 67 | } 68 | 69 | fn extract_prev_nsid(&self) -> Result, ExtractorError> { 70 | let nsid = self 71 | .tags 72 | .iter() 73 | .find_map(|t| match t { 74 | nostr_sdk::Tag::Generic(tk, values) => match tk { 75 | nostr_sdk::prelude::TagKind::Custom(tn) if tn == "nom" => { 76 | Some(values.get(1)?.clone()) 77 | } 78 | _ => None, 79 | }, 80 | _ => None, 81 | }) 82 | .and_then(|s| s.parse::().ok()); 83 | Ok(nsid) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /docs/FAQ.md: -------------------------------------------------------------------------------- 1 | # Frequently Asked Questions 2 | 3 | ## What is Nomen? 4 | 5 | Nomen is a protocol to create a name system akin to DNS, but open, permissionless and self-sovereign. It's open, because it's all developed in the open. It's permissionless, because no one can stop you from having a name. It's self-sovereign because you can run it yourself, so no one can gatekeep your access to the protocol. 6 | 7 | ## Why Bitcoin? Why Nostr? 8 | 9 | Blockchains provide decentralized guarantees on the ordering of events. One thing that needs those guarantees is money, but another thing could be argued to be identity. In this case, human-readable identity. Human-readable globally unique names are a limited quantity. In order to hand them out fairly, you traditionally needed a central authority like ICANN. But central authorities are corruptible. They can censor. The Bitcoin network is the most secure decentralized ordering mechanism on earth. We can use this to order claims to identities in the most fair way possible: first come, first serve. 10 | 11 | However, not all data can reasonably fit on the blockchain. The limited space means that a secondary protocol is needed to distribute the majority of the data. This is where Nostr comes in. Nostr is an open protocol with existing network effects that is perfect for this. 
12 | 13 | ## How does it work? 14 | 15 | It's actually very simple. Claims to identities are published to the Bitcoin blockchain. The claims are very small, only a single OP_RETURN output (80 bytes or less). Events are then published to Nostr relays that contain the records you want to associate with your name (npub, web address, twitter handle, etc). Per the protocol, even if two individuals claim the same identifier, the Bitcoin blockchain guarantees that one will be first, and thus valid. 16 | 17 | ## Why not inscriptions? 18 | 19 | There are two parts of inscription that might have been used: Sat tracking and the inscription envelope. 20 | 21 | Sat tracking could have been used to allow transferring the name, just like Ordinals inscriptions. This adds complexity for the end user and developer because then you have to build a wallet with special coin tracking, or make sure that the user understands to be extremely careful with their UTXOs. Additionally, this is intended to be a "Nostr-native" protocol, and that means it's the Bitcoin keys that own the name, not Nostr keys. While it's true that Nostr keys can be used as Bitcoin keys in taproot addresses, all of the added complexity of trying to make it all work wasn't worth it. 
-------------------------------------------------------------------------------- /docs/API.md: -------------------------------------------------------------------------------- 1 | # Nomen Indexer REST API 2 | 3 | The Nomen indexer has a rest API that some may find useful. This API is currently experimental and subject to change. 4 | 5 | ## Errors 6 | 7 | All methods return `200` on success, otherwise they return `400` on an error, which the JSON response: 8 | 9 | ```json 10 | { 11 | "error": "" 12 | } 13 | ``` 14 | 15 | ## Methods 16 | 17 | ### `GET /api/names` 18 | 19 | List of indexed names and owners. 20 | 21 | **Request Type**: `Query Params` 22 | 23 | **Request Body**: `N/A` 24 | 25 | **Response Type**: `JSON` 26 | 27 | **Response Body**: 28 | 29 | ```json 30 | [ 31 | { 32 | "name": "", 33 | "pubkey": "" 34 | } 35 | ] 36 | ``` 37 | 38 | ### `GET /api/name` 39 | 40 | Queries information and metadata about a specific name. 41 | 42 | **Request Type**: `Query Params` 43 | 44 | **Request Body**: `name` is a string parameter matching the name to query. 45 | 46 | **Response Type**: `JSON` 47 | 48 | **Response Body**: 49 | 50 | ```json 51 | [ 52 | { 53 | "records": {} 54 | } 55 | ] 56 | ``` 57 | 58 | ### `GET /api/create/data` 59 | 60 | Returns a valid `OP_RETURN` which can be included in a Bitcoin transaction to claim a particular name. 61 | 62 | **Request Type**: `Query Params` 63 | 64 | **Request Body**: `name` is a string parameter matching the name to query. `pubkey` is the hex-encoded X-Only public key of the name's owner. 65 | 66 | **Response Type**: `JSON` 67 | 68 | **Response Body**: 69 | 70 | ```json 71 | [ 72 | { 73 | "op_return": [""] 74 | } 75 | ] 76 | ``` 77 | 78 | ### `GET /api/transfer/event` 79 | 80 | Returns an unsigned Nostr event which is used as a standard wrapper format for transfer signatures. This event must be signed by **current** owner of the name. 
This event may be signed like any Nostr event, then the `sig` field can be isolated and used as an on-chain signature for the transfer. 81 | 82 | **Request Type**: `Query Params` 83 | 84 | **Request Body**: `name` is a string parameter matching the name to query. `new_owner` is the hex-encoded X-Only public key of the name's new owner. `old_owner` is the hex-encoded X-Only public key of the name's current owner. 85 | 86 | **Response Type**: `JSON` 87 | 88 | **Response Body**: 89 | 90 | ```json 91 | [ 92 | { 93 | "event": {} 94 | } 95 | ] 96 | ``` 97 | 98 | ### `GET /api/transfer/data` 99 | 100 | Returns two valid `OP_RETURN` which can be included in a Bitcoin transactions to claim a particular name. The first `OP_RETURN` caches the transfer, and the second `OP_RETURN` is the signature. 101 | 102 | **Request Type**: `Query Params` 103 | 104 | **Request Body**: `name` is a string parameter matching the name to query. `new_owner` is the hex-encoded X-Only public key of the name's new owner. `signature` is the `sig` field of the signed Nostr transfer event (for example, returned by `GET /api/transfer/event`). 105 | 106 | **Response Type**: `JSON` 107 | 108 | **Response Body**: 109 | 110 | ```json 111 | [ 112 | { 113 | "op_return": ["", ""] 114 | } 115 | ] 116 | ``` -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | # git-cliff ~ default configuration file 2 | # https://git-cliff.org/docs/configuration 3 | # 4 | # Lines starting with "#" are comments. 5 | # Configuration options are organized into tables and keys. 6 | # See documentation for more information on available options. 
7 | 8 | [changelog] 9 | # changelog header 10 | header = """ 11 | # Changelog\n 12 | All notable changes to this project will be documented in this file.\n 13 | """ 14 | # template for the changelog body 15 | # https://keats.github.io/tera/docs/#introduction 16 | body = """ 17 | {% if version %}\ 18 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 19 | {% else %}\ 20 | ## [unreleased] 21 | {% endif %}\ 22 | {% for group, commits in commits | group_by(attribute="group") %} 23 | ### {{ group | upper_first }} 24 | {% for commit in commits %} 25 | - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }}\ 26 | {% endfor %} 27 | {% endfor %}\n 28 | """ 29 | # remove the leading and trailing whitespace from the template 30 | trim = true 31 | # changelog footer 32 | footer = """ 33 | """ 34 | # postprocessors 35 | postprocessors = [ 36 | # { pattern = '', replace = "https://github.com/orhun/git-cliff" }, # replace repository URL 37 | { "pattern" = "Feat: ", replace = "" }, 38 | { "pattern" = "Fix: ", replace = "" }, 39 | ] 40 | [git] 41 | # parse the commits based on https://www.conventionalcommits.org 42 | conventional_commits = false 43 | # filter out the commits that are not conventional 44 | filter_unconventional = false 45 | # process each line of a commit as an individual commit 46 | split_commits = false 47 | # regex for preprocessing the commit messages 48 | commit_preprocessors = [ 49 | # { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, # replace issue numbers 50 | ] 51 | # regex for parsing and grouping commits 52 | commit_parsers = [ 53 | { message = "^feat", group = "Features" }, 54 | { message = "^fix", group = "Bug Fixes" }, 55 | { message = "^doc", group = "Documentation" }, 56 | { message = "^perf", group = "Performance" }, 57 | { message = "^refactor", group = "Refactor" }, 58 | { message = "^style", group = "Styling" }, 59 | { message = "^test", group = 
"Testing" }, 60 | { message = "^chore\\(release\\): prepare for", skip = true }, 61 | { message = "^chore\\(deps\\)", skip = true }, 62 | { message = "^chore\\(pr\\)", skip = true }, 63 | { message = "^chore\\(pull\\)", skip = true }, 64 | # { message = "^chore|ci", group = "Miscellaneous Tasks" }, 65 | { body = ".*security", group = "Security" }, 66 | { message = "^revert", group = "Revert" }, 67 | { message = ".*", group = "Other" } 68 | ] 69 | # protect breaking changes from being skipped due to matching a skipping commit_parser 70 | protect_breaking_commits = false 71 | # filter out the commits that are not matched by commit parsers 72 | filter_commits = false 73 | # regex for matching git tags 74 | tag_pattern = "v[0-9].*" 75 | 76 | # regex for skipping tags 77 | skip_tags = "v0.1.0-beta.1" 78 | # regex for ignoring tags 79 | ignore_tags = "" 80 | # sort the tags topologically 81 | topo_order = false 82 | # sort the commits inside sections by oldest/newest order 83 | sort_commits = "oldest" 84 | # limit the number of commits included in the changelog. 
85 | # limit_commits = 42 86 | -------------------------------------------------------------------------------- /nomen/src/db/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::config::Config; 2 | 3 | use sqlx::SqlitePool; 4 | 5 | pub mod event_log; 6 | pub mod index; 7 | pub mod name; 8 | pub mod raw; 9 | pub mod relay_index; 10 | pub mod stats; 11 | 12 | static MIGRATIONS: [&str; 18] = [ 13 | "CREATE TABLE event_log (id INTEGER PRIMARY KEY, created_at, type, data);", 14 | "CREATE TABLE index_height (blockheight INTEGER PRIMARY KEY, blockhash);", 15 | "CREATE TABLE raw_blockchain (id INTEGER PRIMARY KEY, blockhash, txid, blocktime, blockheight, txheight, vout, data, indexed_at);", 16 | "CREATE TABLE blockchain_index (id INTEGER PRIMARY KEY, protocol, fingerprint, nsid, name, pubkey, blockhash, txid, blocktime, blockheight, txheight, vout, indexed_at);", 17 | "CREATE VIEW ordered_blockchain_vw AS 18 | SELECT * from blockchain_index 19 | ORDER BY blockheight ASC, txheight ASC, vout ASC;", 20 | "CREATE VIEW ranked_blockchain_vw AS 21 | SELECT *, row_number() OVER (PARTITION BY fingerprint) as rank 22 | FROM ordered_blockchain_vw", 23 | "CREATE VIEW valid_names_vw AS 24 | SELECT * FROM ranked_blockchain_vw WHERE rank = 1;", 25 | "CREATE VIEW valid_names_records_vw AS 26 | SELECT vn.*, COALESCE(ne.records, '{}') as records 27 | FROM valid_names_vw vn 28 | LEFT JOIN name_events ne ON vn.nsid = ne.nsid;", 29 | "CREATE TABLE transfer_cache (id INTEGER PRIMARY KEY, protocol, fingerprint, nsid, name, pubkey, blockhash, txid, blocktime, blockheight, txheight, vout, indexed_at);", 30 | 31 | // This is useful so that we can know that this blockheight was already indexed. Even if the cache entry is deleted because it's old, we can keep it here so that 32 | // we can know we already looked at it. 
33 | "CREATE TABLE old_transfer_cache (id, protocol, fingerprint, nsid, name, pubkey, blockhash, txid, blocktime, blockheight, txheight, vout, indexed_at);", 34 | 35 | // This view is useful as an "interesting things" view. I.e., something related to Nomen existed at this blockheight and we have already seen it. 36 | "CREATE VIEW index_blockheights_vw AS 37 | SELECT blockheight FROM blockchain_index 38 | UNION 39 | SELECT blockheight FROM transfer_cache 40 | UNION 41 | SELECT blockheight FROM old_transfer_cache;", 42 | 43 | "CREATE TABLE name_events (name, fingerprint, nsid, pubkey, created_at, event_id, records, indexed_at, raw_event);", 44 | "CREATE UNIQUE INDEX name_events_unique_idx ON name_events(name, pubkey);", 45 | "CREATE INDEX name_events_created_at_idx ON name_events(created_at);", 46 | "CREATE TABLE relay_index_queue (name);", 47 | "ALTER TABLE blockchain_index ADD COLUMN v1_upgrade_blockheight;", 48 | "ALTER TABLE blockchain_index ADD COLUMN v1_upgrade_txid", 49 | "CREATE UNIQUE INDEX riq_name_idx ON relay_index_queue (name)", 50 | ]; 51 | 52 | pub async fn initialize(config: &Config) -> anyhow::Result { 53 | let conn = config.sqlite().await?; 54 | 55 | sqlx::query("CREATE TABLE IF NOT EXISTS schema (version);") 56 | .execute(&conn) 57 | .await?; 58 | 59 | let (version,) = 60 | sqlx::query_as::<_, (i64,)>("SELECT COALESCE(MAX(version) + 1, 0) FROM schema;") 61 | .fetch_one(&conn) 62 | .await?; 63 | 64 | for (idx, migration) in MIGRATIONS[version as usize..].iter().enumerate() { 65 | let version = idx as i64 + version; 66 | let mut tx = conn.begin().await?; 67 | tracing::debug!("Migrations schema version {version}"); 68 | sqlx::query(migration).execute(&mut tx).await?; 69 | sqlx::query("INSERT INTO schema (version) VALUES (?);") 70 | .bind(version) 71 | .execute(&mut tx) 72 | .await?; 73 | tx.commit().await?; 74 | } 75 | 76 | Ok(conn) 77 | } 78 | -------------------------------------------------------------------------------- 
/nomen/src/subcommands/server/mod.rs: -------------------------------------------------------------------------------- 1 | mod api; 2 | mod explorer; 3 | 4 | use std::time::Duration; 5 | 6 | use askama_axum::IntoResponse; 7 | use axum::{ 8 | http::StatusCode, 9 | routing::{get, post}, 10 | Router, 11 | }; 12 | use sqlx::SqlitePool; 13 | use tokio::time::{interval, MissedTickBehavior}; 14 | use tower_http::cors::{Any, CorsLayer}; 15 | 16 | use crate::{config::Config, subcommands}; 17 | 18 | use self::explorer::ErrorTemplate; 19 | 20 | pub struct WebError(anyhow::Error, Option); 21 | 22 | impl IntoResponse for WebError { 23 | fn into_response(self) -> askama_axum::Response { 24 | ErrorTemplate { 25 | message: self.0.to_string(), 26 | } 27 | .into_response() 28 | } 29 | } 30 | 31 | impl From for WebError 32 | where 33 | E: Into, 34 | { 35 | fn from(err: E) -> Self { 36 | Self(err.into(), None) 37 | } 38 | } 39 | 40 | #[derive(Clone)] 41 | pub struct AppState { 42 | config: Config, 43 | pool: SqlitePool, 44 | } 45 | 46 | pub async fn start(config: &Config, conn: &SqlitePool) -> anyhow::Result<()> { 47 | if config.indexer() { 48 | let _indexer = tokio::spawn(indexer(config.clone())); 49 | } 50 | let mut app = Router::new(); 51 | 52 | if config.explorer() { 53 | app = app 54 | .route("/", get(explorer::index)) 55 | .route("/explorer", get(explorer::explorer)) 56 | .route("/explorer/:nsid", get(explorer::show_name)) 57 | .route("/newname", get(explorer::new_name_form)) 58 | .route("/newname", post(explorer::new_name_submit)) 59 | .route("/updaterecords", get(explorer::new_records_form)) 60 | .route("/updaterecords", post(explorer::new_records_submit)) 61 | .route("/transfer", get(explorer::transfer::initiate)) 62 | .route("/transfer", post(explorer::transfer::submit_initiate)) 63 | .route("/transfer/sign", post(explorer::transfer::complete)) 64 | .route("/stats", get(explorer::index_stats)); 65 | } 66 | 67 | if config.well_known() { 68 | app = 
app.route("/.well-known/nomen.json", get(explorer::well_known::nomen)); 69 | } 70 | 71 | if config.api() { 72 | let api_router = Router::new() 73 | .route("/names", get(api::names)) 74 | .route("/name", get(api::name)) 75 | .route("/create/data", get(api::op_return_v1)) 76 | .route("/v0/create/data", get(api::op_return_v0)) 77 | .route("/transfer/event", get(api::get_transfer_event)) 78 | .route("/transfer/data", get(api::get_transfer)) 79 | .layer(CorsLayer::new().allow_origin(Any).allow_methods(Any)); 80 | app = app.nest("/api", api_router); 81 | } 82 | 83 | let state = AppState { 84 | config: config.clone(), 85 | pool: conn.clone(), 86 | }; 87 | let app = app.with_state(state); 88 | 89 | let addr = config 90 | .server_bind() 91 | .expect("Server bind unconfigured") 92 | .parse()?; 93 | 94 | tracing::info!("Starting server on {addr}"); 95 | axum::Server::bind(&addr) 96 | .serve(app.into_make_service()) 97 | .with_graceful_shutdown(elegant_departure::tokio::depart().on_termination()) 98 | .await?; 99 | 100 | tracing::info!("Server shutdown complete."); 101 | elegant_departure::shutdown().await; 102 | Ok(()) 103 | } 104 | 105 | async fn indexer(config: Config) -> anyhow::Result<()> { 106 | let mut interval = interval(Duration::from_secs(config.server_indexer_delay())); 107 | interval.set_missed_tick_behavior(MissedTickBehavior::Skip); 108 | 109 | loop { 110 | match subcommands::index(&config).await { 111 | Ok(_) => {} 112 | Err(err) => tracing::error!("Indexing error: {}", err), 113 | } 114 | interval.tick().await; 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /docs/changelogs/v0.3.0.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | ## [0.3.0] - 2023-11-03 6 | 7 | ### Bug Fixes 8 | 9 | - Update HOWTO. 
10 | 11 | 12 | ### Features 13 | 14 | - Re-index command 15 | 16 | - Rescan command. 17 | 18 | - Now hex and npub keys works in the UI. 19 | 20 | 21 | ### Other 22 | 23 | - Index statistic page. 24 | 25 | - List of reserved named without corroborating Nostr events. 26 | 27 | - Changelog update, update cargo.toml for v0.2.0. 28 | 29 | - Bold the message on the uncorroborated claims screen. 30 | 31 | - Clarifying language in the spec. 32 | 33 | - Handle edge case where a name is being indexed twice because it was posted with the same NSID twice. 34 | 35 | - CHANGELOG.mod 36 | 37 | - Updating SPEC to remove transfers and added an Appendix for changes and updates. 38 | 39 | - Removing transfers from version 0x00 40 | 41 | - Limiting names to 43 characters. 42 | 43 | - Use Primal.net for user links. 44 | 45 | - Index Stats was still using transfer_events table 46 | 47 | - Merge branch 'master' into develop 48 | - MSRV 49 | 50 | - Simple API to generate v1 create OP_RETURN data. 51 | 52 | - Development environment setup docs. 53 | 54 | - Simpler blockchain index table. 55 | 56 | - Added protocol version to blockchain_index 57 | 58 | - Basic working indexer. 59 | 60 | - API for create v0 op_returns 61 | 62 | - V0 -> v1 automatic upgrade 63 | 64 | - Properly notifies upgrades 65 | 66 | - SPEC update 67 | 68 | - Basic nomen-cli 69 | 70 | - Refactoring CLI some more. 71 | 72 | - Completely refactored into a multi-crate workspace. 73 | 74 | - Moving REFACTOR.md 75 | 76 | - Added transer_cache table for pending/incomplete transfers awaiting signatures. 77 | 78 | - Use transfer_cache + signatures to complete a transfer and remove the transfer from the cache. 79 | 80 | - Refactoring the commands. 81 | 82 | - Breaking into messages 83 | 84 | - Broke event loop down, using raw_blockchain now. 
85 | 86 | - Updating REFACTOR.md 87 | 88 | - Updated from log crate to tracing and tracing_subscriber 89 | 90 | - Clippy pedantic fixes 91 | 92 | - Broke large method into many smaller methods for clippy's satisfaction. 93 | 94 | - Removed anyhow from nomen_core and using thiserror instead 95 | 96 | - Handle re-orgs. 97 | 98 | - Removing unused code, and moving things around. 99 | 100 | - Tests based on pre-defined test vectors! 101 | 102 | Tests based on pre-defined test vectors! 103 | 104 | - Moven nomen_core::util modules to nomen_core proper. 105 | 106 | - UI items are working 107 | 108 | - Fixed bug where it is continuously rescanning same blocks. 109 | 110 | - Refactor API module layout. 111 | 112 | - Refactored API and explorer modules, added API methods. 113 | 114 | - API docs. 115 | 116 | - Added CORS headers for API. 117 | 118 | - Setting version to 0.3.0-rc.1 for now 119 | 120 | - Removed uncorroborated claims completely. 121 | 122 | - Feedback now provided again when updating records. 123 | 124 | - Fixed Update Records link on the individual Name page. 125 | 126 | - Setting 100 expiration limit on transfer cache. 127 | 128 | - Creating a view for all unindex blocked. 129 | 130 | - Add protocol version to name page. 131 | 132 | - Return metadata from the Name api. 133 | 134 | - Return metadata from the Name api. 135 | 136 | - Merge branch 'protocolv1' of https://github.com/ursuscamp/nomen into protocolv1 137 | 138 | - New name now works a little bit more easily, by just asking for a tx input. 139 | 140 | - Fixing code TODOs. 141 | 142 | - V0 Upgrade Support in UI 143 | 144 | - Do not show nameless v0 names on the explorer. 145 | 146 | - Update docs. 147 | 148 | - Rename nsid to name in explorer. 149 | 150 | - Removing primary key index on old_transfer_cahce 151 | 152 | - Basic transfer UI! 153 | 154 | - Bug fixes 155 | 156 | - Added "Transfer Name" option to nav bar. 157 | 158 | - Fix two bugs: OP_RETURN length crashing bug, and database locking bug. 
159 | 160 | - New Name take/returns a PSBT again 161 | 162 | - PSBT is now optional for New Names. If PSBT is not provided, a plain OP_RETURN is generated instead. 163 | 164 | - Updating to pre.2 165 | 166 | - Transfers re-added. Transfers from UI now just output OP_RETURNs. 167 | 168 | - Bump prerelease version to 3. 169 | 170 | 171 | 172 | -------------------------------------------------------------------------------- /nomen/src/db/name.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::module_name_repetitions)] 2 | 3 | use nomen_core::{Hash160, Name, Nsid}; 4 | use nostr_sdk::EventId; 5 | use secp256k1::XOnlyPublicKey; 6 | use sqlx::{FromRow, Sqlite, SqlitePool}; 7 | 8 | #[derive(FromRow)] 9 | pub struct NameDetails { 10 | pub blockhash: String, 11 | pub txid: String, 12 | pub blocktime: i64, 13 | pub vout: i64, 14 | pub blockheight: i64, 15 | pub name: String, 16 | pub records: String, 17 | pub pubkey: String, 18 | pub protocol: i64, 19 | pub v1_upgrade_blockheight: Option, 20 | pub v1_upgrade_txid: Option, 21 | } 22 | 23 | pub async fn details(conn: &SqlitePool, query: &str) -> anyhow::Result { 24 | let details = sqlx::query_as::<_, NameDetails>( 25 | "SELECT * from valid_names_records_vw vn WHERE vn.nsid = ? 
or vn.name = ?", 26 | ) 27 | .bind(query) 28 | .bind(query) 29 | .fetch_one(conn) 30 | .await?; 31 | Ok(details) 32 | } 33 | 34 | #[derive(FromRow)] 35 | pub struct NameRecords { 36 | pub blockhash: String, 37 | pub txid: String, 38 | pub fingerprint: String, 39 | pub nsid: String, 40 | pub protocol: i64, 41 | pub records: String, 42 | } 43 | 44 | pub async fn records(conn: &SqlitePool, name: String) -> anyhow::Result> { 45 | let fingerprint = Hash160::default() 46 | .chain_update(name.as_bytes()) 47 | .fingerprint(); 48 | let records = sqlx::query_as::<_, NameRecords>( 49 | "SELECT vn.blockhash, vn.txid, vn.fingerprint, vn.nsid, vn.protocol, coalesce(ne.records, '{}') as records 50 | FROM valid_names_vw vn 51 | JOIN name_events ne ON vn.nsid = ne.nsid 52 | WHERE vn.fingerprint = ? LIMIT 1;", 53 | ) 54 | .bind(hex::encode(fingerprint)) 55 | .fetch_optional(conn) 56 | .await?; 57 | Ok(records) 58 | } 59 | 60 | pub async fn top_level_names( 61 | conn: &SqlitePool, 62 | query: Option, 63 | ) -> anyhow::Result> { 64 | let sql = match query { 65 | Some(q) => sqlx::query_as::<_, (String, String)>( 66 | "SELECT nsid, name FROM valid_names_vw WHERE name IS NOT NULL AND instr(name, ?) ORDER BY name;", 67 | ) 68 | .bind(q.to_lowercase()), 69 | None => sqlx::query_as::<_, (String, String)>( 70 | "SELECT nsid, name FROM valid_names_vw WHERE name IS NOT NULL ORDER BY name;", 71 | ), 72 | }; 73 | 74 | Ok(sql.fetch_all(conn).await?) 
75 | } 76 | 77 | #[allow(clippy::too_many_arguments)] 78 | pub async fn insert_name_event( 79 | conn: &SqlitePool, 80 | name: Name, 81 | fingerprint: [u8; 5], 82 | nsid: Nsid, 83 | pubkey: XOnlyPublicKey, 84 | created_at: i64, 85 | event_id: EventId, 86 | records: String, 87 | raw_event: String, 88 | ) -> anyhow::Result<()> { 89 | sqlx::query(include_str!("./queries/insert_name_event.sql")) 90 | .bind(name.to_string()) 91 | .bind(hex::encode(fingerprint)) 92 | .bind(nsid.to_string()) 93 | .bind(pubkey.to_string()) 94 | .bind(created_at) 95 | .bind(event_id.to_string()) 96 | .bind(records) 97 | .bind(raw_event) 98 | .execute(conn) 99 | .await?; 100 | Ok(()) 101 | } 102 | 103 | pub type NameAndKey = (String, String); 104 | 105 | pub async fn fetch_all(conn: &SqlitePool) -> anyhow::Result> { 106 | let rows = sqlx::query_as::<_, NameAndKey>("SELECT name, pubkey FROM valid_names_vw;") 107 | .fetch_all(conn) 108 | .await?; 109 | Ok(rows) 110 | } 111 | 112 | pub async fn check_availability( 113 | conn: impl sqlx::Executor<'_, Database = Sqlite> + Copy, 114 | name: &str, 115 | ) -> anyhow::Result { 116 | let fp = hex::encode( 117 | Hash160::default() 118 | .chain_update(name.as_bytes()) 119 | .fingerprint(), 120 | ); 121 | let (a,) = sqlx::query_as::<_, (bool,)>( 122 | "SELECT COUNT(*) = 0 FROM valid_names_vw WHERE fingerprint = ?;", 123 | ) 124 | .bind(&fp) 125 | .fetch_one(conn) 126 | .await?; 127 | Ok(a) 128 | } 129 | 130 | pub async fn last_records_time(conn: &SqlitePool) -> anyhow::Result { 131 | let (t,) = sqlx::query_as::<_, (i64,)>("SELECT COALESCE(MAX(created_at), 0) FROM name_events;") 132 | .fetch_one(conn) 133 | .await?; 134 | Ok(t as u64) 135 | } 136 | -------------------------------------------------------------------------------- /nomen/templates/faqs.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block body %} 4 |
5 |

FAQs

6 |
7 |

What is Nomen?

8 |

9 | Nomen is an open-protocol that aims to solve the "ICANN problem" by providing a registry for global names and identities without any central authority making decisions. 10 | The goals of this project are to promote decentralization, censorship resistance and self sovereignty. Controlling your own digital identity without having to ask anyone's 11 | permission is a huge, important part of that goal. 12 |

13 |
14 | 15 |
16 |

How does it work?

17 |

18 | Nomen is an open protocol with a few, extremely simple rules, built as a layer on top of two established permissionless technologies: Bitcoin and Nostr. Bitcoin provides the 19 | decentralized timestamps needed to establish first-come ownership over global names, and Nostr provides the transport method for name data. 20 |

21 | 22 |

23 | In short, to establish a name: 24 | 25 |

    26 |
  1. Create a hash, representing a new global name. This hash can uniquely and provably represent that name, and that name only. This hash is published to the Bitcoin blockchain, as a permanent record of the claim.
  2. 27 |
  3. 28 | Publish metadata for the name as a Nostr event. This contains all of the data necessary to reconstruct the hash, and prove the ownership of that name. That hash includes the public key of the owner, and the Nostr event 29 | must be signed by the associated private key, thus creating a cryptographically proven link between the owner and on-chain claim. 30 |
  4. 31 |
32 |

33 | 34 |

35 | That is all, in a nutshell! Publish a recognizable hash on the Bitcoin timechain, then broadcast your provable identity to the world. For more technical details, check out the spec (it's very simple and not difficult to understand). 36 |

37 |
38 | 39 |
40 |

How can this possibly scale?

41 | 42 |

43 | Scalability was an important goal of this protocol. Bitcoin transactions won't be affordable forever, and not everyone will be able to make an on-chain transaction just to prove ownership. So how can this possibly scale? 44 |

45 | 46 |

47 | The answer is namespaces. Each on-chain transaction represents a TOP-LEVEL root to a nested namespace. Each namespace can contain potentially infinite children, grandchildren, great grandchildren, etc. Think of it like com 48 | being a root name on chain, and amazon and google being child names. Top level names belong to the root owner of the namespace, and can be operated like a business if they wish, or a charity. They can aggregate namespace updates to the blockchain 49 | periodically. 50 |

51 | 52 |

53 | If you create a name, you can keep it just to yourself forever, or give away names to friends and family, sell them, whatever you want. And it scales because the only thing that gets updated on the blockchain are things that affect ownership. Everything else happens 54 | off-chain, always provably linked back to an on-chain fingerprint. 55 |

56 |
57 | 58 |
59 |

If someone else owns a namespace, how is it censorship-resistant?

60 | 61 |

62 | After a child namespace is published (as part of a merkle tree in the on-chain hash), it can NEVER be unpublished. Namespace can only ever be added, not deleted. Part of that hash includes the public key of the child's owner! Which means that, 63 | once a namespace owner provides you with a name, it is associated only with your private key, and only you control it. You can continue to use it forever and the original namespace provider has no say in it. 64 |

65 |
66 | 67 |
68 |

How exactly does Nostr come into this?

69 | 70 |


 71 | Because on-chain data is limited and we don't want to pollute it unnecessarily, we need a secondary protocol for data transport. Rather than create some brand new P2P protocol, we can use the established Nostr network, which already has 72 | the necessary cryptographic primitives (and a healthy dose of users that respect a mission of controlling your digital identity).

74 | 75 |

76 | Nostr's role in this is quite simple: When a root-level namespace is created, the namespace owner broadcasts a Nostr event referencing that on-chain hash, with all of the data necessary to reproduce the hash. This proves ownership. 77 | A second Nostr event is used to update records for any namespaces, or descendants. The records associated with a name just keys and values, similar to DNS records, as a JSON object. For instance: 78 |

79 | 80 | 81 | { 82 | "IP4": "69.420.0.1", 83 | "NPUB": "npub1..." 84 | } 85 | 86 | 87 |

88 | Your name can represent you in many different contexts, such as your website, your Nostr identity, or whatever you choose to include. While standards will no doubt exist, this is an open protocol. When you want to update it, just sign and broadcast 89 | an updated event using the owner key of the namespace! 90 |

91 |
92 |
93 | {% endblock %} -------------------------------------------------------------------------------- /nomen/src/config/cfg.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use anyhow::bail; 4 | use bitcoin::Network; 5 | use nostr_sdk::{ 6 | prelude::{FromSkStr, ToBech32}, 7 | Options, 8 | }; 9 | use sqlx::{sqlite, SqlitePool}; 10 | 11 | use crate::util::Nsec; 12 | 13 | use super::{Cli, ConfigFile}; 14 | 15 | #[derive(Clone, Debug)] 16 | pub struct Config { 17 | pub cli: Cli, 18 | pub file: ConfigFile, 19 | } 20 | 21 | impl Config { 22 | pub fn new(cli: Cli, file: ConfigFile) -> Self { 23 | Self { cli, file } 24 | } 25 | 26 | pub fn rpc_auth(&self) -> bitcoincore_rpc::Auth { 27 | if let Some(cookie) = &self.rpc_cookie() { 28 | bitcoincore_rpc::Auth::CookieFile(cookie.clone()) 29 | } else if self.rpc_user().is_some() || self.rpc_password().is_some() { 30 | bitcoincore_rpc::Auth::UserPass( 31 | self.rpc_user().expect("RPC user not configured"), 32 | self.rpc_password().expect("RPC password not configured"), 33 | ) 34 | } else { 35 | bitcoincore_rpc::Auth::None 36 | } 37 | } 38 | 39 | pub fn rpc_client(&self) -> anyhow::Result { 40 | let host = self.rpc_host(); 41 | let port = self.rpc_port(); 42 | let url = format!("{host}:{port}"); 43 | let auth = self.rpc_auth(); 44 | Ok(bitcoincore_rpc::Client::new(&url, auth)?) 45 | } 46 | 47 | pub async fn sqlite(&self) -> anyhow::Result { 48 | let db = self.data(); 49 | 50 | // SQLx doesn't seem to like it if a db file does not already exist, so let's create an empty one 51 | if !tokio::fs::try_exists(&db).await? { 52 | tokio::fs::OpenOptions::new() 53 | .write(true) 54 | .create(true) 55 | .open(&db) 56 | .await?; 57 | } 58 | 59 | Ok(SqlitePool::connect(&format!("sqlite:{}", db.to_string_lossy())).await?) 
60 | } 61 | 62 | pub async fn nostr_client( 63 | &self, 64 | sk: &str, 65 | ) -> anyhow::Result<(nostr_sdk::Keys, nostr_sdk::Client)> { 66 | let keys = nostr_sdk::Keys::from_sk_str(sk)?; 67 | let client = nostr_sdk::Client::with_opts(&keys, Options::new().wait_for_send(true)); 68 | let relays = self.relays(); 69 | for relay in relays { 70 | client.add_relay(relay, None).await?; 71 | } 72 | client.connect().await; 73 | Ok((keys, client)) 74 | } 75 | 76 | pub async fn nostr_random_client( 77 | &self, 78 | ) -> anyhow::Result<(nostr_sdk::Keys, nostr_sdk::Client)> { 79 | let keys = nostr_sdk::Keys::generate(); 80 | let sk = keys.secret_key()?.to_bech32()?; 81 | self.nostr_client(&sk).await 82 | } 83 | 84 | pub fn starting_block_height(&self) -> usize { 85 | match self.network() { 86 | Network::Bitcoin => 790_500, 87 | Network::Signet => 143_500, 88 | _ => 0, 89 | } 90 | } 91 | 92 | pub fn validate(&self) -> anyhow::Result<()> { 93 | if self.missing_secret_key() { 94 | bail!("Config: Secret key required for relay publising"); 95 | } 96 | Ok(()) 97 | } 98 | 99 | fn missing_secret_key(&self) -> bool { 100 | (self.publish_index() || self.well_known()) && self.file.nostr.secret.is_none() 101 | } 102 | 103 | pub fn publish_index(&self) -> bool { 104 | self.file.nostr.publish.unwrap_or_default() 105 | } 106 | 107 | pub fn well_known(&self) -> bool { 108 | self.file.nostr.well_known.unwrap_or_default() 109 | } 110 | 111 | pub fn secret_key(&self) -> Option { 112 | self.file.nostr.secret 113 | } 114 | 115 | fn rpc_cookie(&self) -> Option { 116 | self.file.rpc.cookie.clone() 117 | } 118 | 119 | fn rpc_user(&self) -> Option { 120 | self.file.rpc.user.clone() 121 | } 122 | 123 | fn rpc_password(&self) -> Option { 124 | self.file.rpc.password.clone() 125 | } 126 | 127 | fn rpc_port(&self) -> u16 { 128 | self.file.rpc.port.expect("RPC port required") 129 | } 130 | 131 | fn rpc_host(&self) -> String { 132 | self.file 133 | .rpc 134 | .host 135 | .clone() 136 | .unwrap_or_else(|| 
"127.0.0.1".to_string()) 137 | } 138 | 139 | fn data(&self) -> PathBuf { 140 | self.file.data.clone().unwrap_or_else(|| "nomen.db".into()) 141 | } 142 | 143 | pub fn relays(&self) -> Vec { 144 | self.file.nostr.relays.clone().unwrap_or_else(|| { 145 | vec![ 146 | "wss://relay.damus.io".into(), 147 | "wss://relay.snort.social".into(), 148 | "wss://nos.lol".into(), 149 | "wss://nostr.orangepill.dev".into(), 150 | ] 151 | }) 152 | } 153 | 154 | pub fn network(&self) -> Network { 155 | self.file.rpc.network.unwrap_or(Network::Bitcoin) 156 | } 157 | 158 | pub fn server_bind(&self) -> Option { 159 | self.file.server.bind.clone() 160 | } 161 | 162 | pub fn server_indexer_delay(&self) -> u64 { 163 | self.file.server.indexer_delay.unwrap_or(30) 164 | } 165 | 166 | pub fn confirmations(&self) -> usize { 167 | self.file.server.confirmations.unwrap_or(3) 168 | } 169 | 170 | pub fn indexer(&self) -> bool { 171 | self.file.server.indexer.unwrap_or(true) 172 | } 173 | 174 | pub fn explorer(&self) -> bool { 175 | self.file.server.explorer.unwrap_or(true) 176 | } 177 | 178 | pub fn api(&self) -> bool { 179 | self.file.server.api.unwrap_or(true) 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /nomen/src/subcommands/server/api.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use anyhow::anyhow; 4 | use axum::{ 5 | extract::{Query, State}, 6 | Json, 7 | }; 8 | use nomen_core::{CreateBuilder, Name, TransferBuilder}; 9 | 10 | use crate::db; 11 | 12 | use self::models::{OpReturnResponse, TransferEventResponse}; 13 | 14 | use super::AppState; 15 | 16 | mod models { 17 | use std::collections::HashMap; 18 | 19 | use askama_axum::IntoResponse; 20 | use axum::{http::StatusCode, Json}; 21 | use nostr_sdk::UnsignedEvent; 22 | use secp256k1::{schnorr::Signature, XOnlyPublicKey}; 23 | use serde::{Deserialize, Serialize}; 24 | 25 | #[derive(Serialize)] 26 | pub struct 
JsonError { 27 | pub error: String, 28 | #[serde(skip)] 29 | pub status: StatusCode, 30 | } 31 | 32 | impl JsonError { 33 | pub fn message(err: &str) -> JsonError { 34 | JsonError { 35 | error: err.into(), 36 | status: StatusCode::BAD_REQUEST, 37 | } 38 | } 39 | } 40 | 41 | impl IntoResponse for JsonError { 42 | fn into_response(self) -> askama_axum::Response { 43 | (self.status, Json(self)).into_response() 44 | } 45 | } 46 | 47 | impl From for JsonError { 48 | fn from(value: anyhow::Error) -> Self { 49 | JsonError { 50 | error: value.to_string(), 51 | status: StatusCode::INTERNAL_SERVER_ERROR, 52 | } 53 | } 54 | } 55 | 56 | #[derive(Deserialize)] 57 | pub struct NameQuery { 58 | pub name: String, 59 | } 60 | 61 | #[derive(Serialize)] 62 | pub struct NameResult { 63 | pub blockhash: String, 64 | pub txid: String, 65 | pub fingerprint: String, 66 | pub nsid: String, 67 | pub protocol: i64, 68 | pub records: HashMap, 69 | } 70 | 71 | #[derive(Deserialize)] 72 | pub struct OpReturnQuery { 73 | pub name: String, 74 | pub pubkey: XOnlyPublicKey, 75 | } 76 | 77 | #[derive(Serialize, Default)] 78 | pub struct OpReturnResponse { 79 | pub op_return: Vec, 80 | } 81 | 82 | #[derive(Deserialize)] 83 | pub struct TransferEventQuery { 84 | pub name: String, 85 | pub new_owner: XOnlyPublicKey, 86 | pub old_owner: XOnlyPublicKey, 87 | } 88 | 89 | #[derive(Serialize)] 90 | pub struct TransferEventResponse { 91 | pub event: UnsignedEvent, 92 | } 93 | 94 | #[derive(Deserialize)] 95 | pub struct TransferQuery { 96 | pub name: String, 97 | pub new_owner: XOnlyPublicKey, 98 | pub signature: Signature, 99 | } 100 | 101 | #[derive(Serialize)] 102 | pub struct NameResponse { 103 | pub name: String, 104 | pub pubkey: String, 105 | } 106 | 107 | #[derive(Serialize)] 108 | pub struct NamesResponse { 109 | pub names: Vec, 110 | } 111 | } 112 | 113 | pub async fn names( 114 | State(state): State, 115 | ) -> Result, models::JsonError> { 116 | let names = db::name::fetch_all(&state.pool) 117 | 
.await? 118 | .into_iter() 119 | .map(|(n, pk)| models::NameResponse { 120 | name: n, 121 | pubkey: pk, 122 | }) 123 | .collect(); 124 | Ok(Json(models::NamesResponse { names })) 125 | } 126 | 127 | pub async fn name( 128 | Query(name): Query, 129 | State(state): State, 130 | ) -> Result, models::JsonError> { 131 | let conn = state.pool; 132 | let name = db::name::records(&conn, name.name).await?; 133 | 134 | name.and_then(|nr| { 135 | Some(models::NameResult { 136 | blockhash: nr.blockhash, 137 | txid: nr.txid, 138 | fingerprint: nr.fingerprint, 139 | nsid: nr.nsid, 140 | protocol: nr.protocol, 141 | records: serde_json::from_str(&nr.records).ok()?, 142 | }) 143 | }) 144 | .map(Json) 145 | .ok_or_else(|| models::JsonError::message("Name not found")) 146 | } 147 | 148 | #[allow(clippy::unused_async)] 149 | pub async fn op_return_v1( 150 | Query(query): Query, 151 | ) -> Result, models::JsonError> { 152 | let name = Name::from_str(&query.name).map_err(|_| anyhow!("Invalid name"))?; 153 | let bytes = CreateBuilder::new(&query.pubkey, name.as_ref()).v1_op_return(); 154 | let orr = models::OpReturnResponse { 155 | op_return: vec![hex::encode(bytes)], 156 | }; 157 | 158 | Ok(Json(orr)) 159 | } 160 | 161 | #[allow(clippy::unused_async)] 162 | pub async fn op_return_v0( 163 | Query(query): Query, 164 | ) -> Result, models::JsonError> { 165 | let name = Name::from_str(&query.name).map_err(|_| anyhow!("Invalid name"))?; 166 | let bytes = CreateBuilder::new(&query.pubkey, name.as_ref()).v0_op_return(); 167 | let orr = models::OpReturnResponse { 168 | op_return: vec![hex::encode(bytes)], 169 | }; 170 | 171 | Ok(Json(orr)) 172 | } 173 | 174 | #[allow(clippy::unused_async)] 175 | pub async fn get_transfer_event( 176 | Query(query): Query, 177 | ) -> Result, models::JsonError> { 178 | let tb = TransferBuilder { 179 | new_pubkey: &query.new_owner, 180 | name: &query.name, 181 | }; 182 | Ok(Json(TransferEventResponse { 183 | event: tb.unsigned_event(&query.old_owner), 184 | })) 
185 | } 186 | 187 | #[allow(clippy::unused_async)] 188 | pub async fn get_transfer( 189 | Query(query): Query, 190 | ) -> Result, models::JsonError> { 191 | let tb = TransferBuilder { 192 | new_pubkey: &query.new_owner, 193 | name: &query.name, 194 | }; 195 | let or1 = hex::encode(tb.transfer_op_return()); 196 | let or2 = hex::encode(tb.signature_provided_op_return(query.signature)); 197 | Ok(Json(OpReturnResponse { 198 | op_return: vec![or1, or2], 199 | })) 200 | } 201 | -------------------------------------------------------------------------------- /nomen/src/db/index.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::module_name_repetitions)] 2 | 3 | use bitcoin::{BlockHash, Txid}; 4 | use nomen_core::{Hash160, Nsid, NsidBuilder}; 5 | use secp256k1::XOnlyPublicKey; 6 | use sqlx::{Executor, Sqlite, SqlitePool}; 7 | 8 | pub struct BlockchainIndex { 9 | pub protocol: i64, 10 | pub fingerprint: [u8; 5], 11 | pub nsid: Nsid, 12 | pub name: Option, 13 | pub pubkey: Option, 14 | pub blockhash: BlockHash, 15 | pub txid: Txid, 16 | pub blocktime: usize, 17 | pub blockheight: usize, 18 | pub txheight: usize, 19 | pub vout: usize, 20 | } 21 | 22 | pub async fn insert_blockchain_index( 23 | conn: impl Executor<'_, Database = Sqlite>, 24 | index: &BlockchainIndex, 25 | ) -> anyhow::Result<()> { 26 | sqlx::query(include_str!("./queries/insert_blockchain_index.sql")) 27 | .bind(index.protocol) 28 | .bind(hex::encode(index.fingerprint)) 29 | .bind(index.nsid.to_string()) 30 | .bind(&index.name) 31 | .bind(index.pubkey.map(|k| k.to_string())) 32 | .bind(&index.blockhash.to_string()) 33 | .bind(index.txid.to_string()) 34 | .bind(index.blocktime as i64) 35 | .bind(index.blockheight as i64) 36 | .bind(index.txheight as i64) 37 | .bind(index.vout as i64) 38 | .execute(conn) 39 | .await?; 40 | Ok(()) 41 | } 42 | 43 | pub async fn insert_transfer_cache( 44 | conn: impl Executor<'_, Database = Sqlite>, 45 | index: 
&BlockchainIndex, 46 | ) -> anyhow::Result<()> { 47 | sqlx::query(include_str!("./queries/insert_transfer_cache.sql")) 48 | .bind(index.protocol) 49 | .bind(hex::encode(index.fingerprint)) 50 | .bind(index.nsid.to_string()) 51 | .bind(&index.name) 52 | .bind(index.pubkey.map(|k| k.to_string())) 53 | .bind(&index.blockhash.to_string()) 54 | .bind(index.txid.to_string()) 55 | .bind(index.blocktime as i64) 56 | .bind(index.blockheight as i64) 57 | .bind(index.txheight as i64) 58 | .bind(index.vout as i64) 59 | .execute(conn) 60 | .await?; 61 | Ok(()) 62 | } 63 | 64 | pub async fn next_index_height(conn: &SqlitePool) -> anyhow::Result { 65 | let (h,) = 66 | sqlx::query_as::<_, (i64,)>("SELECT COALESCE(MAX(blockheight), 0) + 1 FROM index_height;") 67 | .fetch_one(conn) 68 | .await?; 69 | 70 | Ok(h as usize) 71 | } 72 | 73 | pub async fn insert_height( 74 | conn: &SqlitePool, 75 | height: i64, 76 | blockhash: &BlockHash, 77 | ) -> anyhow::Result<()> { 78 | sqlx::query( 79 | "INSERT INTO index_height (blockheight, blockhash) VALUES (?, ?) ON CONFLICT DO NOTHING;", 80 | ) 81 | .bind(height) 82 | .bind(blockhash.to_string()) 83 | .execute(conn) 84 | .await?; 85 | Ok(()) 86 | } 87 | 88 | pub async fn update_for_transfer( 89 | conn: &sqlx::Pool, 90 | nsid: Nsid, 91 | new_owner: XOnlyPublicKey, 92 | old_owner: XOnlyPublicKey, 93 | name: String, 94 | ) -> Result<(), anyhow::Error> { 95 | sqlx::query("UPDATE blockchain_index SET nsid = ?, pubkey = ? WHERE name = ? 
AND pubkey = ?;") 96 | .bind(hex::encode(nsid.as_ref())) 97 | .bind(hex::encode(new_owner.serialize())) 98 | .bind(&name) 99 | .bind(hex::encode(old_owner.serialize())) 100 | .execute(conn) 101 | .await?; 102 | Ok(()) 103 | } 104 | 105 | pub enum UpgradeStatus { 106 | Upgraded, 107 | NotUpgraded, 108 | } 109 | 110 | pub async fn upgrade_v0_to_v1( 111 | conn: impl sqlx::Executor<'_, Database = Sqlite> + Copy, 112 | name: &str, 113 | pubkey: XOnlyPublicKey, 114 | blockheight: usize, 115 | txid: Txid, 116 | ) -> anyhow::Result { 117 | let fingerprint = hex::encode( 118 | Hash160::default() 119 | .chain_update(name.as_bytes()) 120 | .fingerprint(), 121 | ); 122 | let nsid = hex::encode(NsidBuilder::new(name, &pubkey).finalize().as_ref()); 123 | 124 | let updated = sqlx::query( 125 | "UPDATE blockchain_index 126 | SET name = ?, pubkey = ?, protocol = 1, v1_upgrade_blockheight = ?, v1_upgrade_txid = ? 127 | WHERE fingerprint = ? AND nsid = ? AND protocol = 0;", 128 | ) 129 | .bind(name) 130 | .bind(hex::encode(pubkey.serialize())) 131 | .bind(blockheight as i64) 132 | .bind(hex::encode(txid)) 133 | .bind(&fingerprint) 134 | .bind(&nsid) 135 | .execute(conn) 136 | .await?; 137 | 138 | if updated.rows_affected() > 0 { 139 | return Ok(UpgradeStatus::Upgraded); 140 | } 141 | 142 | Ok(UpgradeStatus::NotUpgraded) 143 | } 144 | 145 | pub async fn update_v0_index( 146 | conn: impl sqlx::Executor<'_, Database = Sqlite> + Copy, 147 | name: &str, 148 | pubkey: &XOnlyPublicKey, 149 | nsid: Nsid, 150 | ) -> anyhow::Result<()> { 151 | sqlx::query( 152 | "UPDATE blockchain_index SET name = ?, pubkey = ? 
WHERE protocol = 0 AND nsid = ?;", 153 | ) 154 | .bind(name) 155 | .bind(pubkey.to_string()) 156 | .bind(hex::encode(nsid.as_slice())) 157 | .execute(conn) 158 | .await?; 159 | 160 | Ok(()) 161 | } 162 | 163 | pub async fn delete_from_transfer_cache( 164 | conn: &sqlx::Pool, 165 | id: i64, 166 | ) -> Result<(), anyhow::Error> { 167 | tracing::debug!("DELETING transfer_cache with id {id}"); 168 | sqlx::query("DELETE FROM transfer_cache WHERE id = ?;") 169 | .bind(id) 170 | .execute(conn) 171 | .await?; 172 | Ok(()) 173 | } 174 | 175 | pub async fn reindex( 176 | conn: impl sqlx::Executor<'_, Database = Sqlite> + Copy, 177 | blockheight: i64, 178 | ) -> anyhow::Result<()> { 179 | sqlx::query("DELETE FROM blockchain_index WHERE blockheight >= ?;") 180 | .bind(blockheight) 181 | .execute(conn) 182 | .await?; 183 | sqlx::query("DELETE FROM transfer_cache WHERE blockheight >= ?;") 184 | .bind(blockheight) 185 | .execute(conn) 186 | .await?; 187 | sqlx::query("DELETE FROM old_transfer_cache WHERE blockheight >= ?;") 188 | .bind(blockheight) 189 | .execute(conn) 190 | .await?; 191 | sqlx::query("DELETE FROM name_events;") 192 | .execute(conn) 193 | .await?; 194 | sqlx::query("DELETE FROM relay_index_queue;") 195 | .execute(conn) 196 | .await?; 197 | Ok(()) 198 | } 199 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2022-2023, axodotdev 2 | # SPDX-License-Identifier: MIT or Apache-2.0 3 | # 4 | # CI that: 5 | # 6 | # * checks for a Git Tag that looks like a release 7 | # * builds artifacts with cargo-dist (archives, installers, hashes) 8 | # * uploads those artifacts to temporary workflow zip 9 | # * on success, uploads the artifacts to a Github Release™ 10 | # 11 | # Note that the Github Release™ will be created with a generated 12 | # title/body based on your changelogs. 
13 | name: Release 14 | 15 | permissions: 16 | contents: write 17 | 18 | # This task will run whenever you push a git tag that looks like a version 19 | # like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc. 20 | # Various formats will be parsed into a VERSION and an optional PACKAGE_NAME, where 21 | # PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION 22 | # must be a Cargo-style SemVer Version (must have at least major.minor.patch). 23 | # 24 | # If PACKAGE_NAME is specified, then the release will be for that 25 | # package (erroring out if it doesn't have the given version or isn't cargo-dist-able). 26 | # 27 | # If PACKAGE_NAME isn't specified, then the release will be for all 28 | # (cargo-dist-able) packages in the workspace with that version (this mode is 29 | # intended for workspaces with only one dist-able package, or with all dist-able 30 | # packages versioned/released in lockstep). 31 | # 32 | # If you push multiple tags at once, separate instances of this workflow will 33 | # spin up, creating an independent Github Release™ for each one. However Github 34 | # will hard limit this to 3 tags per commit, as it will assume more tags is a 35 | # mistake. 36 | # 37 | # If there's a prerelease-style suffix to the version, then the Github Release™ 38 | # will be marked as a prerelease. 
39 | on: 40 | push: 41 | tags: 42 | - '**[0-9]+.[0-9]+.[0-9]+*' 43 | pull_request: 44 | 45 | jobs: 46 | # Run 'cargo dist plan' to determine what tasks we need to do 47 | plan: 48 | runs-on: ubuntu-latest 49 | outputs: 50 | val: ${{ steps.plan.outputs.manifest }} 51 | tag: ${{ !github.event.pull_request && github.ref_name || '' }} 52 | tag-flag: ${{ !github.event.pull_request && format('--tag={0}', github.ref_name) || '' }} 53 | publishing: ${{ !github.event.pull_request }} 54 | env: 55 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 56 | steps: 57 | - uses: actions/checkout@v4 58 | with: 59 | submodules: recursive 60 | - name: Install cargo-dist 61 | run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.4.2/cargo-dist-installer.sh | sh" 62 | - id: plan 63 | run: | 64 | cargo dist plan ${{ !github.event.pull_request && format('--tag={0}', github.ref_name) || '' }} --output-format=json > dist-manifest.json 65 | echo "cargo dist plan ran successfully" 66 | cat dist-manifest.json 67 | echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT" 68 | - name: "Upload dist-manifest.json" 69 | uses: actions/upload-artifact@v3 70 | with: 71 | name: artifacts 72 | path: dist-manifest.json 73 | 74 | # Build and packages all the platform-specific things 75 | upload-local-artifacts: 76 | # Let the initial task tell us to not run (currently very blunt) 77 | needs: plan 78 | if: ${{ fromJson(needs.plan.outputs.val).releases != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }} 79 | strategy: 80 | fail-fast: false 81 | # Target platforms/runners are computed by cargo-dist in create-release. 
82 | # Each member of the matrix has the following arguments: 83 | # 84 | # - runner: the github runner 85 | # - dist-args: cli flags to pass to cargo dist 86 | # - install-dist: expression to run to install cargo-dist on the runner 87 | # 88 | # Typically there will be: 89 | # - 1 "global" task that builds universal installers 90 | # - N "local" tasks that build each platform's binaries and platform-specific installers 91 | matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }} 92 | runs-on: ${{ matrix.runner }} 93 | env: 94 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 95 | BUILD_MANIFEST_NAME: target/distrib/${{ join(matrix.targets, '-') }}-dist-manifest.json 96 | steps: 97 | - uses: actions/checkout@v4 98 | with: 99 | submodules: recursive 100 | - uses: swatinem/rust-cache@v2 101 | - name: Install cargo-dist 102 | run: ${{ matrix.install_dist }} 103 | - name: Install dependencies 104 | run: | 105 | ${{ matrix.packages_install }} 106 | - name: Build artifacts 107 | run: | 108 | # Actually do builds and make zips and whatnot 109 | cargo dist build ${{ needs.plan.outputs.tag-flag }} --print=linkage --output-format=json ${{ matrix.dist_args }} > dist-manifest.json 110 | echo "cargo dist ran successfully" 111 | - id: cargo-dist 112 | name: Post-build 113 | # We force bash here just because github makes it really hard to get values up 114 | # to "real" actions without writing to env-vars, and writing to env-vars has 115 | # inconsistent syntax between shell and powershell. 116 | shell: bash 117 | run: | 118 | # Parse out what we just built and upload it to the Github Release™ 119 | echo "paths<> "$GITHUB_OUTPUT" 120 | jq --raw-output ".artifacts[]?.path | select( . 
!= null )" dist-manifest.json >> "$GITHUB_OUTPUT" 121 | echo "EOF" >> "$GITHUB_OUTPUT" 122 | 123 | cp dist-manifest.json "$BUILD_MANIFEST_NAME" 124 | - name: "Upload artifacts" 125 | uses: actions/upload-artifact@v3 126 | with: 127 | name: artifacts 128 | path: | 129 | ${{ steps.cargo-dist.outputs.paths }} 130 | ${{ env.BUILD_MANIFEST_NAME }} 131 | 132 | should-publish: 133 | needs: 134 | - plan 135 | - upload-local-artifacts 136 | if: ${{ needs.plan.outputs.publishing == 'true' }} 137 | runs-on: ubuntu-latest 138 | steps: 139 | - name: print tag 140 | run: echo "ok we're publishing!" 141 | 142 | # Create a Github Release with all the results once everything is done 143 | publish-release: 144 | needs: [plan, should-publish] 145 | runs-on: ubuntu-latest 146 | env: 147 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 148 | steps: 149 | - uses: actions/checkout@v4 150 | with: 151 | submodules: recursive 152 | - name: "Download artifacts" 153 | uses: actions/download-artifact@v3 154 | with: 155 | name: artifacts 156 | path: artifacts 157 | - name: Cleanup 158 | run: | 159 | # Remove the granular manifests 160 | rm artifacts/*-dist-manifest.json 161 | - name: Create Release 162 | uses: ncipollo/release-action@v1 163 | with: 164 | tag: ${{ needs.plan.outputs.tag }} 165 | name: ${{ fromJson(needs.plan.outputs.val).announcement_title }} 166 | body: ${{ fromJson(needs.plan.outputs.val).announcement_github_body }} 167 | prerelease: ${{ fromJson(needs.plan.outputs.val).announcement_is_prerelease }} 168 | artifacts: "artifacts/*" 169 | -------------------------------------------------------------------------------- /nomen_core/src/kind.rs: -------------------------------------------------------------------------------- 1 | use std::{fmt::Display, str::FromStr}; 2 | 3 | use nostr_sdk::{EventBuilder, UnsignedEvent}; 4 | use secp256k1::{schnorr::Signature, XOnlyPublicKey}; 5 | 6 | use crate::Name; 7 | 8 | use super::{CreateBuilder, Hash160, Nsid, NsidBuilder, TransferBuilder}; 9 | 10 | 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] 11 | pub enum NomenKind { 12 | Create, 13 | Transfer, 14 | } 15 | 16 | impl From for u8 { 17 | fn from(value: NomenKind) -> Self { 18 | match value { 19 | NomenKind::Create => 0x00, 20 | NomenKind::Transfer => 0x01, 21 | } 22 | } 23 | } 24 | 25 | impl Display for NomenKind { 26 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 27 | let s = match self { 28 | NomenKind::Create => "create", 29 | NomenKind::Transfer => "transfer", 30 | }; 31 | write!(f, "{s}") 32 | } 33 | } 34 | 35 | impl FromStr for NomenKind { 36 | type Err = super::UtilError; 37 | 38 | fn from_str(s: &str) -> Result { 39 | match s { 40 | "create" => Ok(NomenKind::Create), 41 | "transfer" => Ok(NomenKind::Transfer), 42 | _ => Err(super::UtilError::NomenKind(s.to_string())), 43 | } 44 | } 45 | } 46 | 47 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 48 | pub struct CreateV0 { 49 | pub fingerprint: [u8; 5], 50 | pub nsid: Nsid, 51 | } 52 | 53 | impl CreateV0 { 54 | pub fn create(fingerprint: [u8; 5], nsid: Nsid) -> CreateV0 { 55 | CreateV0 { fingerprint, nsid } 56 | } 57 | 58 | pub fn parse_create(value: &[u8]) -> Result { 59 | Ok(CreateV0::create( 60 | value[..5].try_into()?, 61 | value[5..].try_into()?, 62 | )) 63 | } 64 | 65 | pub fn serialize(&self) -> Vec { 66 | b"NOM\x00\x00" 67 | .iter() 68 | .chain(self.fingerprint.iter()) 69 | .chain(self.nsid.iter()) 70 | .copied() 71 | .collect() 72 | } 73 | } 74 | 75 | impl TryFrom<&[u8]> for CreateV0 { 76 | type Error = super::UtilError; 77 | 78 | fn try_from(value: &[u8]) -> Result { 79 | if !value.starts_with(b"NOM\x00") { 80 | return Err(super::UtilError::UnexpectedNomenTxType); 81 | } 82 | let value = &value[4..]; 83 | 84 | match value.first() { 85 | Some(0x00) => Ok(CreateV0::parse_create(&value[1..])?), 86 | _ => Err(super::UtilError::UnexpectedNomenTxType), 87 | } 88 | } 89 | } 90 | 91 | #[derive(Clone, Debug, PartialEq, Eq)] 92 | pub struct CreateV1 { 93 | pub pubkey: XOnlyPublicKey, 
94 | pub name: String, 95 | } 96 | impl CreateV1 { 97 | pub fn create(pubkey: XOnlyPublicKey, name: &str) -> CreateV1 { 98 | CreateV1 { 99 | pubkey, 100 | name: name.to_owned(), 101 | } 102 | } 103 | 104 | pub fn fingerprint(&self) -> [u8; 5] { 105 | Hash160::default() 106 | .chain_update(self.name.as_bytes()) 107 | .fingerprint() 108 | } 109 | 110 | pub fn nsid(&self) -> Nsid { 111 | NsidBuilder::new(&self.name, &self.pubkey).finalize() 112 | } 113 | 114 | pub fn parse_create(value: &[u8]) -> Result { 115 | let name = String::from_utf8(value[32..].to_vec())?; 116 | let _ = Name::from_str(&name)?; 117 | Ok(CreateV1 { 118 | pubkey: XOnlyPublicKey::from_slice(&value[..32])?, 119 | name, 120 | }) 121 | } 122 | 123 | pub fn serialize(&self) -> Vec { 124 | b"NOM\x01\x00" 125 | .iter() 126 | .chain(self.pubkey.serialize().iter()) 127 | .chain(self.name.as_bytes().iter()) 128 | .copied() 129 | .collect() 130 | } 131 | } 132 | 133 | impl TryFrom<&[u8]> for CreateV1 { 134 | type Error = super::UtilError; 135 | 136 | fn try_from(value: &[u8]) -> Result { 137 | if !value.starts_with(b"NOM\x01") { 138 | return Err(super::UtilError::UnexpectedNomenTxType); 139 | } 140 | let value = &value[4..]; 141 | 142 | match value.first() { 143 | Some(0x00) => Ok(CreateV1::parse_create(&value[1..])?), 144 | _ => Err(super::UtilError::UnexpectedNomenTxType), 145 | } 146 | } 147 | } 148 | 149 | #[derive(Clone, Debug, PartialEq, Eq)] 150 | pub struct TransferV1 { 151 | pub pubkey: XOnlyPublicKey, 152 | pub name: String, 153 | } 154 | impl TransferV1 { 155 | pub fn create(pubkey: XOnlyPublicKey, name: &str) -> TransferV1 { 156 | TransferV1 { 157 | pubkey, 158 | name: name.to_owned(), 159 | } 160 | } 161 | 162 | pub fn parse_create(value: &[u8]) -> Result { 163 | let name = String::from_utf8(value[32..].to_vec())?; 164 | let _ = Name::from_str(&name)?; 165 | Ok(TransferV1 { 166 | pubkey: XOnlyPublicKey::from_slice(&value[..32])?, 167 | name, 168 | }) 169 | } 170 | 171 | pub fn fingerprint(&self) 
-> [u8; 5] { 172 | Hash160::default() 173 | .chain_update(self.name.as_bytes()) 174 | .fingerprint() 175 | } 176 | 177 | pub fn nsid(&self) -> Nsid { 178 | NsidBuilder::new(&self.name, &self.pubkey).finalize() 179 | } 180 | 181 | pub fn serialize(&self) -> Vec { 182 | b"NOM\x01\x01" 183 | .iter() 184 | .chain(self.pubkey.serialize().iter()) 185 | .chain(self.name.as_bytes().iter()) 186 | .copied() 187 | .collect() 188 | } 189 | } 190 | 191 | impl TryFrom<&[u8]> for TransferV1 { 192 | type Error = super::UtilError; 193 | 194 | fn try_from(value: &[u8]) -> Result { 195 | if !value.starts_with(b"NOM\x01") { 196 | return Err(super::UtilError::UnexpectedNomenTxType); 197 | } 198 | let value = &value[4..]; 199 | 200 | match value.first() { 201 | Some(0x01) => Ok(TransferV1::parse_create(&value[1..])?), 202 | _ => Err(super::UtilError::UnexpectedNomenTxType), 203 | } 204 | } 205 | } 206 | 207 | #[derive(Clone, Debug, PartialEq, Eq)] 208 | pub struct SignatureV1 { 209 | pub signature: Signature, 210 | } 211 | impl SignatureV1 { 212 | pub fn new(signature: &Signature) -> SignatureV1 { 213 | SignatureV1 { 214 | signature: *signature, 215 | } 216 | } 217 | 218 | pub fn parse_signature(value: &[u8]) -> Result { 219 | Ok(SignatureV1 { 220 | signature: Signature::from_slice(value)?, 221 | }) 222 | } 223 | 224 | pub fn serialize(&self) -> Vec { 225 | b"NOM\x01\x02" 226 | .iter() 227 | .chain(self.signature.as_ref().iter()) 228 | .copied() 229 | .collect() 230 | } 231 | } 232 | 233 | impl TryFrom<&[u8]> for SignatureV1 { 234 | type Error = super::UtilError; 235 | 236 | fn try_from(value: &[u8]) -> Result { 237 | if !value.starts_with(b"NOM\x01") { 238 | return Err(super::UtilError::UnexpectedNomenTxType); 239 | } 240 | let value = &value[4..]; 241 | 242 | match value.first() { 243 | Some(0x02) => Ok(SignatureV1::parse_signature(&value[1..])?), 244 | _ => Err(super::UtilError::UnexpectedNomenTxType), 245 | } 246 | } 247 | } 248 | 249 | #[cfg(test)] 250 | mod tests { 251 | use 
std::str::FromStr; 252 | 253 | use itertools::Itertools; 254 | 255 | use super::*; 256 | 257 | #[test] 258 | fn test_parse_create_serialize_v0() { 259 | let or = 260 | hex::decode("4e4f4d0000e5401df4b4273968a1e7be2ef0acbcae6f61d53e73101e2983").unwrap(); 261 | let c = CreateV0::try_from(or.as_ref()); 262 | assert!(c.is_ok()); 263 | assert_eq!(c.unwrap().serialize(), or); 264 | } 265 | 266 | #[test] 267 | fn test_invalid_create_v0() { 268 | let or = 269 | hex::decode("4e4f4d0001e5401df4b4273968a1e7be2ef0acbcae6f61d53e73101e2983").unwrap(); 270 | let c = CreateV0::try_from(or.as_ref()); 271 | assert!(c.is_err()); 272 | } 273 | 274 | #[test] 275 | fn test_parse_create_serialize_v1() { 276 | let pk = XOnlyPublicKey::from_str( 277 | "60de6fbc4a78209942c62706d904ff9592c2e856f219793f7f73e62fc33bfc18", 278 | ) 279 | .unwrap(); 280 | let or = hex::decode("4e4f4d010060de6fbc4a78209942c62706d904ff9592c2e856f219793f7f73e62fc33bfc1868656c6c6f2d776f726c64").unwrap(); 281 | let c = CreateV1::try_from(or.as_ref()); 282 | assert!(c.is_ok()); 283 | assert_eq!(c.unwrap().serialize(), or); 284 | } 285 | 286 | #[test] 287 | fn test_invalid_create_v1() { 288 | let or = hex::decode( 289 | "4e4f4d010060de6fbc4a78209942c62706d904ff9592c2e856f219793f7f73e62fc33bfc186c64", 290 | ) 291 | .unwrap(); 292 | let c = CreateV1::try_from(or.as_ref()); 293 | assert!(c.is_err()); 294 | } 295 | 296 | #[test] 297 | fn test_parse_transfer_serialize_v1() { 298 | let or = hex::decode("4e4f4d010174301b9c5d30b764bca8d3eb4febb06862f558d292fde93b4a290d90850bac9168656c6c6f2d776f726c64").unwrap(); 299 | let t = TransferV1::try_from(or.as_ref()); 300 | assert!(t.is_ok()); 301 | assert_eq!(t.unwrap().serialize(), or); 302 | } 303 | 304 | #[test] 305 | fn test_parse_signatuyre_serialize_v1() { 306 | let or = hex::decode("4e4f4d0102489e4e3ab29408da53733473156040a25e5a84cbca788c2b7143f971ead84192ae8bd8e4890cfabb08dca693875c28a1949ae0d13f5c6b08617e4fdc022bc751").unwrap(); 307 | let t = 
SignatureV1::try_from(or.as_ref()); 308 | assert!(t.is_ok()); 309 | assert_eq!(t.unwrap().serialize(), or); 310 | } 311 | 312 | #[test] 313 | fn test_invalid_version() { 314 | let wrong_ver = b"NOM\x01\x00"; 315 | assert!(CreateV0::try_from(wrong_ver.as_ref()).is_err()) 316 | } 317 | 318 | #[test] 319 | fn test_invalid_tx_type() { 320 | let wrong_ver = b"NOZ\x00\x00"; 321 | assert!(CreateV0::try_from(wrong_ver.as_ref()).is_err()) 322 | } 323 | 324 | #[test] 325 | fn test_invalid_tx_kind() { 326 | let wrong_ver = b"NOM\x00\x10"; 327 | assert!(CreateV0::try_from(wrong_ver.as_ref()).is_err()) 328 | } 329 | } 330 | -------------------------------------------------------------------------------- /nomen/src/subcommands/server/explorer.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, str::FromStr}; 2 | 3 | use anyhow::anyhow; 4 | use axum::{ 5 | extract::{Path, Query, State}, 6 | Form, 7 | }; 8 | use axum_extra::extract::WithRejection; 9 | use bitcoin::psbt::Psbt; 10 | use itertools::Itertools; 11 | use nomen_core::{CreateBuilder, Name}; 12 | use serde::Deserialize; 13 | 14 | use crate::{ 15 | db::{self, name::NameDetails}, 16 | subcommands::util::{extend_psbt, name_event}, 17 | util::{format_time, KeyVal, Npub}, 18 | }; 19 | 20 | use super::{AppState, WebError}; 21 | 22 | #[derive(askama::Template)] 23 | #[template(path = "error.html")] 24 | pub struct ErrorTemplate { 25 | pub message: String, 26 | } 27 | 28 | #[derive(askama::Template)] 29 | #[template(path = "index.html")] 30 | pub struct IndexTemplate {} 31 | 32 | #[allow(clippy::unused_async)] 33 | pub async fn index() -> IndexTemplate { 34 | IndexTemplate {} 35 | } 36 | 37 | #[allow(clippy::module_name_repetitions)] 38 | #[derive(Deserialize)] 39 | pub struct ExplorerQuery { 40 | pub q: Option, 41 | } 42 | 43 | #[allow(clippy::module_name_repetitions)] 44 | #[derive(askama::Template)] 45 | #[template(path = "explorer.html")] 46 | pub struct 
ExplorerTemplate { 47 | q: String, 48 | names: Vec<(String, String)>, 49 | last_index_time: String, 50 | } 51 | 52 | pub async fn explorer( 53 | State(state): State, 54 | Query(query): Query, 55 | ) -> Result { 56 | let conn = state.pool; 57 | let last_index_time = db::event_log::last_index_time(&conn).await?; 58 | let last_index_time = format_time(last_index_time)?; 59 | let q = query.q.map(|s| s.trim().to_string()); 60 | 61 | Ok(ExplorerTemplate { 62 | q: q.clone().unwrap_or_default(), 63 | names: db::name::top_level_names(&conn, q).await?, 64 | last_index_time, 65 | }) 66 | } 67 | 68 | #[derive(askama::Template)] 69 | #[template(path = "name.html")] 70 | pub struct NameTemplate { 71 | name: String, 72 | record_keys: Vec, 73 | records: HashMap, 74 | blockhash: String, 75 | blocktime: String, 76 | txid: String, 77 | vout: i64, 78 | height: i64, 79 | pubkey: String, 80 | protocol: i64, 81 | v1_upgrade_blockheight: Option, 82 | v1_upgrade_txid: Option, 83 | } 84 | 85 | impl TryFrom for NameTemplate { 86 | type Error = anyhow::Error; 87 | 88 | fn try_from(value: NameDetails) -> Result { 89 | let records: HashMap = serde_json::from_str(&value.records)?; 90 | let mut record_keys = records.keys().cloned().collect_vec(); 91 | record_keys.sort(); 92 | let blocktime = format_time(value.blocktime)?; 93 | 94 | Ok(NameTemplate { 95 | name: value.name, 96 | record_keys, 97 | records, 98 | blockhash: value.blockhash, 99 | blocktime, 100 | txid: value.txid, 101 | vout: value.vout, 102 | height: value.blockheight, 103 | pubkey: value.pubkey, 104 | protocol: value.protocol, 105 | v1_upgrade_blockheight: value.v1_upgrade_blockheight, 106 | v1_upgrade_txid: value.v1_upgrade_txid, 107 | }) 108 | } 109 | } 110 | 111 | pub async fn show_name( 112 | State(state): State, 113 | Path(nsid): Path, 114 | ) -> Result { 115 | let conn = state.pool; 116 | let details = db::name::details(&conn, &nsid).await?; 117 | 118 | Ok(details.try_into()?) 
119 | } 120 | 121 | #[derive(askama::Template, Default)] 122 | #[template(path = "newname.html")] 123 | pub struct NewNameTemplate { 124 | upgrade: bool, 125 | data: String, 126 | name: String, 127 | pubkey: String, 128 | confirmations: usize, 129 | is_psbt: bool, 130 | } 131 | 132 | #[derive(Deserialize)] 133 | pub struct NewNameForm { 134 | upgrade: bool, 135 | name: String, 136 | pubkey: Npub, 137 | psbt: String, 138 | } 139 | 140 | #[derive(Deserialize)] 141 | pub struct NewNameQuery { 142 | upgrade: Option, 143 | } 144 | 145 | #[allow(clippy::unused_async)] 146 | pub async fn new_name_form( 147 | State(state): State, 148 | WithRejection(Query(query), _): WithRejection, WebError>, 149 | ) -> Result { 150 | Ok(NewNameTemplate { 151 | confirmations: state.config.confirmations(), 152 | upgrade: query.upgrade.unwrap_or_default(), 153 | ..Default::default() 154 | }) 155 | } 156 | 157 | #[allow(clippy::unused_async)] 158 | pub async fn new_name_submit( 159 | State(state): State, 160 | WithRejection(Form(form), _): WithRejection, WebError>, 161 | ) -> Result { 162 | let _name = Name::from_str(&form.name).map_err(|_| anyhow!("Invalid name"))?; 163 | 164 | // If we're upgrading an existing name, we don't actually want to error if the name exists. 165 | let available = if form.upgrade { 166 | true 167 | } else { 168 | db::name::check_availability(&state.pool, form.name.as_ref()).await? 
169 | }; 170 | if !available { 171 | Err(anyhow!("Name unavailable"))?; 172 | } 173 | let (is_psbt, data) = if form.psbt.is_empty() { 174 | let d = CreateBuilder::new(form.pubkey.as_ref(), &form.name).v1_op_return(); 175 | (false, hex::encode(d)) 176 | } else { 177 | let mut psbt: Psbt = form.psbt.parse()?; 178 | extend_psbt(&mut psbt, &form.name, form.pubkey.as_ref()); 179 | (true, psbt.to_string()) 180 | }; 181 | Ok(NewNameTemplate { 182 | upgrade: form.upgrade, 183 | data, 184 | name: form.name, 185 | pubkey: form.pubkey.to_string(), 186 | confirmations: state.config.confirmations(), 187 | is_psbt, 188 | }) 189 | } 190 | 191 | #[derive(askama::Template)] 192 | #[template(path = "updaterecords.html")] 193 | pub struct NewRecordsTemplate { 194 | name: String, 195 | pubkey: String, 196 | unsigned_event: String, 197 | relays: Vec, 198 | records: String, 199 | } 200 | 201 | #[derive(Deserialize)] 202 | pub struct NewRecordsQuery { 203 | name: Option, 204 | pubkey: Option, 205 | } 206 | 207 | pub async fn new_records_form( 208 | State(state): State, 209 | Query(query): Query, 210 | ) -> Result { 211 | let records = records_from_query(&query, &state).await?; 212 | Ok(NewRecordsTemplate { 213 | name: query.name.unwrap_or_default(), 214 | pubkey: query.pubkey.map(|s| s.to_string()).unwrap_or_default(), 215 | unsigned_event: String::default(), 216 | relays: state.config.relays(), 217 | records, 218 | }) 219 | } 220 | 221 | async fn records_from_query(query: &NewRecordsQuery, state: &AppState) -> Result { 222 | let records = match &query.name { 223 | Some(name) => { 224 | let (records,) = sqlx::query_as::<_, (String,)>( 225 | "SELECT records FROM valid_names_records_vw WHERE name = ?;", 226 | ) 227 | .bind(name) 228 | .fetch_optional(&state.pool) 229 | .await? 
230 | .unwrap_or_else(|| (String::from(r#"{"KEY":"value"}"#),)); 231 | let records: HashMap = serde_json::from_str(&records)?; 232 | records 233 | .iter() 234 | .map(|(k, v)| format!("{k}={v}")) 235 | .collect_vec() 236 | .join("\n") 237 | } 238 | None => "KEY=value".into(), 239 | }; 240 | Ok(records) 241 | } 242 | 243 | #[derive(Deserialize, Debug)] 244 | pub struct NewRecordsForm { 245 | records: String, 246 | name: String, 247 | pubkey: Npub, 248 | } 249 | 250 | #[allow(clippy::unused_async)] 251 | pub async fn new_records_submit( 252 | State(state): State, 253 | Form(form): Form, 254 | ) -> Result { 255 | let records = form 256 | .records 257 | .lines() 258 | .map(str::parse) 259 | .collect::, _>>()? 260 | .iter() 261 | .map(|kv| kv.clone().pair()) 262 | .collect::>(); 263 | let event = name_event(*form.pubkey.as_ref(), &records, &form.name)?; 264 | let unsigned_event = serde_json::to_string_pretty(&event)?; 265 | Ok(NewRecordsTemplate { 266 | name: form.name.to_string(), 267 | pubkey: form.pubkey.to_string(), 268 | unsigned_event, 269 | relays: state.config.relays(), 270 | records: "KEY=value".into(), 271 | }) 272 | } 273 | 274 | #[derive(askama::Template)] 275 | #[template(path = "stats.html")] 276 | pub struct IndexerInfo { 277 | version: &'static str, 278 | commit: &'static str, 279 | build_date: &'static str, 280 | known_names: i64, 281 | index_height: i64, 282 | nostr_events: i64, 283 | } 284 | 285 | pub async fn index_stats(State(state): State) -> Result { 286 | Ok(IndexerInfo { 287 | version: env!("CARGO_PKG_VERSION"), 288 | commit: env!("VERGEN_GIT_DESCRIBE"), 289 | build_date: env!("VERGEN_BUILD_TIMESTAMP"), 290 | known_names: db::stats::known_names(&state.pool).await?, 291 | index_height: db::stats::index_height(&state.pool).await?, 292 | nostr_events: db::stats::nostr_events(&state.pool).await?, 293 | }) 294 | } 295 | 296 | pub mod transfer { 297 | use axum::{extract::State, Form}; 298 | use nomen_core::{SignatureV1, TransferBuilder, TransferV1}; 
299 | use secp256k1::schnorr::Signature; 300 | use serde::Deserialize; 301 | 302 | use crate::{ 303 | db, 304 | subcommands::{AppState, WebError}, 305 | util::Npub, 306 | }; 307 | 308 | #[derive(askama::Template)] 309 | #[template(path = "transfer/initiate.html")] 310 | pub struct InitiateTransferTemplate; 311 | 312 | #[derive(Deserialize)] 313 | pub struct InitiateTransferForm { 314 | name: String, 315 | pubkey: Npub, 316 | old_pubkey: Npub, 317 | } 318 | 319 | #[allow(clippy::unused_async)] 320 | pub async fn initiate() -> InitiateTransferTemplate { 321 | InitiateTransferTemplate 322 | } 323 | 324 | #[derive(askama::Template)] 325 | #[template(path = "transfer/sign.html")] 326 | pub struct SignEventTemplate { 327 | name: String, 328 | pubkey: Npub, 329 | old_pubkey: Npub, 330 | event: String, 331 | error: Option, 332 | } 333 | 334 | #[allow(clippy::unused_async)] 335 | pub async fn submit_initiate( 336 | State(state): State, 337 | Form(transfer): Form, 338 | ) -> Result { 339 | let error = set_initiate_error(state, &transfer).await; 340 | let te = TransferBuilder { 341 | new_pubkey: transfer.pubkey.as_ref(), 342 | name: &transfer.name, 343 | }; 344 | let event = te.unsigned_event(transfer.old_pubkey.as_ref()); 345 | Ok(SignEventTemplate { 346 | name: transfer.name, 347 | pubkey: transfer.pubkey, 348 | old_pubkey: transfer.old_pubkey, 349 | event: serde_json::to_string(&event)?, 350 | error, 351 | }) 352 | } 353 | 354 | async fn set_initiate_error( 355 | state: AppState, 356 | transfer: &InitiateTransferForm, 357 | ) -> Option { 358 | let mut error = None; 359 | let name_detail = db::name::details(&state.pool, &transfer.name).await; 360 | match name_detail { 361 | Ok(detail) => { 362 | if detail.protocol == 0 { 363 | error = Some( 364 | "This name uses the v0 protocol. 
Please upgrade the name to v1 first.", 365 | ); 366 | } else if detail.pubkey != transfer.old_pubkey.to_string() { 367 | error = Some("The pubkeys do not match."); 368 | } else if detail.pubkey == transfer.old_pubkey.to_string() { 369 | error = Some("A name cannot be transferred to the same current owner."); 370 | } 371 | } 372 | Err(_) => { 373 | error = Some("This name does not exist. Non-existant name cannot be transferred."); 374 | } 375 | }; 376 | error.map(Into::into) 377 | } 378 | 379 | #[derive(Deserialize)] 380 | pub struct FinalTransferForm { 381 | name: String, 382 | pubkey: Npub, 383 | sig: Signature, 384 | } 385 | 386 | #[derive(askama::Template)] 387 | #[template(path = "transfer/complete.html")] 388 | pub struct CompleteTransferTemplate { 389 | data1: String, 390 | data2: String, 391 | } 392 | 393 | #[allow(clippy::unused_async)] 394 | pub async fn complete( 395 | State(_state): State, 396 | Form(transfer): Form, 397 | ) -> Result { 398 | let data1 = TransferV1 { 399 | pubkey: *transfer.pubkey.as_ref(), 400 | name: transfer.name.clone(), 401 | } 402 | .serialize(); 403 | 404 | let data2 = SignatureV1::new(&transfer.sig).serialize(); 405 | 406 | Ok(CompleteTransferTemplate { 407 | data1: hex::encode(data1), 408 | data2: hex::encode(data2), 409 | }) 410 | } 411 | } 412 | 413 | pub mod well_known { 414 | use axum::{extract::State, Json}; 415 | use nostr_sdk::Keys; 416 | 417 | use crate::subcommands::{AppState, WebError}; 418 | 419 | #[allow(clippy::unused_async)] 420 | pub async fn nomen( 421 | State(state): State, 422 | ) -> anyhow::Result, WebError> { 423 | let sk = state.config.secret_key().ok_or(anyhow::anyhow!( 424 | "Config: secret key required for .well-known" 425 | ))?; 426 | let pk = Keys::new(*sk.as_ref()).public_key(); 427 | let result = serde_json::json!({ 428 | "indexer": { 429 | "pubkey": pk.to_string(), 430 | "relays": state.config.relays() 431 | } 432 | }); 433 | 434 | Ok(Json(result)) 435 | } 436 | } 437 | 
-------------------------------------------------------------------------------- /nomen/src/subcommands/index/blockchain.rs: -------------------------------------------------------------------------------- 1 | use bitcoin::BlockHash; 2 | use bitcoincore_rpc::{Client, RpcApi}; 3 | use futures::TryStreamExt; 4 | use nomen_core::{CreateV0, CreateV1, NsidBuilder, SignatureV1, TransferBuilder, TransferV1}; 5 | use secp256k1::{schnorr::Signature, XOnlyPublicKey}; 6 | use sqlx::SqlitePool; 7 | 8 | use crate::{ 9 | config::Config, 10 | db::{self, index::BlockchainIndex, raw::RawBlockchain}, 11 | }; 12 | 13 | enum QueueMessage { 14 | RawBlockchain(RawBlockchain), 15 | Index { 16 | blockheight: i64, 17 | blockhash: BlockHash, 18 | }, 19 | } 20 | 21 | pub async fn index(config: &Config, pool: &sqlx::Pool) -> Result<(), anyhow::Error> { 22 | // Check if the index is on a stale chain, and rewind the index if necessary 23 | rewind_invalid_chain(config.rpc_client()?, pool.clone()).await?; 24 | 25 | let client = config.rpc_client()?; 26 | let index_height = db::index::next_index_height(pool) 27 | .await? 28 | .max(config.starting_block_height()); 29 | let (sender, receiver) = tokio::sync::mpsc::channel(1); 30 | 31 | tracing::info!("Scanning new blocks for indexable NOM outputs at height {index_height}"); 32 | let min_confirmations = config.confirmations(); 33 | 34 | // Spawn a thread to query the Bitcoin node for new block data. Messages are sent to the queue. 35 | let _thread = spawn_index_thread(client, index_height, sender, min_confirmations); 36 | 37 | // Process the messages from the queue. This will push new NOM OP_RETURNs into the raw_blockchain table. 38 | process_messages(receiver, pool).await?; 39 | 40 | // Update the blockchain index by looping through raw_blockchain table and pocessing the saved outputs. 
41 | update_blockchain_index(config, pool).await?; 42 | 43 | // Expire unused transfer cache 44 | expire_transfer_cache(pool).await?; 45 | 46 | tracing::info!("Blockchain index complete."); 47 | Ok(()) 48 | } 49 | 50 | async fn process_messages( 51 | mut receiver: tokio::sync::mpsc::Receiver, 52 | pool: &sqlx::Pool, 53 | ) -> anyhow::Result<()> { 54 | let guard = elegant_departure::get_shutdown_guard(); 55 | 'select: loop { 56 | tokio::select! { 57 | msg = receiver.recv() => { 58 | match msg { 59 | Some(QueueMessage::RawBlockchain(raw_blockchain)) => { 60 | if let Err(e) = db::raw::insert_raw_blockchain(pool, &raw_blockchain) 61 | .await 62 | { 63 | tracing::error!("Index error: {e}"); 64 | } 65 | db::index::insert_height(pool, raw_blockchain.blockheight as i64, &raw_blockchain.blockhash).await?; 66 | } 67 | Some(QueueMessage::Index {blockheight, blockhash}) => { 68 | db::index::insert_height(pool, blockheight, &blockhash).await?; 69 | }, 70 | None => break 'select, 71 | } 72 | } 73 | _ = guard.wait() => { 74 | receiver.close(); 75 | break 'select; 76 | } 77 | } 78 | } 79 | Ok(()) 80 | } 81 | 82 | fn spawn_index_thread( 83 | client: Client, 84 | index_height: usize, 85 | sender: tokio::sync::mpsc::Sender, 86 | min_confirmations: usize, 87 | ) -> tokio::task::JoinHandle> { 88 | tokio::task::spawn_blocking(move || -> anyhow::Result<_> { 89 | let mut blockhash = client.get_block_hash(index_height as u64)?; 90 | let mut blockinfo = client.get_block_header_info(&blockhash)?; 91 | 92 | loop { 93 | // If the channel is closed, let's stop 94 | if sender.is_closed() { 95 | tracing::info!("Stopping index operation."); 96 | break; 97 | } 98 | 99 | if (blockinfo.confirmations as usize) < min_confirmations { 100 | tracing::info!( 101 | "Minimum confirmations not met at block height {}.", 102 | blockinfo.height 103 | ); 104 | break; 105 | } 106 | 107 | if blockinfo.height % 10 == 0 { 108 | tracing::info!("Index block height {}", blockinfo.height); 109 | } 110 | 111 | let block = 
client.get_block(&blockhash)?; 112 | 113 | for (txheight, tx) in block.txdata.iter().enumerate() { 114 | for (vout, output) in tx.output.iter().enumerate() { 115 | if output.script_pubkey.is_op_return() && output.script_pubkey.len() >= 3 { 116 | let b = &output.script_pubkey.as_bytes()[2..]; 117 | 118 | // Pre-check if it starts with NOM, so we can filter out some unnecessary errors from the logs 119 | if b.starts_with(b"NOM") { 120 | let raw_blockchain = RawBlockchain { 121 | blockhash, 122 | txid: tx.txid(), 123 | blocktime: blockinfo.time, 124 | blockheight: blockinfo.height, 125 | txheight, 126 | vout, 127 | data: b.to_vec(), 128 | }; 129 | sender 130 | .blocking_send(QueueMessage::RawBlockchain(raw_blockchain)) 131 | .ok(); 132 | } else { 133 | sender 134 | .blocking_send(QueueMessage::Index { 135 | blockheight: blockinfo.height as i64, 136 | blockhash, 137 | }) 138 | .ok(); 139 | } 140 | } else { 141 | sender 142 | .blocking_send(QueueMessage::Index { 143 | blockheight: blockinfo.height as i64, 144 | blockhash, 145 | }) 146 | .ok(); 147 | } 148 | } 149 | } 150 | match blockinfo.next_block_hash { 151 | Some(next_hash) => { 152 | blockhash = next_hash; 153 | blockinfo = client.get_block_header_info(&blockhash)?; 154 | } 155 | None => break, 156 | } 157 | } 158 | 159 | Ok(()) 160 | }) 161 | } 162 | 163 | pub async fn update_blockchain_index( 164 | _config: &Config, 165 | pool: &sqlx::Pool, 166 | ) -> Result<(), anyhow::Error> { 167 | let rows = sqlx::query_as::<_, RawBlockchain>("SELECT * FROM raw_blockchain rb WHERE rb.blockheight > (SELECT coalesce(max(blockheight), 0) FROM index_blockheights_vw);").fetch_all(pool).await?; 168 | for row in rows { 169 | if let Ok(create) = CreateV0::try_from(row.data.as_ref()) { 170 | let i = BlockchainIndex { 171 | protocol: 0, 172 | fingerprint: create.fingerprint, 173 | nsid: create.nsid, 174 | name: None, 175 | pubkey: None, 176 | blockhash: row.blockhash, 177 | txid: row.txid, 178 | blocktime: row.blocktime, 179 | 
blockheight: row.blockheight, 180 | txheight: row.txheight, 181 | vout: row.vout, 182 | }; 183 | index_output(pool, i).await?; 184 | } else if let Ok(create) = CreateV1::try_from(row.data.as_ref()) { 185 | let i = BlockchainIndex { 186 | protocol: 1, 187 | fingerprint: create.fingerprint(), 188 | nsid: create.nsid(), 189 | name: Some(create.name), 190 | pubkey: Some(create.pubkey), 191 | blockhash: row.blockhash, 192 | txid: row.txid, 193 | blocktime: row.blocktime, 194 | blockheight: row.blockheight, 195 | txheight: row.txheight, 196 | vout: row.vout, 197 | }; 198 | index_output(pool, i).await?; 199 | } else if let Ok(transfer) = TransferV1::try_from(row.data.as_ref()) { 200 | tracing::info!("Caching transfer for {}", transfer.name); 201 | let i = BlockchainIndex { 202 | protocol: 1, 203 | fingerprint: transfer.fingerprint(), 204 | nsid: transfer.nsid(), 205 | name: Some(transfer.name), 206 | pubkey: Some(transfer.pubkey), 207 | blockhash: row.blockhash, 208 | txid: row.txid, 209 | blocktime: row.blocktime, 210 | blockheight: row.blockheight, 211 | txheight: row.txheight, 212 | vout: row.vout, 213 | }; 214 | cache_transfer(pool, i).await?; 215 | } else if let Ok(signature) = SignatureV1::try_from(row.data.as_ref()) { 216 | tracing::info!("Signature found"); 217 | check_signature(pool, signature.signature).await?; 218 | } else { 219 | tracing::error!("Index error"); 220 | } 221 | } 222 | Ok(()) 223 | } 224 | 225 | async fn check_signature( 226 | conn: &sqlx::Pool, 227 | signature: Signature, 228 | ) -> anyhow::Result<()> { 229 | let mut data = sqlx::query_as::<_, (String, String, String, i64)>( 230 | "SELECT tc.name, tc.pubkey AS new_owner, n.pubkey, tc.id AS old_owner 231 | FROM transfer_cache tc 232 | JOIN valid_names_vw n ON tc.fingerprint = n.fingerprint AND tc.name = n.name", 233 | ) 234 | .fetch(conn); 235 | 236 | while let Some(row) = data.try_next().await? 
{ 237 | let name = row.0; 238 | let new_owner = { 239 | let h = hex::decode(row.1.as_bytes())?; 240 | XOnlyPublicKey::from_slice(&h)? 241 | }; 242 | let old_owner = { 243 | let h = hex::decode(row.2.as_bytes())?; 244 | XOnlyPublicKey::from_slice(&h)? 245 | }; 246 | let tb = TransferBuilder { 247 | new_pubkey: &new_owner, 248 | name: name.as_str(), 249 | }; 250 | let unsigned_event = tb.unsigned_event(&old_owner); 251 | if unsigned_event.add_signature(signature).is_ok() { 252 | tracing::info!( 253 | "Valid signature found for {name}, updating owner to {}!", 254 | hex::encode(new_owner.serialize()) 255 | ); 256 | let nsid = NsidBuilder::new(name.as_str(), &new_owner).finalize(); 257 | db::index::update_for_transfer(conn, nsid, new_owner, old_owner, name).await?; 258 | 259 | tracing::info!("Deleting record from transfer_cache"); 260 | db::index::delete_from_transfer_cache(conn, row.3).await?; 261 | 262 | break; 263 | } 264 | } 265 | 266 | Ok(()) 267 | } 268 | 269 | async fn index_output(conn: &SqlitePool, index: BlockchainIndex) -> anyhow::Result<()> { 270 | tracing::info!( 271 | "NOM output found: {}, name: {:?}, protocol: {}", 272 | index.nsid, 273 | index.name, 274 | index.protocol 275 | ); 276 | 277 | // If we can verify that the v1 create is a valid v0 name that already exists, we can upgrade the v0 to the v1 automatically. 278 | if index.protocol == 1 { 279 | if let Some(name) = &index.name { 280 | if let Some(pubkey) = &index.pubkey { 281 | tracing::info!("Checking for upgrade"); 282 | match db::index::upgrade_v0_to_v1( 283 | conn, 284 | name, 285 | *pubkey, 286 | index.blockheight, 287 | index.txid, 288 | ) 289 | .await? 
290 | { 291 | db::index::UpgradeStatus::Upgraded => { 292 | tracing::info!("Name '{name}' upgraded from v0 to v1."); 293 | } 294 | db::index::UpgradeStatus::NotUpgraded => { 295 | tracing::info!("No upgrade found!"); 296 | db::index::insert_blockchain_index(conn, &index).await?; 297 | } 298 | } 299 | } 300 | db::relay_index::queue(conn, name).await?; 301 | } 302 | } else { 303 | db::index::insert_blockchain_index(conn, &index).await?; 304 | } 305 | 306 | Ok(()) 307 | } 308 | 309 | async fn cache_transfer( 310 | conn: &sqlx::Pool, 311 | index: BlockchainIndex, 312 | ) -> anyhow::Result<()> { 313 | db::index::insert_transfer_cache(conn, &index).await?; 314 | Ok(()) 315 | } 316 | 317 | async fn rewind_invalid_chain(client: Client, pool: SqlitePool) -> anyhow::Result<()> { 318 | // Get the latest indexed blockhash and blockheight 319 | let result = sqlx::query_as::<_, (i32, String)>( 320 | "SELECT blockheight, blockhash FROM index_height ORDER BY blockheight DESC LIMIT 1;", 321 | ) 322 | .fetch_optional(&pool) 323 | .await?; 324 | 325 | // No transactions indexed yet, skip the rest 326 | if result.is_none() { 327 | return Ok(()); 328 | } 329 | 330 | let (_, blockhash) = result.unwrap(); 331 | 332 | // Loop backwards from recently indexed block, continuing to the previous block, until we find the most recent ancestor which is not stale 333 | let stale_block = 334 | tokio::task::spawn_blocking(move || -> Result, anyhow::Error> { 335 | let mut next_block = Some(blockhash.parse()?); 336 | let mut stale_block = None; 337 | 338 | while let Some(next_blockhash) = next_block { 339 | let blockinfo = client.get_block_info(&next_blockhash)?; 340 | if blockinfo.confirmations >= 0 { 341 | next_block = None; 342 | } else { 343 | tracing::info!( 344 | "Stale block {} detected at height {}", 345 | blockinfo.hash, 346 | blockinfo.height 347 | ); 348 | stale_block = Some(blockinfo.height); 349 | next_block = blockinfo.previousblockhash; 350 | } 351 | } 352 | 353 | Ok(stale_block) 354 | 
}) 355 | .await??; 356 | 357 | // Delete entries from blockchain table 358 | if let Some(stale_block) = stale_block { 359 | tracing::info!("Reindexing beginning at height {stale_block}"); 360 | let mut tx = pool.begin().await?; 361 | sqlx::query("DELETE FROM raw_blockchain WHERE blockheight >= ?;") 362 | .bind(stale_block as i64) 363 | .execute(&mut tx) 364 | .await?; 365 | sqlx::query("DELETE FROM blockchain_index WHERE blockheight >= ?;") 366 | .bind(stale_block as i64) 367 | .execute(&mut tx) 368 | .await?; 369 | sqlx::query("DELETE FROM transfer_cache WHERE blockheight >= ?;") 370 | .bind(stale_block as i64) 371 | .execute(&mut tx) 372 | .await?; 373 | sqlx::query("DELETE FROM old_transfer_cache WHERE blockheight >= ?;") 374 | .bind(stale_block as i64) 375 | .execute(&mut tx) 376 | .await?; 377 | sqlx::query("DELETE FROM index_height WHERE blockheight >= ?;") 378 | .bind(stale_block as i64) 379 | .execute(&mut tx) 380 | .await?; 381 | tx.commit().await?; 382 | } 383 | 384 | Ok(()) 385 | } 386 | 387 | async fn expire_transfer_cache(pool: &sqlx::Pool) -> anyhow::Result<()> { 388 | tracing::info!("Starting transfer cache expiration."); 389 | let (index_height,) = sqlx::query_as::<_, (i64,)>("SELECT max(blockheight) FROM index_height;") 390 | .fetch_one(pool) 391 | .await?; 392 | sqlx::query("INSERT INTO old_transfer_cache SELECT * FROM transfer_cache WHERE blockheight < (? - 100);").bind(index_height).execute(pool).await?; 393 | sqlx::query("DELETE FROM transfer_cache WHERE blockheight < (? - 100);") 394 | .bind(index_height) 395 | .execute(pool) 396 | .await?; 397 | tracing::info!("Finished transfer cache expiration."); 398 | Ok(()) 399 | } 400 | --------------------------------------------------------------------------------