├── .gitignore
├── mizaru
│   ├── src
│   │   ├── lib.rs
│   │   └── keypair.rs
│   └── Cargo.toml
├── tundevice
│   ├── .cargo
│   │   └── config
│   ├── build.rs
│   ├── Cargo.toml
│   └── src
│       ├── linux.c
│       └── lib.rs
├── Cargo.toml
├── geph4-aioutils
│   ├── Cargo.toml
│   └── src
│       ├── dns.rs
│       └── lib.rs
└── geph4-binder-transport
    ├── Cargo.toml
    └── src
        ├── lib.rs
        ├── wiretypes.rs
        └── http.rs

/.gitignore:
--------------------------------------------------------------------------------
Cargo.lock
target/
--------------------------------------------------------------------------------
/mizaru/src/lib.rs:
--------------------------------------------------------------------------------
mod keypair;
pub use keypair::*;
--------------------------------------------------------------------------------
/tundevice/.cargo/config:
--------------------------------------------------------------------------------
[target.x86_64-unknown-linux-gnu]
runner = 'sudo -E'
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[workspace]

members = [
    "tundevice",
    "mizaru",
    "geph4-binder-transport",
    "geph4-aioutils",
]
--------------------------------------------------------------------------------
/tundevice/build.rs:
--------------------------------------------------------------------------------
extern crate cc;

use cc::Build;

fn main() {
    Build::new()
        .file("src/linux.c")
        .warnings(true)
        .compile("tundevice");
}
--------------------------------------------------------------------------------
/tundevice/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "tundevice"
version = "0.1.5-alpha.0"
authors = ["nullchinchilla "]
edition = "2018"
description = "tundevice helper for geph4"
license = "GPL-3.0-only"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
blocking = "1.0.2"
smol = "1.2.5"
bytes = "1.0.0"
flume = "0.10.1"
log = "0.4.11"
async-dup = "1"
libc = "0.2.103"

[build-dependencies]
cc = "1.0.66"
--------------------------------------------------------------------------------
/geph4-aioutils/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "geph4-aioutils"
version = "0.1.5"
authors = ["miyuruasuka"]
edition = "2018"
description = "utility crate for geph4"
license = "GPL-3.0-only"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
smol = "1.2.5"
serde = "1.0.118"
anyhow = "1.0.37"
bincode = "1.3.1"
smol-timeout = "0.6.0"
async-dup = "1.2.2"
concurrent-queue = "1"
once_cell = "1"
cached = "0.23"

[target.'cfg(windows)'.dependencies]
dnsclient = "0.1.9"

log = "0.4.11"
--------------------------------------------------------------------------------
/mizaru/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "mizaru"
version = "0.1.4-alpha.0"
authors = ["nullchinchilla "]
edition = "2018"
description = "mizaru authentication library for geph4"
license = "GPL-3.0-only"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
rsa-fdh = "0.5.0"
rsa = { version = "0.3.0", features = ["serde"] }
rand = "0.7"
rayon = "1.5.0"
sha2 = "0.9.2"
bincode = "1.3.1"
serde = { version = "1.0.118", features = ["derive", "rc"] }
hex = "0.4.2"
im = { version = "15.0.0", features = ["serde", "rayon"] }
--------------------------------------------------------------------------------
/tundevice/src/linux.c:
--------------------------------------------------------------------------------
/*
 * Since the rust ioctl bindings don't have all the structures and constants,
 * it's easier to just write the thing in C and link it in.
 */

#include <string.h>
#include <sys/ioctl.h>

#include <linux/if.h>
#include <linux/if_tun.h>

/**
 * fd ‒ the fd to turn into a TUN device.
 * name ‒ the name to use. If empty, the kernel will assign one by itself.
 *        Must be a buffer with capacity of at least 33 bytes.
 *
 * Returns 0 on success and a negative value on failure.
 */
int tun_setup(int fd, unsigned char *name) {
    struct ifreq ifr;
    memset(&ifr, 0, sizeof ifr);
    ifr.ifr_flags = IFF_TUN | IFF_NO_PI;

    // Leave one for terminating '\0'. No idea if it is needed, didn't find
    // it in the docs, but assuming the worst.
    strncpy(ifr.ifr_name, (char *)name, IFNAMSIZ - 1);

    int ioresult = ioctl(fd, TUNSETIFF, &ifr);
    if (ioresult < 0) {
        return ioresult;
    }
    strncpy((char *)name, ifr.ifr_name, IFNAMSIZ < 32 ? IFNAMSIZ : 32);
    name[32] = '\0';
    return 0;
}
--------------------------------------------------------------------------------
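Since the Rust bindings to this helper only appear further down (in tundevice/src/lib.rs), here is a minimal, hypothetical sketch of calling `tun_setup` directly across the FFI boundary. It assumes it is compiled inside the tundevice crate (so that build.rs links the C object in), and it needs root plus a Linux kernel with /dev/net/tun; the interface name is made up:

```rust
use std::fs::OpenOptions;
use std::os::raw::c_int;
use std::os::unix::io::AsRawFd;

extern "C" {
    // Matches `int tun_setup(int fd, unsigned char *name)` in src/linux.c,
    // compiled and linked by tundevice/build.rs via the cc crate.
    fn tun_setup(fd: c_int, name: *mut u8) -> c_int;
}

fn main() -> std::io::Result<()> {
    let fd = OpenOptions::new().read(true).write(true).open("/dev/net/tun")?;
    // The C side expects a buffer of at least 33 bytes and NUL-terminates it.
    let mut name = [0u8; 33];
    name[..8].copy_from_slice(b"tun-demo");
    if unsafe { tun_setup(fd.as_raw_fd(), name.as_mut_ptr()) } < 0 {
        return Err(std::io::Error::last_os_error());
    }
    let end = name.iter().position(|&b| b == 0).unwrap_or(name.len());
    println!("kernel assigned {}", String::from_utf8_lossy(&name[..end]));
    Ok(())
}
```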
/geph4-binder-transport/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "geph4-binder-transport"
version = "0.2.3"
authors = ["nullchinchilla "]
edition = "2018"
description = "geph4 binder protocol definitions"
license = "GPL-3.0-only"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
rsa = { version = "0.3.0", features = ["serde"] }
smol = "1.2.5"
serde = { version = "1.0.118", features = ["derive"] }
derivative = "2.1.3"
x25519-dalek = { version = "1.1.0", features = ["serde"] }
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
bincode = "1.3.1"
blake3 = { version = "1", default-features = false }
chacha20poly1305 = "0.7.1"
# surf = { version = "2.2.0", features = ["h1-client-rustls"], default-features = false }
async-h1 = "2.3.0"

http-types = "2.9.0"
log = "0.4.11"
rand = "0.7"
env_logger = "0.8.2"
mizaru = "0.1.3"
geph4-aioutils = "0.1.2"
smolscale = "0.3.7"
thiserror = "1.0.23"
rsa-fdh = "0.5.0"
async-trait = "0.1.42"
smol-timeout = "0.6.0"
rustls = "0.19"
webpki-roots = "0.21"

[dependencies.async-tls]
version = "0.11.0"
default-features = false
features = ["client"]
--------------------------------------------------------------------------------
/geph4-aioutils/src/dns.rs:
--------------------------------------------------------------------------------
use std::net::SocketAddr;

/// Resolves a string into a vector of SocketAddrs.
#[cfg(target_os = "windows")]
pub async fn resolve(host_port: &str) -> std::io::Result<Vec<SocketAddr>> {
    use dnsclient::{r#async::DNSClient, UpstreamServer};
    log::warn!("using custom DNS implementation to resolve {}", host_port);
    let exploded: Vec<&str> = host_port.split(':').collect();
    let port: u16 = exploded
        .get(1)
        .cloned()
        .ok_or_else(|| crate::to_ioerror("no port in address"))?
        .parse()
        .map_err(crate::to_ioerror)?;
    let mut resolver = DNSClient::new(vec![
        UpstreamServer::new("8.8.8.8:53".parse::<SocketAddr>().unwrap()),
        UpstreamServer::new("9.9.9.9:53".parse::<SocketAddr>().unwrap()),
        UpstreamServer::new("74.82.42.42:53".parse::<SocketAddr>().unwrap()),
        UpstreamServer::new("114.114.114.114:53".parse::<SocketAddr>().unwrap()),
    ]);
    resolver.set_timeout(std::time::Duration::from_secs(1));
    let result = resolver
        .query_a(exploded[0])
        .await
        .map_err(crate::to_ioerror)?;
    Ok(result
        .into_iter()
        .map(|ip| SocketAddr::new(ip.into(), port))
        .collect())
}

#[cfg(not(target_os = "windows"))]
pub async fn resolve(host_port: &str) -> std::io::Result<Vec<SocketAddr>> {
    resolve_inner(host_port.into()).await
}

pub async fn resolve_inner(host_port: String) -> std::io::Result<Vec<SocketAddr>> {
    smol::net::resolve(host_port).await
}
--------------------------------------------------------------------------------
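For reference, a minimal sketch of how a caller might use `resolve`; the host:port string is purely illustrative:

```rust
fn main() -> std::io::Result<()> {
    smol::block_on(async {
        // On Windows this goes through the dnsclient-based resolver above;
        // elsewhere it falls through to smol::net::resolve.
        let addrs = geph4_aioutils::resolve("example.com:443").await?;
        for addr in addrs {
            println!("resolved to {}", addr);
        }
        Ok(())
    })
}
```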
/tundevice/src/lib.rs:
--------------------------------------------------------------------------------
use smol::prelude::*;
use std::fs::OpenOptions;
use std::os::unix::io::AsRawFd;
use std::os::{raw::c_char, unix::prelude::RawFd};
use std::{ffi::CStr, process::Command};
use std::{fs, io, os::raw::c_int};

extern "C" {
    // Provided by src/linux.c, compiled and linked in by build.rs.
    fn tun_setup(fd: c_int, name: *mut u8) -> c_int;
}

/// A virtual TUN interface.
///
/// This is the main interface of this crate, representing a TUN device or something similar on non-Unix platforms.
#[derive(Debug)]
pub struct TunDevice {
    fd: async_dup::Mutex<smol::Async<fs::File>>,
    name: String,
}

impl TunDevice {
    /// Creates a new TUN interface by calling into the operating system.
    pub fn new_from_os(name: &str) -> io::Result<Self> {
        assert!(std::env::consts::OS == "linux");
        // open FD
        let fd = OpenOptions::new()
            .read(true)
            .write(true)
            .open("/dev/net/tun")?;
        // The buffer is larger than needed, but who cares… it is large enough.
        let mut name_buffer = Vec::new();
        name_buffer.extend_from_slice(name.as_bytes());
        name_buffer.extend_from_slice(&[0; 1024]);
        let name_ptr: *mut u8 = name_buffer.as_mut_ptr();
        let result = unsafe { tun_setup(fd.as_raw_fd(), name_ptr) };
        if result < 0 {
            return Err(io::Error::last_os_error());
        }
        let name = unsafe {
            CStr::from_ptr(name_ptr as *const c_char)
                .to_string_lossy()
                .into_owned()
        };
        log::info!("TUN DEVICE INITIALIZED {:#?}", fd);
        // return the device
        Ok(TunDevice {
            fd: async_dup::Mutex::new(smol::Async::new(fd)?),
            name,
        })
    }

    /// Assigns an IP address to the device.
    pub fn assign_ip(&self, cidr_str: &str) {
        assert!(std::env::consts::OS == "linux");
        // spawn ip tool
        Command::new("/usr/bin/env")
            .args(&["ip", "link", "set", &self.name, "up"])
            .output()
            .expect("cannot bring up interface!");
        Command::new("/usr/bin/env")
            .args(&["ip", "addr", "flush", "dev", &self.name])
            .output()
            .expect("cannot flush addresses on interface!");
        Command::new("/usr/bin/env")
            .args(&["ip", "addr", "add", cidr_str, "dev", &self.name])
            .output()
            .expect("cannot assign IP to interface!");
        Command::new("/usr/bin/env")
            .args(&["ip", "link", "set", "dev", &self.name, "mtu", "1280"])
            .output()
            .expect("cannot set MTU to 1280!");
    }

    /// Reads a raw packet.
    pub async fn read_raw(&self, buf: &mut [u8]) -> Option<usize> {
        (&self.fd).read(buf).await.ok()
    }

    /// Writes a raw packet.
    pub async fn write_raw(&self, to_write: &[u8]) -> Option<()> {
        (&self.fd).write(to_write).await.ok()?;
        Some(())
    }

    /// Extracts a **duplicate** of the inner file descriptor.
    pub fn dup_rawfd(&self) -> RawFd {
        let inner = self.fd.lock();
        unsafe { libc::dup(inner.as_raw_fd()) }
    }
}

// #[cfg(test)]
// mod tests {
//     use super::*;
//     use std::{thread, time::Duration};

//     #[test]
//     fn test_tun() {
//         smol::block_on(async move {
//             let mut device = TunDevice::new_from_os("tun-test").unwrap();
//             device.assign_ip("10.89.64.2".parse().unwrap());
//             device.route_traffic("10.89.64.1".parse().unwrap());
//             loop {
//                 println!("{:?}", device.read_raw().await);
//             }
//         });
//     }
//     // commented out because this whole crate requires rootish perms
// }
--------------------------------------------------------------------------------
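A hypothetical usage sketch of `TunDevice`, essentially the commented-out test above made explicit. It needs root (hence `runner = 'sudo -E'` in .cargo/config) and Linux; the interface name and CIDR are made up:

```rust
fn main() {
    smol::block_on(async {
        let device = tundevice::TunDevice::new_from_os("tun-demo").expect("cannot create TUN");
        // assign_ip also brings the link up and pins the MTU to 1280.
        device.assign_ip("10.89.64.2/24");
        let mut buf = [0u8; 2048];
        while let Some(n) = device.read_raw(&mut buf).await {
            println!("got a {}-byte packet", n);
        }
    });
}
```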
/geph4-binder-transport/src/lib.rs:
--------------------------------------------------------------------------------
mod wiretypes;

use std::{
    net::IpAddr,
    sync::{atomic::AtomicUsize, atomic::Ordering, Arc},
    time::Duration,
};

use smol_timeout::TimeoutExt;
pub use wiretypes::*;
mod http;
pub use http::*;
use rand::prelude::*;

/// Trait that all binder clients implement.
#[async_trait::async_trait]
pub trait BinderClient: Sync + Send {
    /// Sends a request to the network.
    async fn request(&self, request: BinderRequestData) -> BinderResult<BinderResponse>;
}

/// Trait that all binder transport servers implement.
pub trait BinderServer: Send + Sync {
    /// Receives a request from the network.
    fn next_request(&self) -> std::io::Result<BinderRequest>;
}

/// A binder request.
#[derive(derivative::Derivative)]
#[derivative(Debug)]
pub struct BinderRequest {
    pub request_data: BinderRequestData,
    probable_ip: Option<IpAddr>,
    #[derivative(Debug = "ignore")]
    response_send: Box<dyn FnOnce(BinderResult<BinderResponse>) + Send + Sync>,
}

impl BinderRequest {
    /// Responds to the request.
    pub fn respond(self, response: BinderResult<BinderResponse>) {
        (self.response_send)(response)
    }

    /// The probable IP address of the request.
    pub fn probable_ip(&self) -> Option<IpAddr> {
        self.probable_ip
    }
}

/// A BinderClient implementation wrapping multiple BinderClients.
pub struct MultiBinderClient {
    clients: Vec<Arc<dyn BinderClient>>,
    index: AtomicUsize,
}

impl MultiBinderClient {
    /// Creates a MultiBinderClient that doesn't contain any clients yet.
    pub fn empty() -> Self {
        MultiBinderClient {
            clients: Vec::new(),
            index: AtomicUsize::new(0),
        }
    }

    /// Adds a new client, returning the updated MultiBinderClient.
    pub fn add_client(mut self, new: impl BinderClient + 'static) -> Self {
        self.clients.push(Arc::new(new));
        self.clients.shuffle(&mut rand::thread_rng());
        self
    }
}

impl MultiBinderClient {
    // does the request on ONE binder
    async fn request_one(&self, request: BinderRequestData) -> BinderResult<BinderResponse> {
        let mut timeout = Duration::from_secs(3);
        loop {
            let curr_idx = self.index.fetch_add(1, Ordering::Relaxed);
            let client = &self.clients[curr_idx % self.clients.len()];
            log::trace!("request_one started");
            let res = client.request(request.clone()).timeout(timeout).await;
            if let Some(Ok(res)) = res {
                log::trace!("request_one succeeded");
                self.index.fetch_sub(1, Ordering::Relaxed);
                return Ok(res);
            } else if let Some(Err(BinderError::Network(other))) = res {
                log::warn!("MultiBinderClient switching backend due to error {}", other);
            } else if let Some(e) = res {
                return e;
            } else {
                log::warn!(
                    "MultiBinderClient switching backend due to timeout {:?}",
                    timeout
                );
                timeout += Duration::from_secs(1);
            }
        }
    }

    // does the request on all binders
    async fn request_multi(&self, request: BinderRequestData) -> BinderResult<BinderResponse> {
        let (send_res, recv_res) = smol::channel::unbounded();
        let mut _tasks = vec![];
        for (idx, client) in self.clients.iter().enumerate() {
            let client = client.clone();
            let request = request.clone();
            let send_res = send_res.clone();
            _tasks.push(smolscale::spawn(async move {
                let _ = send_res.send((idx, client.request(request).await)).await;
            }));
        }
        drop(send_res);

        let mut final_res = None;
        for _ in 0.._tasks.len() {
            let (idx, res) = recv_res
                .recv()
                .await
                .expect("result channel shouldn't have closed");
            self.index.store(idx, Ordering::Relaxed);
            final_res = Some(res);
            if final_res.as_ref().unwrap().is_ok() {
                break;
            } else {
                log::warn!("request_multi failed: {:?}", final_res);
            }
        }
        final_res.unwrap()
    }
}

#[async_trait::async_trait]
impl BinderClient for MultiBinderClient {
    async fn request(&self, request: BinderRequestData) -> BinderResult<BinderResponse> {
        if request.is_idempotent() {
            self.request_multi(request).await
        } else {
            self.request_one(request).await
        }
    }
}
--------------------------------------------------------------------------------
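To make the `BinderClient` contract concrete, here is a toy, hypothetical client that answers every request in-process, composed through `MultiBinderClient`. `EchoClient` is illustrative, not part of the crate, and the sketch assumes the same `async-trait` and `smol` dependencies the crate itself uses:

```rust
use geph4_binder_transport::{
    BinderClient, BinderRequestData, BinderResponse, BinderResult, MultiBinderClient,
};

/// A toy client that "handles" every request locally.
struct EchoClient;

#[async_trait::async_trait]
impl BinderClient for EchoClient {
    async fn request(&self, _request: BinderRequestData) -> BinderResult<BinderResponse> {
        Ok(BinderResponse::Okay)
    }
}

fn main() {
    smol::block_on(async {
        // MultiBinderClient fans idempotent requests out to every backend
        // (request_multi) and round-robins the rest (request_one).
        let multi = MultiBinderClient::empty()
            .add_client(EchoClient)
            .add_client(EchoClient);
        let resp = multi.request(BinderRequestData::GetExits).await;
        println!("{:?}", resp);
    });
}
```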
/geph4-aioutils/src/lib.rs:
--------------------------------------------------------------------------------
use std::{pin::Pin, time::Duration};

use serde::{de::DeserializeOwned, Serialize};
use smol::{channel::Receiver, prelude::*};

mod dns;
pub use dns::*;

/// Races two different futures, returning the first non-Err result, or an Err if both branches error.
pub async fn try_race<T, E, F1, F2>(future1: F1, future2: F2) -> Result<T, E>
where
    F1: Future<Output = Result<T, E>>,
    F2: Future<Output = Result<T, E>>,
{
    let (send_err, recv_err) = smol::channel::bounded(2);
    // success future, always returns a success.
    let success = smol::future::race(
        async {
            match future1.await {
                Ok(v) => v,
                Err(e) => {
                    drop(send_err.try_send(e));
                    smol::future::pending().await
                }
            }
        },
        async {
            match future2.await {
                Ok(v) => v,
                Err(e) => {
                    drop(send_err.try_send(e));
                    smol::future::pending().await
                }
            }
        },
    );
    // fail future. waits for two failures.
    let fail = async {
        if recv_err.recv().await.is_ok() {
            if let Ok(err) = recv_err.recv().await {
                err
            } else {
                smol::future::pending().await
            }
        } else {
            smol::future::pending().await
        }
    };
    // race success against fail
    async { Ok(success.await) }
        .or(async { Err(fail.await) })
        .await
}

/// Reads a bincode-deserializable value prefixed with a 16-bit big-endian length.
pub async fn read_pascalish<T: DeserializeOwned>(
    reader: &mut (impl AsyncRead + Unpin),
) -> anyhow::Result<T> {
    // first read 2 bytes as length
    let mut len_bts = [0u8; 2];
    reader.read_exact(&mut len_bts).await?;
    let len = u16::from_be_bytes(len_bts);
    // then read len
    let mut true_buf = vec![0u8; len as usize];
    reader.read_exact(&mut true_buf).await?;
    // then deserialize
    Ok(bincode::deserialize(&true_buf)?)
}

/// Writes a bincode-serializable value prefixed with a 16-bit big-endian length.
pub async fn write_pascalish<T: Serialize>(
    writer: &mut (impl AsyncWrite + Unpin),
    value: &T,
) -> anyhow::Result<()> {
    let serialized = bincode::serialize(value).unwrap();
    assert!(serialized.len() <= 65535);
    // write bytes
    writer
        .write_all(&(serialized.len() as u16).to_be_bytes())
        .await?;
    writer.write_all(&serialized).await?;
    Ok(())
}

const IDLE_TIMEOUT: Duration = Duration::from_secs(3600);

/// Copies an AsyncRead to an AsyncWrite, with a callback for every write.
#[inline]
pub async fn copy_with_stats(
    reader: impl AsyncRead + Unpin,
    writer: impl AsyncWrite + Unpin,
    mut on_write: impl FnMut(usize),
) -> std::io::Result<()> {
    copy_with_stats_async(reader, writer, move |n| {
        on_write(n);
        async {}
    })
    .await
}

/// Copies an AsyncRead to an AsyncWrite, with an async callback for every write.
#[inline]
pub async fn copy_with_stats_async<F: Future<Output = ()>>(
    mut reader: impl AsyncRead + Unpin,
    mut writer: impl AsyncWrite + Unpin,
    mut on_write: impl FnMut(usize) -> F,
) -> std::io::Result<()> {
    let mut buffer = [0u8; 32768];
    let mut timeout = smol::Timer::after(IDLE_TIMEOUT);
    loop {
        // first read into the small buffer
        let n = reader
            .read(&mut buffer)
            .or(async {
                (&mut timeout).await;
                Err(std::io::Error::new(
                    std::io::ErrorKind::TimedOut,
                    "copy_with_stats timeout",
                ))
            })
            .await?;
        if n == 0 {
            return Ok(());
        }
        timeout.set_after(IDLE_TIMEOUT);
        writer
            .write_all(&buffer[..n])
            .or(async {
                (&mut timeout).await;
                Err(std::io::Error::new(
                    std::io::ErrorKind::TimedOut,
                    "copy_with_stats timeout",
                ))
            })
            .await?;
        on_write(n).await;
    }
}

// /// Copies a Read to a Write, with a callback for every write.
// pub fn copy_with_stats_sync(
//     mut reader: impl std::io::Read,
//     mut writer: impl std::io::Write,
//     mut on_write: impl FnMut(usize),
// ) -> std::io::Result<()> {
//     let mut buffer = [0u8; 32768];
//     loop {
//         // first read into the small buffer
//         let n = reader.read(&mut buffer)?;
//         if n == 0 {
//             return Ok(());
//         }
//         on_write(n);
//         writer.write_all(&buffer[..n])?;
//     }
// }

pub trait AsyncReadWrite: AsyncRead + AsyncWrite {}

impl<T: AsyncRead + AsyncWrite> AsyncReadWrite for T {}

pub type ConnLike = async_dup::Arc<async_dup::Mutex<Pin<Box<dyn AsyncReadWrite + Send + Sync + 'static>>>>;

pub fn connify<T: AsyncReadWrite + Send + Sync + 'static>(conn: T) -> ConnLike {
    async_dup::Arc::new(async_dup::Mutex::new(Box::pin(conn)))
}

pub fn to_ioerror<T: Into<Box<dyn std::error::Error + Send + Sync>>>(e: T) -> std::io::Error {
    std::io::Error::new(std::io::ErrorKind::Other, e)
}

/// Reads from an async_channel::Receiver, but returns a vector of all available items instead of just one to save on context-switching.
pub async fn recv_chan_many<T>(ch: Receiver<T>) -> Result<Vec<T>, smol::channel::RecvError> {
    let mut toret = vec![ch.recv().await?];
    // push as many as possible
    while let Ok(val) = ch.try_recv() {
        toret.push(val);
    }
    Ok(toret)
}
--------------------------------------------------------------------------------
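A small, hypothetical sketch exercising two of the utilities above — `try_race` and the "pascalish" length-prefixed framing — with arbitrary values. It assumes `smol::io::Cursor` (futures-lite's in-memory async cursor, re-exported by smol) as the stream; if that re-export is unavailable, `futures_lite::io::Cursor` is the same type:

```rust
use std::time::Duration;

fn main() -> anyhow::Result<()> {
    smol::block_on(async {
        // try_race: the faster Ok branch wins; errors only surface if both sides fail.
        let quick = async { Ok::<u32, anyhow::Error>(1) };
        let slow = async {
            smol::Timer::after(Duration::from_millis(100)).await;
            Ok::<u32, anyhow::Error>(2)
        };
        assert_eq!(geph4_aioutils::try_race(quick, slow).await?, 1);

        // pascalish round trip: 16-bit big-endian length prefix, then bincode.
        let mut wire = smol::io::Cursor::new(Vec::new());
        geph4_aioutils::write_pascalish(&mut wire, &("hello".to_string(), 42u8)).await?;
        let mut wire = smol::io::Cursor::new(wire.into_inner());
        let decoded: (String, u8) = geph4_aioutils::read_pascalish(&mut wire).await?;
        assert_eq!(decoded, ("hello".to_string(), 42u8));
        Ok(())
    })
}
```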
/mizaru/src/keypair.rs:
--------------------------------------------------------------------------------
use rayon::prelude::*;
use rsa::{RSAPrivateKey, RSAPublicKey};
use rsa_fdh::blind;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::{
    sync::{atomic::AtomicU64, atomic::Ordering},
    time::SystemTime,
};

const KEY_COUNT: usize = 65536;
const KEY_BITS: usize = 2048;

/// Obtains the epoch from a SystemTime.
pub fn time_to_epoch(time: SystemTime) -> usize {
    let unix = time.duration_since(std::time::UNIX_EPOCH).unwrap();
    (unix.as_secs() / 86400) as usize
}

/// A Mizaru private key. Consists of a vast number of RSA private keys, one for every day, for the 65536 days after the Unix epoch. This supports serde so that you can save this to disk.
#[derive(Clone, Serialize, Deserialize)]
pub struct SecretKey {
    rsa_keys: im::Vector<RSAPrivateKey>,
    // all the intermediate layers of the merkle tree
    merkle_tree: im::Vector<im::Vector<[u8; 32]>>,
}

impl SecretKey {
    /// Generates a Mizaru private key. May take **quite** a while!
    pub fn generate() -> Self {
        let count = AtomicU64::new(0);
        // first we generate the massive number of rsa keys
        let rsa_keys: im::Vector<RSAPrivateKey> = (0..KEY_COUNT)
            .into_par_iter()
            .map(|_| {
                let mut rng = rand::rngs::OsRng {};
                let count = count.fetch_add(1, Ordering::SeqCst);
                eprintln!("generated {}/{} keys", count, KEY_COUNT);
                RSAPrivateKey::new(&mut rng, KEY_BITS).expect("can't generate RSA key")
            })
            .collect::<Vec<RSAPrivateKey>>()
            .into();
        // then, we populate the merkle tree level by level
        let merkle_tree_first: im::Vector<[u8; 32]> = rsa_keys
            .iter()
            .map(|v| Sha256::digest(&bincode::serialize(&v.to_public_key()).unwrap()).into())
            .collect();
        let mut merkle_tree = im::vector![merkle_tree_first];
        while merkle_tree.last().unwrap().len() > 1 {
            // "decimate" the merkle tree level to make the next
            let last = merkle_tree.last().unwrap();
            let new = (0..last.len() / 2)
                .map(|i| {
                    let mut v = last[i * 2].to_vec();
                    v.extend_from_slice(&last[i * 2 + 1]);
                    Sha256::digest(&v).into()
                })
                .collect();
            merkle_tree.push_back(new)
        }
        // return the value
        SecretKey {
            rsa_keys,
            merkle_tree,
        }
    }

    fn merkle_branch(&self, idx: usize) -> Vec<[u8; 32]> {
        fn other(i: usize) -> usize {
            i / 2 * 2 + ((i + 1) % 2)
        }
        let mut idx = idx;
        // HACK mutation within map
        self.merkle_tree
            .iter()
            .take(self.merkle_tree.len() - 1)
            .map(|level| {
                let toret = level[other(idx)];
                idx >>= 1;
                toret
            })
            .collect()
    }

    /// Blind-signs a message with a given epoch key. The returned struct contains all information required to verify a specific key within the merkle root and an RSA-FDH blind signature using that specific key.
    pub fn blind_sign(&self, epoch: usize, blinded_digest: &[u8]) -> BlindedSignature {
        assert!(epoch < self.rsa_keys.len());
        let mut rng = rand::rngs::OsRng {};
        let key_to_use = &self.rsa_keys[epoch];
        let bare_sig =
            blind::sign(&mut rng, key_to_use, blinded_digest).expect("blind signature failed");
        BlindedSignature {
            epoch,
            used_key: key_to_use.to_public_key(),
            merkle_branch: self.merkle_branch(epoch),
            blinded_sig: bare_sig,
        }
    }

    /// Returns the "public key", i.e. the merkle tree root.
    pub fn to_public_key(&self) -> PublicKey {
        PublicKey(self.merkle_tree.last().unwrap()[0])
    }

    /// Gets an epoch key.
    pub fn get_subkey(&self, epoch: usize) -> &RSAPrivateKey {
        &self.rsa_keys[epoch]
    }
}

/// A blind signature.
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct BlindedSignature {
    pub epoch: usize,
    pub used_key: RSAPublicKey,
    pub merkle_branch: Vec<[u8; 32]>,
    pub blinded_sig: Vec<u8>,
}

impl BlindedSignature {
    /// Unblinds the signature, given the unblinding factor.
    pub fn unblind(self, unblinder: &[u8]) -> UnblindedSignature {
        let unblinded_sig = blind::unblind(&self.used_key, &self.blinded_sig, unblinder);
        UnblindedSignature {
            epoch: self.epoch,
            used_key: self.used_key,
            merkle_branch: self.merkle_branch,
            unblinded_sig,
        }
    }
}

#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub struct UnblindedSignature {
    pub epoch: usize,
    pub used_key: RSAPublicKey,
    pub merkle_branch: Vec<[u8; 32]>,
    pub unblinded_sig: Vec<u8>,
}

/// A Mizaru public key. This is actually just the merkle-tree-root of a huge bunch of bincoded RSA public keys!
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct PublicKey(pub [u8; 32]);

impl PublicKey {
    /// Verifies an unblinded signature.
    pub fn blind_verify(&self, unblinded_digest: &[u8], sig: &UnblindedSignature) -> bool {
        self.verify_member(sig.epoch, &sig.used_key, &sig.merkle_branch)
            && blind::verify(&sig.used_key, unblinded_digest, &sig.unblinded_sig).is_ok()
    }

    /// Verifies that a certain subkey is the correct one for the epoch.
    pub fn verify_member(
        &self,
        epoch: usize,
        subkey: &RSAPublicKey,
        merkle_branch: &[[u8; 32]],
    ) -> bool {
        let mut accumulator: [u8; 32] =
            Sha256::digest(&bincode::serialize(&subkey).unwrap()).into();
        for (i, hash) in merkle_branch.iter().enumerate() {
            if epoch >> i & 1 == 0 {
                // the hash is on the "odd" position
                accumulator = hash_together(&accumulator, hash)
            } else {
                accumulator = hash_together(hash, &accumulator)
            }
        }
        accumulator == self.0
    }
}

impl From<[u8; 32]> for PublicKey {
    fn from(val: [u8; 32]) -> Self {
        Self(val)
    }
}

fn hash_together(x: &[u8], y: &[u8]) -> [u8; 32] {
    let mut buf = Vec::with_capacity(x.len() + y.len());
    buf.extend_from_slice(x);
    buf.extend_from_slice(y);
    Sha256::digest(&buf).into()
}

// #[cfg(test)]
// mod tests {
//     use super::*;
//     use std::time::Instant;
//     #[test]
//     fn generate_key() {
//         let before = Instant::now();
//         let privkey = SecretKey::generate();
//         eprintln!("elapsed {} secs", before.elapsed().as_secs_f64());
//         eprintln!(
//             "signature is {} bytes",
//             privkey.blind_sign(1, b"hello world").len()
//         )
//     }
// }
--------------------------------------------------------------------------------
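The bit-twiddling in `verify_member` is easier to see on a toy tree. A self-contained sketch (independent of mizaru, using only `sha2`) that builds a four-leaf Merkle tree and checks leaf 2's branch with the same epoch-bit walk:

```rust
use sha2::{Digest, Sha256};

fn hash2(a: &[u8], b: &[u8]) -> [u8; 32] {
    let mut buf = a.to_vec();
    buf.extend_from_slice(b);
    Sha256::digest(&buf).into()
}

fn main() {
    // Four leaves standing in for hashed per-epoch public keys.
    let leaves: Vec<[u8; 32]> = (0u8..4).map(|i| Sha256::digest(&[i]).into()).collect();
    // The intermediate level and the root.
    let l1 = [hash2(&leaves[0], &leaves[1]), hash2(&leaves[2], &leaves[3])];
    let root = hash2(&l1[0], &l1[1]);

    // Branch proving membership of leaf 2: its sibling leaf, then the sibling subtree.
    let epoch = 2usize;
    let branch = [leaves[3], l1[0]];

    // Same walk as verify_member: each epoch bit says which side the accumulator is on.
    let mut acc = leaves[epoch];
    for (i, sibling) in branch.iter().enumerate() {
        acc = if (epoch >> i) & 1 == 0 {
            hash2(&acc, sibling)
        } else {
            hash2(sibling, &acc)
        };
    }
    assert_eq!(acc, root);
    println!("leaf {} is under the root", epoch);
}
```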
/geph4-binder-transport/src/wiretypes.rs:
--------------------------------------------------------------------------------
use std::net::SocketAddr;

use chacha20poly1305::{
    aead::{Aead, NewAead},
    Nonce,
};
use chacha20poly1305::{ChaCha20Poly1305, Key};
use serde::{Deserialize, Serialize};

/// Either a response or a binder error.
pub type BinderResult<T> = Result<T, BinderError>;

/// Data for a binder request.
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub enum BinderRequestData {
    /// Get mizaru epoch key
    GetEpochKey { level: String, epoch: u16 },
    /// Authenticate a user, obtaining the user info and blinded signature.
    Authenticate {
        username: String,
        password: String,
        level: String,
        epoch: u16,
        blinded_digest: Vec<u8>,
    },
    /// Validates a blind signature token, applying rate-limiting as appropriate.
    Validate {
        level: String,
        unblinded_digest: Vec<u8>,
        unblinded_signature: mizaru::UnblindedSignature,
    },
    /// Obtain a CAPTCHA for registration
    GetCaptcha,
    /// Register a user
    RegisterUser {
        username: String,
        password: String,
        captcha_id: String,
        captcha_soln: String,
    },
    /// Changes password
    ChangePassword {
        username: String,
        old_password: String,
        new_password: String,
    },
    /// Delete a user
    DeleteUser { username: String, password: String },

    /// Get all exits
    GetExits,

    /// Add a bridge route
    AddBridgeRoute {
        /// Sosistab public key
        sosistab_pubkey: x25519_dalek::PublicKey,
        /// Address of the intermediate bridge
        bridge_address: SocketAddr,
        /// Bridge group
        bridge_group: String,
        /// Exit hostname
        exit_hostname: String,
        /// Time
        route_unixtime: u64,
        /// Authorization from the exit. Signature over a tuple of the rest of the fields except the exit hostname.
        exit_signature: ed25519_dalek::Signature,
    },

    /// Get bridges
    GetBridges {
        level: String,
        unblinded_digest: Vec<u8>,
        unblinded_signature: mizaru::UnblindedSignature,
        exit_hostname: String,
    },

    /// Get all free exits
    GetFreeExits,
}

impl BinderRequestData {
    /// Encrypts binder request data to a particular recipient, returning the encrypted request and also the reply key.
    pub fn encrypt(
        &self,
        my_esk: x25519_dalek::EphemeralSecret,
        recipient: x25519_dalek::PublicKey,
    ) -> (EncryptedBinderRequestData, [u8; 32]) {
        let plain = bincode::serialize(self).unwrap();
        let sender_epk = x25519_dalek::PublicKey::from(&my_esk);
        let shared_sec = my_esk.diffie_hellman(&recipient);
        let up_key = blake3::keyed_hash(blake3::hash(b"request").as_bytes(), shared_sec.as_bytes());
        let up_key = Key::from_slice(up_key.as_bytes());
        let ciphertext = ChaCha20Poly1305::new(up_key)
            .encrypt(Nonce::from_slice(&[0u8; 12]), plain.as_slice())
            .unwrap();
        (
            EncryptedBinderRequestData {
                sender_epk,
                ciphertext,
            },
            *blake3::keyed_hash(blake3::hash(b"response").as_bytes(), shared_sec.as_bytes())
                .as_bytes(),
        )
    }

    /// Returns a boolean determining whether or not this request is idempotent.
    #[allow(clippy::match_like_matches_macro)]
    pub fn is_idempotent(&self) -> bool {
        match self {
            BinderRequestData::GetEpochKey { .. } => true,
            BinderRequestData::GetCaptcha { .. } => true,
            BinderRequestData::GetExits { .. } => true,
            BinderRequestData::GetFreeExits { .. } => true,
            BinderRequestData::GetBridges { .. } => true,
            // BinderRequestData::Authenticate { .. } => true,
            // BinderRequestData::Validate { .. } => true,
            _ => false,
        }
    }
}

/// Encrypted binder request data. Uses a crypto-box-like construction.
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct EncryptedBinderRequestData {
    sender_epk: x25519_dalek::PublicKey,
    ciphertext: Vec<u8>,
}

impl EncryptedBinderRequestData {
    /// Decrypts the binder request data. Returns the binder request and also the reply key.
    pub fn decrypt(
        &self,
        my_lsk: &x25519_dalek::StaticSecret,
    ) -> Option<(BinderRequestData, [u8; 32])> {
        let shared_sec = my_lsk.diffie_hellman(&self.sender_epk);
        let up_key = blake3::keyed_hash(blake3::hash(b"request").as_bytes(), shared_sec.as_bytes());
        let plaintext = ChaCha20Poly1305::new(Key::from_slice(up_key.as_bytes()))
            .decrypt(Nonce::from_slice(&[0u8; 12]), self.ciphertext.as_slice())
            .ok()?;
        Some((
            bincode::deserialize(&plaintext).ok()?,
            *blake3::keyed_hash(blake3::hash(b"response").as_bytes(), shared_sec.as_bytes())
                .as_bytes(),
        ))
    }
}

/// Binder response
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub enum BinderResponse {
    /// Okay to something that does not need response data.
    Okay,
    /// Carrying an epoch key
    GetEpochKeyResp(rsa::RSAPublicKey),
    /// Response to authentication
    AuthenticateResp {
        user_info: UserInfo,
        blind_signature: mizaru::BlindedSignature,
    },
    /// Response to ticket validation
    ValidateResp(bool),
    /// Response to CAPTCHA request
    GetCaptchaResp {
        captcha_id: String,
        png_data: Vec<u8>,
    },
    /// Response to request for all exits
    GetExitsResp(Vec<ExitDescriptor>),
    /// Response to request for bridges
    GetBridgesResp(Vec<BridgeDescriptor>),
}

/// Exit descriptor
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct ExitDescriptor {
    pub hostname: String,
    pub signing_key: ed25519_dalek::PublicKey,
    pub country_code: String,
    pub city_code: String,
    pub sosistab_key: x25519_dalek::PublicKey,
}

/// Bridge descriptor
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Hash)]
pub struct BridgeDescriptor {
    pub endpoint: SocketAddr,
    pub sosistab_key: x25519_dalek::PublicKey,
}

/// Information for a particular user
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub struct UserInfo {
    pub userid: i32,
    pub username: String,
    pub pwdhash: String,
    pub subscription: Option<SubscriptionInfo>,
}

/// Information about a user's subscription
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub struct SubscriptionInfo {
    pub level: String,
    pub expires_unix: i64,
}

/// Encrypts a binder response to the reply key.
pub fn encrypt_binder_response(
    this: &BinderResult<BinderResponse>,
    reply_key: [u8; 32],
) -> EncryptedBinderResponse {
    let plain = bincode::serialize(this).unwrap();
    EncryptedBinderResponse(
        ChaCha20Poly1305::new(Key::from_slice(&reply_key))
            .encrypt(Nonce::from_slice(&[0u8; 12]), plain.as_slice())
            .unwrap(),
    )
}

/// Encrypted binder response (encrypted Result)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptedBinderResponse(Vec<u8>);

impl EncryptedBinderResponse {
    /// Decrypts it
    pub fn decrypt(&self, reply_key: [u8; 32]) -> Option<BinderResult<BinderResponse>> {
        bincode::deserialize(
            &ChaCha20Poly1305::new(Key::from_slice(&reply_key))
                .decrypt(Nonce::from_slice(&[0u8; 12]), self.0.as_slice())
                .ok()?,
        )
        .ok()
    }
}

/// Error type enumerating everything that could go wrong: e.g. user does not exist, wrong password, etc.
#[derive(Clone, Debug, Serialize, Deserialize, thiserror::Error)]
pub enum BinderError {
    // user-related errors
    #[error("no user found")]
    NoUserFound,
    #[error("user already exists")]
    UserAlreadyExists,
    #[error("wrong password")]
    WrongPassword,
    #[error("incorrect captcha")]
    WrongCaptcha,
    #[error("incorrect account level")]
    WrongLevel,
    // database error
    #[error("database failed: `{0}`")]
    DatabaseFailed(String),
    // other failure
    #[error("other failure `{0}`")]
    Other(String),
    // I/O failure
    #[error("network failure `{0}`")]
    Network(String),
}

impl From<std::io::Error> for BinderError {
    fn from(value: std::io::Error) -> Self {
        BinderError::Network(value.to_string())
    }
}
--------------------------------------------------------------------------------
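A hypothetical round-trip sketch of the crypto-box construction above, mirroring the `EphemeralSecret::new(rand::rngs::OsRng {})` constructor style used in http.rs; the keys and the request are made up:

```rust
use geph4_binder_transport::{encrypt_binder_response, BinderRequestData, BinderResponse};

fn main() {
    // The binder's long-term keypair; clients only ever see the public half.
    let binder_lsk = x25519_dalek::StaticSecret::new(rand::rngs::OsRng {});
    let binder_lpk = x25519_dalek::PublicKey::from(&binder_lsk);

    // Client side: encrypt a request to the binder's public key.
    let my_esk = x25519_dalek::EphemeralSecret::new(rand::rngs::OsRng {});
    let request = BinderRequestData::GetEpochKey {
        level: "free".into(),
        epoch: 123,
    };
    let (encrypted, reply_key) = request.encrypt(my_esk, binder_lpk);

    // Server side: recover the request and the key for the reply.
    let (decrypted, server_reply_key) = encrypted.decrypt(&binder_lsk).expect("decryption failed");
    assert_eq!(decrypted, request);
    assert_eq!(reply_key, server_reply_key);

    // The response travels back under the shared reply key.
    let enc_resp = encrypt_binder_response(&Ok(BinderResponse::Okay), reply_key);
    let roundtrip = enc_resp
        .decrypt(reply_key)
        .expect("bad reply key")
        .expect("binder error");
    assert_eq!(roundtrip, BinderResponse::Okay);
}
```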
/geph4-binder-transport/src/http.rs:
--------------------------------------------------------------------------------
use crate::{
    encrypt_binder_response, BinderClient, BinderError, BinderRequest, BinderRequestData,
    BinderResponse, BinderResult, BinderServer, EncryptedBinderRequestData,
    EncryptedBinderResponse,
};
use async_tls::TlsConnector;
use http_types::{Method, Request, StatusCode, Url};
use smol::channel::{Receiver, Sender};
use smol_timeout::TimeoutExt;
use std::{
    net::{IpAddr, SocketAddr},
    sync::Arc,
    time::{Duration, Instant},
};

/// An HTTP-based BinderClient implementation, driven by async-h1.
#[derive(Clone)]
pub struct HttpClient {
    binder_lpk: x25519_dalek::PublicKey,
    endpoint: String,
    headers: Vec<(String, String)>,
    tls_config: rustls::ClientConfig,
}

impl HttpClient {
    /// Creates a new HTTP client from the given endpoint and headers.
    pub fn new<T: ToString>(
        binder_lpk: x25519_dalek::PublicKey,
        endpoint: T,
        headers: &[(T, T)],
        tls_config: Option<rustls::ClientConfig>,
    ) -> Self {
        let mut default_tls_config = rustls::ClientConfig::default();
        default_tls_config
            .root_store
            .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
        HttpClient {
            binder_lpk,
            endpoint: endpoint.to_string(),
            headers: headers
                .iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
            tls_config: tls_config.unwrap_or(default_tls_config),
        }
    }
}

#[async_trait::async_trait]
impl BinderClient for HttpClient {
    async fn request(&self, brequest: BinderRequestData) -> BinderResult<BinderResponse> {
        let everything = async move {
            // open connection
            let conn = endpoint_to_conn(self.tls_config.clone(), &self.endpoint)
                .await
                .map_err(|v| BinderError::Other(v.to_string()))?;
            // send request
            let mut req = Request::new(Method::Post, Url::parse(&self.endpoint).unwrap());
            for (header, value) in self.headers.iter() {
                req.insert_header(header.as_str(), value.as_str());
            }
            // set body
            let my_esk = x25519_dalek::EphemeralSecret::new(rand::rngs::OsRng {});
            let (encrypted, reply_key) = brequest.encrypt(my_esk, self.binder_lpk);
            req.set_body(bincode::serialize(&encrypted).unwrap());
            // do the request
            let mut response = async_h1::connect(conn, req)
                .await
                .map_err(|v| BinderError::Other(v.to_string()))?;

            // read response
            let response: EncryptedBinderResponse = bincode::deserialize(
                &response
                    .body_bytes()
                    .await
                    .map_err(|v| BinderError::Other(v.to_string()))?,
            )
            .map_err(|v| BinderError::Other(v.to_string()))?;
            response
                .decrypt(reply_key)
                .ok_or_else(|| BinderError::Other("decryption failure".into()))?
        };
        everything
            .timeout(Duration::from_secs(30))
            .await
            .ok_or_else(|| BinderError::Other("HTTP timeout in 30 secs".into()))?
    }
}

/// An HTTP-based BinderServer implementation. It uses `async-h1` underneath,
/// driven by an internal executor so that it exposes a synchronous interface.
pub struct HttpServer {
    breq_recv: Receiver<BinderRequest>,
    executor: Arc<smol::Executor<'static>>,
}

impl HttpServer {
    /// Creates a new HttpServer listening on the given SocketAddr with the given secret key.
    pub fn new(
        listen_on: SocketAddr,
        my_lsk: x25519_dalek::StaticSecret,
        on_time: impl Fn(Duration) + Send + Sync + 'static,
    ) -> Self {
        let executor = Arc::new(smol::Executor::new());
        let (breq_send, breq_recv) = smol::channel::unbounded();
        executor
            .spawn(httpserver_main_loop(
                executor.clone(),
                listen_on,
                my_lsk,
                breq_send,
                on_time,
            ))
            .detach();
        Self {
            breq_recv,
            executor,
        }
    }
}

/// Returns a connection, given an endpoint. Implements a happy-eyeballs-style race between the resolved addresses.
async fn endpoint_to_conn(
    tls_config: rustls::ClientConfig,
    endpoint: &str,
) -> std::io::Result<geph4_aioutils::ConnLike> {
    let url = Url::parse(endpoint).map_err(geph4_aioutils::to_ioerror)?;
    let host_string = url
        .host_str()
        .map(|v| v.to_owned())
        .ok_or_else(|| geph4_aioutils::to_ioerror("no host"))?;
    let port = url.port_or_known_default().unwrap_or(0);
    let composed = format!("{}:{}", host_string, port);
    let (send, recv) = smol::channel::unbounded();
    let mut _tasks: Vec<smol::Task<std::io::Result<()>>> = vec![];
    // race
    for (index, addr) in geph4_aioutils::resolve(&composed)
        .await?
        .into_iter()
        .enumerate()
    {
        let send = send.clone();
        let delay = Duration::from_millis(250) * index as u32;
        _tasks.push(smolscale::spawn(async move {
            smol::Timer::after(delay).await;
            let tcp_conn = smol::net::TcpStream::connect(addr).await?;
            let _ = send.send(tcp_conn).await;
            Ok(())
        }));
    }
    if let Ok(tcp_conn) = recv.recv().await {
        match url.scheme() {
            "https" => {
                let connector = TlsConnector::from(tls_config);
                let tls_conn = connector.connect(host_string, tcp_conn).await?;
                Ok(geph4_aioutils::connify(tls_conn))
            }
            _ => Ok(geph4_aioutils::connify(tcp_conn)),
        }
    } else if !_tasks.is_empty() {
        for task in _tasks {
            task.await?;
        }
        panic!("should not get here")
    } else {
        Err(std::io::Error::new(
            std::io::ErrorKind::Other,
            "DNS did not return any results",
        ))
    }
}

impl BinderServer for HttpServer {
    /// Next request
    fn next_request(&self) -> std::io::Result<BinderRequest> {
        smol::future::block_on(
            self.executor
                .run(async move { self.breq_recv.recv().await }),
        )
        .map_err(|e| std::io::Error::new(std::io::ErrorKind::Interrupted, e))
    }
}

async fn httpserver_main_loop(
    executor: Arc<smol::Executor<'static>>,
    listen_on: SocketAddr,
    my_lsk: x25519_dalek::StaticSecret,
    breq_send: Sender<BinderRequest>,
    on_time: impl Fn(Duration) + Send + Sync + 'static,
) -> Option<()> {
    let on_time = Arc::new(on_time);
    let listener = smol::net::TcpListener::bind(listen_on).await.unwrap();
    log::debug!("listening on {}", listen_on);
    loop {
        if let Ok((client, _)) = listener.accept().await {
            let my_lsk = my_lsk.clone();
            let breq_send = breq_send.clone();
            let peer_addr = client
                .peer_addr()
                .unwrap_or_else(|_| "0.0.0.0:0".parse().unwrap());
            log::trace!("new connection from {}", peer_addr);
            let on_time = on_time.clone();
            // start a new task
            executor
                .spawn(async move {
                    let my_lsk = my_lsk.clone();
                    drop(
                        async_h1::accept(client, |mut req| {
                            let probable_ip = req
                                .header("x-forwarded-for")
                                .and_then(|hv| hv.get(0))
                                .and_then(|f| f.as_str().split(',').next()?.parse::<IpAddr>().ok());
                            let start = Instant::now();
                            let my_lsk = my_lsk.clone();
                            let breq_send = breq_send.clone();
                            let on_time = on_time.clone();
                            async move {
                                // first read the request
                                let req: EncryptedBinderRequestData =
                                    bincode::deserialize(&req.body_bytes().await?)?;
                                let (request_data, reply_key) =
                                    req.decrypt(&my_lsk).ok_or_else(|| {
                                        http_types::Error::from_str(
                                            http_types::StatusCode::BadRequest,
                                            "decryption failure",
                                        )
                                    })?;
                                log::trace!("got request from {}: {:?}", peer_addr, request_data);
                                // form response
                                let (oneshot_send, oneshot_recv) = smol::channel::bounded(1);
                                let breq = BinderRequest {
                                    request_data,
                                    response_send: Box::new(move |val| {
                                        drop(oneshot_send.try_send(val))
                                    }),
                                    probable_ip,
                                };
                                breq_send.send(breq).await?;
                                // wait for response
                                let response: BinderResult<BinderResponse> =
                                    oneshot_recv.recv().await?;
                                log::trace!("response to {}: {:?}", peer_addr, response);
                                let response = encrypt_binder_response(&response, reply_key);
                                // send response
                                let mut resp = http_types::Response::new(StatusCode::Ok);
                                resp.set_body(bincode::serialize(&response).unwrap());
                                on_time(start.elapsed());
                                Ok(resp)
                            }
                        })
                        .await,
                    );
                })
                .detach();
        } else {
            smol::Timer::after(Duration::from_secs(1)).await;
        }
    }
}
--------------------------------------------------------------------------------
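Finally, a hypothetical end-to-end sketch wiring `HttpServer` and `HttpClient` together in one process. The listen address, the `on_time` callback, the startup sleep, and the always-`Okay` handler are all illustrative:

```rust
use geph4_binder_transport::{
    BinderClient, BinderRequestData, BinderResponse, BinderServer, HttpClient, HttpServer,
};

fn main() {
    // The binder's long-term X25519 keypair; clients must know the public half.
    let my_lsk = x25519_dalek::StaticSecret::new(rand::rngs::OsRng {});
    let my_lpk = x25519_dalek::PublicKey::from(&my_lsk);

    // Server side: next_request() is synchronous, so run it on its own thread.
    let server = HttpServer::new("127.0.0.1:8080".parse().unwrap(), my_lsk, |elapsed| {
        log::debug!("handled a request in {:?}", elapsed)
    });
    std::thread::spawn(move || loop {
        let breq = server.next_request().expect("server died");
        // A real binder would dispatch on breq.request_data here.
        breq.respond(Ok(BinderResponse::Okay));
    });
    // Crude: give the server thread a moment to bind its listener.
    std::thread::sleep(std::time::Duration::from_millis(200));

    // Client side: encrypts to the binder's public key and speaks HTTP to it.
    let client = HttpClient::new(my_lpk, "http://127.0.0.1:8080", &[], None);
    let resp = smol::block_on(client.request(BinderRequestData::GetExits));
    println!("{:?}", resp);
}
```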