├── .gitignore ├── Cargo.toml ├── src ├── parser.rs ├── main.rs ├── error.rs ├── mapper.rs ├── tracker.rs ├── magnet.rs └── peer.rs └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | Cargo.lock 6 | 7 | .vscode/ 8 | *.torrent 9 | *.Identifier 10 | *.bin 11 | downloads/ 12 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "torrent" 3 | version = "0.0.0" 4 | authors = ["you"] 5 | edition = "2024" 6 | 7 | 8 | 9 | [dependencies] 10 | serde = { version = "1.0.210", features = ["derive"] } 11 | env_logger = "0.11.5" 12 | serde_json = "1.0.128" 13 | bencode-encoder = "0.1.2" 14 | linked_hash_set = "0.1.4" 15 | sha1 = "0.10.6" 16 | base64 = "0.22.1" 17 | rand = "0.8.5" 18 | url = "2.5.2" 19 | reqwest = "0.12.8" 20 | serde_bencode = "0.2.4" 21 | log = "0.4.22" 22 | tokio = { version = "1.40.0", features = ["full"] } 23 | serde_bytes = "0.11.15" 24 | hex = "0.4.3" 25 | percent-encoding = "2.3.1" 26 | data-encoding = "2.3.1" 27 | futures = "0.3.31" 28 | 29 | -------------------------------------------------------------------------------- /src/parser.rs: -------------------------------------------------------------------------------- 1 | use crate::error::*; 2 | use crate::mapper::TorrentMetaData; 3 | use std::fs::File; 4 | use std::io::Write; 5 | use std::path::Path; 6 | 7 | /// Decodes the torrent file to a json 8 | pub fn decode_json(bpath: &str, opath: &str) -> Result<()> { 9 | let input_path = Path::new(bpath); 10 | let output_path = Path::new(opath); 11 | 12 | if let Some(parent) = output_path.parent() { 13 | if !parent.exists() { 14 | std::fs::create_dir_all(parent)?; 15 | } 16 | } 17 | 18 | let torrent_bytes = std::fs::read(input_path)?; 19 | let torrent: TorrentMetaData = 
serde_bencode::from_bytes(&torrent_bytes)?; 20 | let json_content = serde_json::to_string_pretty(&torrent)?; 21 | let mut file = File::create(output_path)?; 22 | 23 | file.write_all(json_content.as_bytes())?; 24 | 25 | println!( 26 | "Successfully decoded torrent file to JSON at: {}", 27 | output_path.display() 28 | ); 29 | Ok(()) 30 | } 31 | 32 | #[cfg(test)] 33 | mod tests { 34 | use super::*; 35 | 36 | #[test] 37 | fn test_decode_json() { 38 | decode_json( 39 | "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent", 40 | "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].json", 41 | ) 42 | .unwrap(); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | mod error; 2 | mod magnet; 3 | mod mapper; 4 | mod parser; 5 | mod peer; 6 | mod tracker; 7 | 8 | use std::io; 9 | 10 | use error::TorrentError; 11 | use mapper::TorrentMetaData; 12 | use peer::PieceDownloader; 13 | use tracker::{generate_peer_id, request_peers}; 14 | 15 | #[tokio::main] 16 | async fn main() -> Result<(), TorrentError> { 17 | let mut input_path = String::new(); 18 | print!("Enter your torrent file path: "); 19 | io::stdin() 20 | .read_line(&mut input_path) 21 | .expect("failed to read from stdin"); 22 | 23 | print!("Now enter your output path: "); 24 | 25 | let mut output_path = String::new(); 26 | io::stdin() 27 | .read_line(&mut output_path) 28 | .expect("failed to read from stdin"); 29 | 30 | let torrent_mta = TorrentMetaData::from_trnt_file(&input_path)?; 31 | let info_hash = torrent_mta.calculate_info_hash()?; 32 | 33 | let peers = request_peers(&torrent_mta) 34 | .await 35 | .expect("request peers failed!"); 36 | if peers.is_empty() { 37 | return Err(TorrentError::InsufficientSeeds); 38 | } 39 | let peer_id = generate_peer_id(); 40 | let mut downloader = PieceDownloader::new(peers, info_hash, peer_id); 41 | downloader 42 | 
.download_torrent(&torrent_mta, &output_path) 43 | .await?; 44 | Ok(()) 45 | } 46 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Approaching a torrent downloader from a microservice architecture perspective is an interesting way to modularize the system and potentially make it more scalable and maintainable. Here's a breakdown of potential microservices for a torrent downloading system: 2 | 3 | 4 | 5 | ```mermaid 6 | graph TD 7 | A[API Gateway] --> B[Torrent Parser Service] 8 | A --> C[Peer Discovery Service] 9 | A --> D[Download Manager Service] 10 | A --> E[File Storage Service] 11 | A --> F[User Management Service] 12 | C --> G[Tracker Communication Service] 13 | C --> H[DHT Service] 14 | D --> I[Peer Connection Service] 15 | D --> J[Piece Selection Service] 16 | E --> K[File Assembly Service] 17 | 18 | ``` 19 | 20 | Let's break down each of these services and their responsibilities: 21 | 22 | 1. API Gateway: 23 | - [ ] Handles external requests 24 | - [ ] Routes requests to appropriate microservices 25 | - [ ] Manages authentication and rate limiting 26 | 27 | 2. Torrent Parser Service: 28 | - [x] Parses .torrent files and magnet links 29 | - [x] Extracts metadata (file info, trackers, piece hashes) 30 | 31 | 3. Peer Discovery Service: 32 | - [x] Coordinates peer discovery methods 33 | - [ ] Interfaces with Tracker Communication and DHT services 34 | 35 | 4. Tracker Communication Service: 36 | - [x] Communicates with trackers to get peer lists 37 | - [x] Handles tracker protocol specifics 38 | 39 | 5. DHT (Distributed Hash Table) Service: 40 | - [ ] Implements DHT protocol for trackerless torrents 41 | - [ ] Manages DHT node connections and queries 42 | 43 | 6. Download Manager Service: 44 | - [x] Orchestrates the overall download process 45 | - [x] Manages download queues and priorities 46 | 47 | 7. 
Peer Connection Service: 48 | - [x] Establishes and manages connections to peers 49 | - [x] Implements BitTorrent protocol messaging 50 | 51 | 8. Piece Selection Service: 52 | - [x] Implements piece selection algorithms (e.g., rarest first) 53 | - [x] Tracks piece availability across peers 54 | 55 | 9. File Storage Service: 56 | - [x] Handles writing downloaded pieces to disk 57 | - [x] Manages file allocation and disk space 58 | 59 | 10. File Assembly Service: 60 | - [x] Assembles downloaded pieces into complete files 61 | - [x] Verifies file integrity 62 | 63 | 11. User Management Service: 64 | - [ ] Handles user accounts, if applicable 65 | - [ ] Manages user preferences and download history 66 | 67 | Each of these services could be implemented as a separate microservice, potentially in different languages or using different technologies as appropriate. They would communicate via APIs, possibly using REST or gRPC. 68 | 69 | Key considerations for this architecture: 70 | 71 | 1. Service Discovery: Implement a way for services to find and communicate with each other. 72 | 73 | 2. Data Consistency: Ensure data consistency across services, especially for shared state like download progress. 74 | 75 | 3. Fault Tolerance: Design each service to be resilient and the overall system to handle partial failures. 76 | 77 | 4. Scalability: Design services to be independently scalable based on load. 78 | 79 | 5. Monitoring and Logging: Implement comprehensive logging and monitoring across all services. 80 | 81 | 6. Security: Ensure secure communication between services and proper access controls. 82 | 83 | 7. Testing: Implement thorough unit and integration testing for each service and the system as a whole. 
84 | 85 | 86 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | #[derive(Debug)] 4 | #[allow(dead_code)] 5 | pub enum TorrentError { 6 | // Network errors 7 | ConnectionTimedOut(String), 8 | ConnectionFailed(String), 9 | TrackerError(String), 10 | PeerError(String), 11 | 12 | // protocol related errors 13 | InvalidHandshake(String), 14 | InvalidMessage(String), 15 | ProtocolViolation(String), 16 | 17 | // file or io related errors 18 | FileNotFound(String), 19 | PermissionDenied(String), 20 | DiskFull, 21 | IoError(std::io::Error), 22 | 23 | //parsing related errors 24 | InvalidTorrentFile(String), 25 | InvalidMagnetLink(String), 26 | BencodeError(serde_bencode::Error), 27 | 28 | //downloading errors 29 | PieceVerificationFailed(u32), 30 | NoAvailablePeers(u32), 31 | DownloadTimedout, 32 | InsufficientSeeds, 33 | 34 | //config errs 35 | InvalidConfigs(String), 36 | } 37 | 38 | impl fmt::Display for TorrentError { 39 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 40 | match self { 41 | TorrentError::ConnectionTimedOut(msg) => write!(f, "Connection Timed Out: {}", msg), 42 | TorrentError::ConnectionFailed(msg) => write!(f, "Connection failed: {}", msg), 43 | TorrentError::TrackerError(msg) => write!(f, "Tracker error: {}", msg), 44 | TorrentError::PeerError(msg) => write!(f, "Peer error: {}", msg), 45 | TorrentError::InvalidHandshake(msg) => write!(f, "Invalid Handshake: {}", msg), 46 | TorrentError::InvalidMessage(msg) => write!(f, "Invalid message: {}", msg), 47 | TorrentError::ProtocolViolation(msg) => write!(f, "Protocol violation: {}", msg), 48 | TorrentError::FileNotFound(path) => write!(f, "File not found: {}", path), 49 | TorrentError::PermissionDenied(path) => write!(f, "Permission denied: {}", path), 50 | TorrentError::DiskFull => write!(f, "Disk is full."), 51 | TorrentError::IoError(e) => 
write!(f, "IO error: {}", e), 52 | TorrentError::InvalidTorrentFile(msg) => write!(f, "Invalid torrent file: {}", msg), 53 | TorrentError::InvalidMagnetLink(msg) => write!(f, "Invalid magnet link: {}", msg), 54 | TorrentError::BencodeError(e) => write!(f, "Bencode error: {}", e), 55 | TorrentError::PieceVerificationFailed(piece) => { 56 | write!(f, "Piece {} verification failed", piece) 57 | } 58 | TorrentError::NoAvailablePeers(piece) => { 59 | write!(f, "No available peers for piece {}", piece) 60 | } 61 | TorrentError::DownloadTimedout => write!(f, "Download timed out"), 62 | TorrentError::InsufficientSeeds => write!(f, "Insufficient seeds"), 63 | TorrentError::InvalidConfigs(msg) => write!(f, "Invalid configurations: {}", msg), 64 | } 65 | } 66 | } 67 | 68 | impl std::error::Error for TorrentError {} 69 | 70 | pub type Result = std::result::Result; 71 | 72 | impl From for TorrentError { 73 | fn from(value: std::io::Error) -> Self { 74 | match value.kind() { 75 | std::io::ErrorKind::NotFound => TorrentError::FileNotFound(value.to_string()), 76 | std::io::ErrorKind::PermissionDenied => { 77 | TorrentError::PermissionDenied(value.to_string()) 78 | } 79 | std::io::ErrorKind::TimedOut => TorrentError::ConnectionTimedOut(value.to_string()), 80 | _ => TorrentError::IoError(value), 81 | } 82 | } 83 | } 84 | 85 | impl From for TorrentError { 86 | fn from(value: serde_bencode::Error) -> Self { 87 | TorrentError::BencodeError(value) 88 | } 89 | } 90 | 91 | impl From for TorrentError { 92 | fn from(value: reqwest::Error) -> Self { 93 | if value.is_timeout() { 94 | TorrentError::ConnectionTimedOut(value.to_string()) 95 | } else if value.is_connect() { 96 | TorrentError::ConnectionFailed(value.to_string()) 97 | } else { 98 | TorrentError::TrackerError(value.to_string()) 99 | } 100 | } 101 | } 102 | 103 | impl From for TorrentError { 104 | fn from(value: serde_json::error::Error) -> Self { 105 | TorrentError::InvalidTorrentFile(value.to_string()) 106 | } 107 | } 108 | 
-------------------------------------------------------------------------------- /src/mapper.rs: -------------------------------------------------------------------------------- 1 | use crate::error::*; 2 | use linked_hash_set::LinkedHashSet; 3 | use serde::{Deserialize, Serialize}; 4 | use serde_bytes::ByteBuf; 5 | use sha1::Digest; 6 | use sha1::Sha1; 7 | use std::collections::HashMap; 8 | use std::path::Path; 9 | 10 | /// Stores length and path parameters in a torrent file 11 | #[derive(Serialize, Deserialize, Debug)] 12 | pub struct TorrentFile { 13 | pub length: i64, 14 | pub path: Vec, 15 | } 16 | 17 | /// Stores the torrent info present in a torrent file 18 | #[derive(Debug, Serialize, Deserialize)] 19 | pub struct TorrentInfo { 20 | pub name: String, 21 | #[serde(rename = "piece length")] 22 | pub piece_length: i64, 23 | #[serde(with = "serde_bytes")] 24 | pub pieces: ByteBuf, 25 | pub files: Option>, 26 | pub length: Option, 27 | #[serde(default)] 28 | pub private: i64, 29 | } 30 | 31 | /// Stores the Actual MetaData of a torrent file 32 | #[derive(Debug, Serialize, Deserialize)] 33 | pub struct TorrentMetaData { 34 | announce: String, 35 | #[serde(rename = "announce-list")] 36 | announce_list: Option>>, 37 | azureus_properties: Option>, 38 | #[serde(rename = "created by")] 39 | created_by: String, 40 | #[serde(rename = "creation date")] 41 | creation_date: i64, 42 | encoding: Option, 43 | pub info: TorrentInfo, 44 | publisher: Option, 45 | #[serde(rename = "publisher-url")] 46 | publisher_url: Option, 47 | } 48 | 49 | impl TorrentMetaData { 50 | pub fn calculate_total_pieces(&self) -> u32 { 51 | let piece_bytes = self.info.pieces.as_ref(); 52 | let total = piece_bytes.len() / 20; 53 | total as u32 54 | } 55 | /// Reads a torrent file and maps ints data to a TorrentMetaData format 56 | pub fn from_trnt_file(path: &str) -> Result { 57 | let file_path = Path::new(path); 58 | let bytes = std::fs::read(file_path)?; 59 | let torrent: TorrentMetaData = 
serde_bencode::from_bytes(&bytes)?; 60 | Ok(torrent) 61 | } 62 | 63 | /// gets tracker urls 64 | pub fn get_tracker_url(&self) -> Vec { 65 | let mut trackers = LinkedHashSet::new(); 66 | let main_url = self.announce.clone(); 67 | trackers.insert(main_url); 68 | 69 | self.announce_list.iter().flatten().for_each(|x| { 70 | for url in x.iter() { 71 | trackers.insert(url.clone()); 72 | } 73 | }); 74 | trackers.into_iter().collect() 75 | } 76 | 77 | /// Gets the pieces length for each info 78 | pub fn get_pieces_length(&self) -> i64 { 79 | self.info.piece_length 80 | } 81 | 82 | /// Gets pieces hashes stored in Info 83 | pub fn get_pieces_hashes(&self) -> Vec<[u8; 20]> { 84 | let pieces_bytes = self.info.pieces.as_ref(); 85 | 86 | if pieces_bytes.len() % 20 != 0 { 87 | panic!("The length of the pieces string is not a multiple of 20"); 88 | } 89 | 90 | pieces_bytes 91 | .chunks(20) 92 | .map(|chunk| { 93 | let mut hash = [0u8; 20]; 94 | hash.copy_from_slice(chunk); 95 | hash 96 | }) 97 | .collect() 98 | } 99 | 100 | /// Gets the total size of files in a torrent file 101 | pub fn get_total_size(&self) -> i64 { 102 | if let Some(files) = &self.info.files { 103 | return files.iter().map(|file| file.length).sum(); 104 | } 105 | self.info.length.unwrap_or(0) 106 | } 107 | 108 | /// Gets the file structure for later use 109 | pub fn get_file_structure(&self) -> Vec<(String, i64)> { 110 | if let Some(files) = &self.info.files { 111 | files 112 | .iter() 113 | .map(|file| (file.path.join("/"), file.length)) 114 | .collect() 115 | } else { 116 | vec![(self.info.name.clone(), self.info.length.unwrap_or(0))] 117 | } 118 | } 119 | 120 | /// Calculates the complete info hash 121 | pub fn calculate_info_hash(&self) -> Result<[u8; 20]> { 122 | let info_bencoded = serde_bencode::to_bytes(&self.info)?; 123 | let mut hasher = Sha1::new(); 124 | hasher.update(&info_bencoded); 125 | Ok(hasher.finalize().into()) 126 | } 127 | } 128 | 129 | #[cfg(test)] 130 | mod tests { 131 | use 
crate::tracker::urlencode; 132 | 133 | use super::*; 134 | 135 | macro_rules! test_torrent { 136 | ($name:ident,$method:ident) => { 137 | #[test] 138 | fn $name() { 139 | let path = "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent"; 140 | let result = TorrentMetaData::from_trnt_file(path).unwrap(); 141 | let output = result.$method(); 142 | 143 | println!("{}: {:?}", stringify!($method), output); 144 | } 145 | }; 146 | } 147 | 148 | #[test] 149 | fn test_from_file() { 150 | let path = "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent"; 151 | let result = TorrentMetaData::from_trnt_file(path).unwrap(); 152 | println!("torrent file publishe: {:?}", result.publisher) 153 | } 154 | 155 | test_torrent!(test_get_pieces_length, get_pieces_length); 156 | test_torrent!(test_get_file_structure, get_file_structure); 157 | test_torrent!(test_get_tracker_url, get_tracker_url); 158 | test_torrent!(test_get_total_size, get_total_size); 159 | test_torrent!(test_get_pieces_hashes, get_pieces_hashes); 160 | test_torrent!(test_hash_info, calculate_info_hash); 161 | #[tokio::test] 162 | async fn debug_info_hash() { 163 | let path = r"C:\Users\Lenovo\Downloads\ubuntu-24.10-desktop-amd64.iso.torrent"; 164 | let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap(); 165 | 166 | let info_hash = torrent_meta_data.calculate_info_hash().unwrap(); 167 | // println!("Raw info hash bytes: {:?}", info_hash); 168 | println!("URL encoded info hash: {}", urlencode(&info_hash)); 169 | println!( 170 | "Url encoded info hash using earlier funct: {}", 171 | urlencode(&info_hash) 172 | ); 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /src/tracker.rs: -------------------------------------------------------------------------------- 1 | use crate::mapper::*; 2 | use crate::peer::PeerInfo; 3 | use rand::Rng; 4 | use serde::Deserialize; 5 | use serde_bytes::ByteBuf; 6 | use std::net::Ipv4Addr; 7 | use 
std::time::Duration; 8 | use tokio::task; 9 | use tokio::time::timeout; 10 | use url::Url; 11 | 12 | const UDP_TIMEOUT: Duration = Duration::from_secs(10); 13 | 14 | //TODO: Add TorrentError to this module 15 | #[allow(dead_code)] 16 | #[derive(Debug, Deserialize)] 17 | pub struct TrackerResponse { 18 | interval: Option, 19 | #[serde(default)] 20 | peer: Vec, 21 | #[serde(rename = "peers", default)] 22 | peers_binary: Option, 23 | } 24 | 25 | /// Used to generate Peer Id that is later used to request trackers 26 | pub fn generate_peer_id() -> [u8; 20] { 27 | let mut rng = rand::thread_rng(); 28 | let mut peer_id = [0u8; 20]; 29 | 30 | rng.fill(&mut peer_id); 31 | 32 | peer_id[0] = b'-'; 33 | peer_id[1..7].copy_from_slice(b"TR2940"); 34 | 35 | peer_id 36 | } 37 | 38 | /// Request Peers in order to get the Peer Info that is needed to establish connections. 39 | #[allow(dead_code)] 40 | pub async fn request_peers( 41 | torrent: &TorrentMetaData, 42 | ) -> Result, Box> { 43 | let info_hash = torrent.calculate_info_hash()?; 44 | let trackers = torrent.get_tracker_url(); 45 | let total_length = torrent.get_total_size(); 46 | 47 | let mut handles = vec![]; 48 | 49 | for tracker in trackers { 50 | if tracker.starts_with("udp://") { 51 | println!("Connecting to UDP tracker: {}", tracker); 52 | //continue; 53 | let tracker = tracker.to_string(); 54 | 55 | let handle = task::spawn(async move { 56 | match tokio::time::timeout( 57 | Duration::from_secs(20), 58 | request_udp_tracker(&tracker, &info_hash, total_length), 59 | ) 60 | .await 61 | { 62 | Ok(Ok(peers)) => Ok((tracker, peers)), 63 | Ok(Err(e)) => Err(format!("Failed to connect to tracker {}: {:?}", tracker, e)), 64 | Err(_) => Err(format!("Timeout connecting to tracker {}", tracker)), 65 | } 66 | }); 67 | handles.push(handle); 68 | } else { 69 | let tracker = tracker.to_string(); 70 | 71 | let handle = task::spawn(async move { 72 | match tokio::time::timeout( 73 | Duration::from_secs(20), 74 | 
request_http_trackers(&tracker, &info_hash, total_length), 75 | ) 76 | .await 77 | { 78 | Ok(Ok(peers)) => Ok((tracker, peers)), 79 | Ok(Err(e)) => Err(format!("Failed to connect to tracker {}: {:?}", tracker, e)), 80 | Err(_) => Err(format!("Timeout connecting to tracker {}", tracker)), 81 | } 82 | }); 83 | 84 | handles.push(handle); 85 | } 86 | } 87 | 88 | for handle in handles { 89 | match handle.await { 90 | Ok(Ok((tracker, peers))) => { 91 | println!( 92 | "Successfully retrieved {} peers from tracker: {}", 93 | peers.len(), 94 | tracker 95 | ); 96 | return Ok(peers); 97 | } 98 | Ok(Err(e)) => eprintln!("{}", e), 99 | Err(e) => eprintln!("Task failed: {:?}", e), 100 | } 101 | } 102 | 103 | Err("Failed to retrieve peers from any tracker".into()) 104 | } 105 | 106 | /// Sending connection request with a socker 107 | pub async fn send_connection_request( 108 | socket: &tokio::net::UdpSocket, 109 | ) -> Result> { 110 | let protocol_id: u64 = 0x0000041727101980; // Fixed protocol ID 111 | let action: u32 = 0; // connect 112 | let transaction_id: u32 = rand::random(); 113 | 114 | let mut request = Vec::with_capacity(16); 115 | request.extend_from_slice(&protocol_id.to_be_bytes()); 116 | request.extend_from_slice(&action.to_be_bytes()); 117 | request.extend_from_slice(&transaction_id.to_be_bytes()); 118 | 119 | socket.send(&request).await?; 120 | 121 | let mut response = vec![0u8; 16]; 122 | let size = socket.recv(&mut response).await?; 123 | 124 | if size != 16 { 125 | return Err(format!("Invalid connection response size: {}", size).into()); 126 | } 127 | 128 | let resp_action = u32::from_be_bytes(response[0..4].try_into()?); 129 | let resp_transaction_id = u32::from_be_bytes(response[4..8].try_into()?); 130 | 131 | if resp_action != 0 { 132 | return Err(format!("Invalid action in connection response: {}", resp_action).into()); 133 | } 134 | if resp_transaction_id != transaction_id { 135 | return Err("Transaction ID mismatch in connection response".into()); 136 | } 
137 | 138 | Ok(u64::from_be_bytes(response[8..16].try_into()?)) 139 | } 140 | /// Request Trakcers using udp links! 141 | pub async fn request_udp_tracker( 142 | announce: &str, 143 | info_hash: &[u8; 20], 144 | total_length: i64, 145 | ) -> Result, Box> { 146 | let url = Url::parse(announce)?; 147 | let host = url.host_str().ok_or("No host in tracker URL")?; 148 | let port = url.port().unwrap_or(80); 149 | 150 | // Bind to an IPv4 address specifically 151 | let socket = tokio::net::UdpSocket::bind("0.0.0.0:0").await?; 152 | 153 | let mut addrs = tokio::net::lookup_host((host, port)).await?; 154 | let addr = addrs 155 | .find(|addr| addr.is_ipv4()) 156 | .ok_or("No IPv4 address found for tracker")?; 157 | 158 | match timeout(UDP_TIMEOUT, socket.connect(addr)).await { 159 | Ok(result) => result?, 160 | Err(_) => return Err("UDP tracker connection timeout".into()), 161 | } 162 | 163 | let mut retries = 2; 164 | let mut connection_id = None; 165 | 166 | while retries > 0 && connection_id.is_none() { 167 | match timeout(UDP_TIMEOUT, send_connection_request(&socket)).await { 168 | Ok(Ok(id)) => { 169 | connection_id = Some(id); 170 | break; 171 | } 172 | Ok(Err(e)) => { 173 | println!("Connection request failed, retries left {}: {}", retries, e); 174 | retries -= 1; 175 | } 176 | Err(_) => { 177 | println!("Connection request timed out, retries left {}", retries); 178 | retries -= 1; 179 | } 180 | } 181 | } 182 | 183 | let connection_id = connection_id.ok_or("Failed to get connection ID after retries")?; 184 | 185 | let transaction_id: u32 = rand::random(); 186 | let peer_id = generate_peer_id(); 187 | 188 | let mut request = Vec::with_capacity(98); 189 | request.extend_from_slice(&connection_id.to_be_bytes()); // 8 bytes 190 | request.extend_from_slice(&1_u32.to_be_bytes()); // 4 bytes - action (1 for announce) 191 | request.extend_from_slice(&transaction_id.to_be_bytes()); // 4 bytes 192 | request.extend_from_slice(info_hash); // 20 bytes 193 | 
request.extend_from_slice(&peer_id); // 20 bytes 194 | request.extend_from_slice(&0_i64.to_be_bytes()); // 8 bytes - downloaded 195 | request.extend_from_slice(&total_length.to_be_bytes()); // 8 bytes - left 196 | request.extend_from_slice(&0_i64.to_be_bytes()); // 8 bytes - uploaded 197 | request.extend_from_slice(&0_i32.to_be_bytes()); // 4 bytes - event 198 | request.extend_from_slice(&0_u32.to_be_bytes()); // 4 bytes - IP address 199 | request.extend_from_slice(&0_u32.to_be_bytes()); // 4 bytes - key 200 | request.extend_from_slice(&(-1_i32).to_be_bytes()); // 4 bytes - num_want 201 | request.extend_from_slice(&6881_u16.to_be_bytes()); // 2 bytes - port 202 | 203 | retries = 2; 204 | while retries > 0 { 205 | // Send announce 206 | match timeout(UDP_TIMEOUT, socket.send(&request)).await { 207 | Ok(Ok(_)) => {} 208 | Ok(Err(e)) => { 209 | println!("Failed to send announce, retries left {}: {}", retries, e); 210 | retries -= 1; 211 | continue; 212 | } 213 | Err(_) => { 214 | println!("Announce send timed out, retries left {}", retries); 215 | retries -= 1; 216 | continue; 217 | } 218 | } 219 | 220 | let mut response = vec![0u8; 1024]; 221 | match timeout(UDP_TIMEOUT, socket.recv(&mut response)).await { 222 | Ok(Ok(size)) => { 223 | response.truncate(size); 224 | if size < 8 { 225 | println!("Response too short: {} bytes", size); 226 | retries -= 1; 227 | continue; 228 | } 229 | 230 | let action = u32::from_be_bytes(response[0..4].try_into()?); 231 | let resp_transaction_id = u32::from_be_bytes(response[4..8].try_into()?); 232 | println!( 233 | "Response action: {}, transaction_id: {}", 234 | action, resp_transaction_id 235 | ); 236 | 237 | if resp_transaction_id != transaction_id { 238 | println!("Transaction ID mismatch"); 239 | retries -= 1; 240 | continue; 241 | } 242 | 243 | match action { 244 | 1 => { 245 | let interval = u32::from_be_bytes(response[8..12].try_into()?); 246 | let leechers = u32::from_be_bytes(response[12..16].try_into()?); 247 | let seeders 
= u32::from_be_bytes(response[16..20].try_into()?); 248 | 249 | println!( 250 | "Success! Interval: {}s, Leechers: {}, Seeders: {}", 251 | interval, leechers, seeders 252 | ); 253 | 254 | let mut peers = Vec::new(); 255 | for chunk in response[20..].chunks(6) { 256 | if chunk.len() == 6 { 257 | let ip = 258 | format!("{}.{}.{}.{}", chunk[0], chunk[1], chunk[2], chunk[3]); 259 | let port = u16::from_be_bytes([chunk[4], chunk[5]]); 260 | peers.push(PeerInfo { ip, port }); 261 | } 262 | } 263 | return Ok(peers); 264 | } 265 | 2 => { 266 | // Scrape response 267 | println!("Got scrape response, trying announce again..."); 268 | 269 | let mut announce_request = Vec::with_capacity(98); 270 | announce_request.extend_from_slice(&connection_id.to_be_bytes()); 271 | announce_request.extend_from_slice(&1_u32.to_be_bytes()); // Action 1 for announce 272 | announce_request.extend_from_slice(&transaction_id.to_be_bytes()); 273 | 274 | // Convert info_hash to proper network byte order 275 | let mut formatted_hash = [0u8; 20]; 276 | for i in 0..20 { 277 | formatted_hash[i] = info_hash[19 - i]; 278 | } 279 | announce_request.extend_from_slice(&formatted_hash); 280 | 281 | announce_request.extend_from_slice(&peer_id); 282 | announce_request.extend_from_slice(&0_i64.to_be_bytes()); // downloaded 283 | announce_request.extend_from_slice(&total_length.to_be_bytes()); // left 284 | announce_request.extend_from_slice(&0_i64.to_be_bytes()); // uploaded 285 | announce_request.extend_from_slice(&0_i32.to_be_bytes()); // event 286 | announce_request.extend_from_slice(&0_u32.to_be_bytes()); // IP 287 | announce_request.extend_from_slice(&0_u32.to_be_bytes()); // key 288 | announce_request.extend_from_slice(&(-1_i32).to_be_bytes()); // num_want 289 | announce_request.extend_from_slice(&6881_u16.to_be_bytes()); // port 290 | 291 | socket.send(&announce_request).await?; 292 | 293 | let mut retry_response = vec![0u8; 1024]; 294 | let retry_size = socket.recv(&mut retry_response).await?; 295 | 
retry_response.truncate(retry_size); 296 | 297 | if retry_size < 20 { 298 | return Err("Retry size too short for announce".into()); 299 | } 300 | let retry_action = u32::from_be_bytes(retry_response[0..4].try_into()?); 301 | if retry_action == 1 { 302 | let mut peers = Vec::new(); 303 | for chunk in retry_response[20..].chunks(6) { 304 | if chunk.len() == 6 { 305 | let ip = format!( 306 | "{}.{}.{}.{}", 307 | chunk[0], chunk[1], chunk[2], chunk[3] 308 | ); 309 | let port = u16::from_be_bytes([chunk[4], chunk[5]]); 310 | if &ip != "0.0.0.0" && port != 0 { 311 | peers.push(PeerInfo { ip, port }); 312 | } 313 | } 314 | } 315 | return Ok(peers); 316 | } else { 317 | return Err( 318 | "Failed to get proper announce response after scrape".into() 319 | ); 320 | }; 321 | } 322 | 3 => { 323 | // Error 324 | let error_msg = String::from_utf8_lossy(&response[8..]); 325 | println!("Got error response: {}", error_msg); 326 | retries -= 1; 327 | continue; 328 | } 329 | _ => { 330 | println!("Got unexpected action: {}", action); 331 | retries -= 1; 332 | continue; 333 | } 334 | } 335 | } 336 | Ok(Err(e)) => { 337 | println!( 338 | "Failed to receive announce response, retries left {}: {}", 339 | retries, e 340 | ); 341 | retries -= 1; 342 | } 343 | Err(_) => { 344 | println!("Announce receive timed out, retries left {}", retries); 345 | retries -= 1; 346 | } 347 | } 348 | } 349 | 350 | Err("Failed to get valid response after retries".into()) 351 | } 352 | /// Request trackers from http and udp origins 353 | pub async fn request_tracker( 354 | announce: &str, 355 | info_hash: &[u8; 20], 356 | total_length: i64, 357 | ) -> Result, Box> { 358 | if announce.starts_with("udp://") { 359 | return request_udp_tracker(announce, info_hash, total_length).await; 360 | } else if announce.starts_with("http://") || announce.starts_with("https://") { 361 | return request_http_trackers(announce, info_hash, total_length).await; 362 | } 363 | Err("Unsupported tracker protocol".into()) 364 | } 365 | 
/// Request Trackers based on the info that has been parsed from torrent file. 366 | pub async fn request_http_trackers( 367 | announce: &str, 368 | info_hash: &[u8; 20], 369 | total_length: i64, 370 | ) -> Result, Box> { 371 | let url = Url::parse(announce)?; 372 | let peer_id = generate_peer_id(); 373 | 374 | let q = format!( 375 | "?info_hash={}&peer_id={}&port=6881&uploaded=0&downloaded=0&compact=1&left={}", 376 | urlencode(info_hash), 377 | urlencode(&peer_id), 378 | total_length 379 | ); 380 | let full_url = format!("{}{}", url.as_str().trim_end_matches('/'), q); 381 | 382 | let response = match reqwest::get(full_url.clone()).await { 383 | Ok(bytes) => match bytes.bytes().await { 384 | Ok(byte) => byte, 385 | Err(e) => return Err(format!("Failed to get a response: {}", e).into()), 386 | }, 387 | Err(e) => return Err(format!("Failed to connect to {}:{}", url, e).into()), 388 | }; 389 | 390 | if response.starts_with(b"<") { 391 | return Err("Tracker returned HTML instead of bencoded data".into()); 392 | } 393 | 394 | let tracker_response: TrackerResponse = serde_bencode::de::from_bytes(&response) 395 | .map_err(|e| format!("failed to decode the bytes {}", e))?; 396 | let peers = if !tracker_response.peer.is_empty() { 397 | tracker_response.peer 398 | } else if let Some(binary_peer) = tracker_response.peers_binary { 399 | parse_binary_peers(&binary_peer) 400 | } else { 401 | return Err("No peers found in response".into()); 402 | }; 403 | 404 | if peers.is_empty() { 405 | Err("Tracker returned no peers".into()) 406 | } else { 407 | Ok(peers) 408 | } 409 | } 410 | 411 | pub fn parse_binary_peers(binary: &[u8]) -> Vec { 412 | binary 413 | .chunks(6) 414 | .filter_map(|chunk| { 415 | if chunk.len() == 6 { 416 | let ip = Ipv4Addr::new(chunk[0], chunk[1], chunk[2], chunk[3]).to_string(); 417 | let port = u16::from_be_bytes([chunk[4], chunk[5]]); 418 | Some(PeerInfo { ip, port }) 419 | } else { 420 | None 421 | } 422 | }) 423 | .collect() 424 | } 425 | 426 | /// 
Encodes url to a String 427 | pub fn urlencode(bytes: &[u8]) -> String { 428 | bytes 429 | .iter() 430 | .map(|&b| format!("%{:02x}", b)) 431 | .collect::() 432 | } 433 | 434 | #[cfg(test)] 435 | mod tests { 436 | 437 | use super::*; 438 | #[tokio::test] 439 | async fn test_request_http_tracker() { 440 | let path = "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent"; 441 | 442 | let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap(); 443 | println!("Got the torrent meta data"); 444 | 445 | let info_hash = torrent_meta_data.calculate_info_hash().unwrap(); 446 | let trackers = torrent_meta_data.get_tracker_url(); 447 | let total_length = torrent_meta_data.get_total_size(); 448 | 449 | let mut any_success = false; 450 | 451 | for tracker in trackers { 452 | println!("\nTrying tracker: {}", tracker); 453 | let result = if tracker.starts_with("udp") { 454 | match request_udp_tracker(&tracker, &info_hash, total_length).await { 455 | Ok(peers) => { 456 | println!("Successfully got {} peers from UDP tracker", peers.len()); 457 | any_success = true; 458 | Ok(peers) 459 | } 460 | Err(e) => { 461 | println!("UDP tracker failed: {}", e); 462 | Err(e) 463 | } 464 | } 465 | } else { 466 | match request_http_trackers(&tracker, &info_hash, total_length).await { 467 | Ok(peers) => { 468 | println!("Successfully got {} peers from HTTP tracker", peers.len()); 469 | any_success = true; 470 | Ok(peers) 471 | } 472 | Err(e) => { 473 | println!("HTTP tracker failed: {}", e); 474 | Err(e) 475 | } 476 | } 477 | }; 478 | 479 | // Print peer info if successful 480 | if let Ok(peers) = result { 481 | println!("First 5 peers from tracker {}:", tracker); 482 | for (i, peer) in peers.iter().take(5).enumerate() { 483 | println!(" Peer {}: {}:{}", i + 1, peer.ip, peer.port); 484 | } 485 | if peers.len() > 5 { 486 | println!(" ... 
and {} more peers", peers.len() - 5); 487 | } 488 | } 489 | } 490 | 491 | // Test passes if at least one tracker worked 492 | assert!(any_success, "No trackers successfully returned peers"); 493 | } 494 | } 495 | -------------------------------------------------------------------------------- /src/magnet.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | mapper::TorrentMetaData, 3 | peer::{Peer, PeerInfo}, 4 | tracker::{generate_peer_id, request_tracker}, 5 | }; 6 | use data_encoding::BASE32; 7 | use futures::future::join_all; 8 | use percent_encoding::percent_decode_str; 9 | use serde::{Deserialize, Serialize}; 10 | use sha1::{Digest, Sha1}; 11 | use std::{ 12 | collections::{HashMap, HashSet}, 13 | time::Duration, 14 | }; 15 | use tokio::time::timeout; 16 | use url::Url; 17 | 18 | const METADATA_PIECE_SIZE: usize = 16384; //metadata is chunked into 16kb pieces! 19 | const EXTENSION_HANDSHAKE_ID: usize = 0; 20 | const METADATA_EXTENSION_ID: usize = 1; 21 | const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(10); 22 | const PIECE_TIMEOUT: Duration = Duration::from_secs(30); 23 | 24 | //TODO: Add TorrentError to this module 25 | #[derive(Debug, Clone)] 26 | pub struct MagnetInfo { 27 | pub info_hash: [u8; 20], 28 | pub display_name: Option, 29 | pub trackers: Vec, 30 | pub peers: Option>, 31 | } 32 | 33 | #[derive(Debug, Serialize, Deserialize)] 34 | pub struct ExtensionHandshake { 35 | m: HashMap, 36 | metadata_size: Option, 37 | #[serde(skip_serializing_if = "Option::is_none")] 38 | v: Option, 39 | } 40 | 41 | #[derive(Debug, Serialize, Deserialize)] 42 | pub struct MetaDataMessage { 43 | msg_type: i64, 44 | piece: i64, 45 | #[serde(skip_serializing_if = "Option::is_none")] 46 | total_size: Option, 47 | } 48 | 49 | impl MagnetInfo { 50 | pub async fn to_torrent_metadata(&self) -> Result> { 51 | let mut all_peers = HashSet::new(); 52 | 53 | println!( 54 | "Attempting to get peers from {} trackers", 55 | 
self.trackers.len() 56 | ); 57 | for tracker in &self.trackers { 58 | println!("Trying tracker: {}", tracker); 59 | match request_tracker(tracker, &self.info_hash, 0).await { 60 | Ok(peers) => { 61 | peers.iter().for_each(|x| { 62 | all_peers.insert(x.clone()); 63 | }); 64 | } 65 | Err(e) => { 66 | println!("Failed to get peers from tracker {}: {}", tracker, e); 67 | continue; 68 | } 69 | } 70 | } 71 | 72 | if all_peers.is_empty() { 73 | return Err("Could not get any peers from any tracker".into()); 74 | } 75 | 76 | println!("Total peers collected: {}", all_peers.len()); 77 | match self.fetch_metadata_from_peers(&all_peers).await { 78 | Ok(metadata_bytes) => { 79 | let torrent_metadata: TorrentMetaData = serde_bencode::from_bytes(&metadata_bytes) 80 | .map_err(|e| format!("Failed to decode metadata: {}", e))?; 81 | Ok(torrent_metadata) 82 | } 83 | Err(e) => Err(format!( 84 | "Failed to get metadata from {} peers: {}", 85 | all_peers.len(), 86 | e 87 | ) 88 | .into()), 89 | } 90 | } 91 | 92 | pub async fn fetch_metadata_from_peer( 93 | &self, 94 | peer_info: &PeerInfo, 95 | ) -> Result, Box> { 96 | let mut peer = Peer::new(peer_info.ip.clone(), peer_info.port); 97 | 98 | match peer.connect().await { 99 | Ok(_) => println!( 100 | "Connected to peer: {}:{}", 101 | peer.peer_info.ip.clone(), 102 | peer.peer_info.port 103 | ), 104 | Err(e) => println!( 105 | "Failed to connect to peer {}:{} - {}", 106 | peer.peer_info.ip.clone(), 107 | peer_info.port, 108 | e 109 | ), 110 | } 111 | 112 | let mut extension_bits = [0u8; 8]; 113 | extension_bits[5] |= 0x10; 114 | let peer_id = generate_peer_id(); 115 | 116 | match peer.handshake(self.info_hash, peer_id).await { 117 | Ok(_) => println!("Handshake successful!"), 118 | Err(e) => println!("Handshake failed: {}", e), 119 | } 120 | 121 | let mut extension_handshake = HashMap::new(); 122 | extension_handshake.insert("ut_metadata".to_string(), METADATA_EXTENSION_ID as i64); 123 | 124 | let handshake_msg = ExtensionHandshake { 125 
| m: extension_handshake, 126 | metadata_size: None, 127 | v: Some("RU0001".to_string()), 128 | }; 129 | 130 | let handshake_bytes = serde_bencode::to_bytes(&handshake_msg).unwrap(); 131 | 132 | self.send_extension_message(&mut peer, EXTENSION_HANDSHAKE_ID as u8, &handshake_bytes) 133 | .await?; 134 | 135 | let response = timeout(HANDSHAKE_TIMEOUT, peer.receive_msg()) 136 | .await 137 | .unwrap()?; 138 | 139 | if response[0] != 20 { 140 | return Err("Invalid Extension message".into()); 141 | } 142 | 143 | let handshake_resp: ExtensionHandshake = serde_bencode::from_bytes(&response[2..]) 144 | .map_err(|e| format!("Failed handshake mapping :{}", e)) 145 | .unwrap(); 146 | 147 | let metadata_size = handshake_resp 148 | .metadata_size 149 | .ok_or("No metadata received!") 150 | .unwrap(); 151 | let num_pieces = 152 | (metadata_size + METADATA_PIECE_SIZE as i64 - 1) / (METADATA_PIECE_SIZE as i64); 153 | 154 | // Request all metadata pieces 155 | let mut metadata = vec![0u8; metadata_size as usize]; 156 | 157 | for piece in 0..num_pieces { 158 | let start = piece as usize * METADATA_PIECE_SIZE; 159 | let end = std::cmp::min(start + METADATA_PIECE_SIZE, metadata_size as usize); 160 | 161 | let piece_data = self 162 | .get_piece_resp(piece, &mut peer, metadata_size, start, end) 163 | .await?; 164 | 165 | metadata[start..end].copy_from_slice(&piece_data); 166 | } 167 | 168 | // Verify metadata hash matches info_hash 169 | let mut hasher = Sha1::new(); 170 | Digest::update(&mut hasher, &metadata); 171 | //hasher.update(&metadata); 172 | let hash: [u8; 20] = hasher.finalize().into(); 173 | 174 | if hash != self.info_hash { 175 | return Err("Metadata hash mismatch".into()); 176 | } 177 | 178 | Ok(metadata) 179 | } 180 | 181 | async fn get_piece_resp( 182 | &self, 183 | piece: i64, 184 | peer: &mut Peer, 185 | metadata_size: i64, 186 | start: usize, 187 | end: usize, 188 | ) -> Result, Box> { 189 | let msg = MetaDataMessage { 190 | msg_type: 0, // request 191 | piece, 192 | 
total_size: None, 193 | }; 194 | 195 | let msg_bytes = serde_bencode::to_bytes(&msg)?; 196 | self.send_extension_message(peer, METADATA_EXTENSION_ID as u8, &msg_bytes) 197 | .await?; 198 | 199 | // Wait for piece response 200 | let piece_data = timeout(PIECE_TIMEOUT, peer.receive_msg()).await??; 201 | 202 | if piece_data[0] != 20 { 203 | return Err("Invalid metadata piece response".into()); 204 | } 205 | 206 | // Extract and validate piece data 207 | Ok(piece_data[2..end - start + 2].to_vec()) 208 | } 209 | 210 | pub async fn fetch_metadata_from_peers( 211 | &self, 212 | peers: &HashSet, 213 | ) -> Result, Box> { 214 | let max_concurrent = 10; 215 | 216 | let mut handles = Vec::with_capacity(std::cmp::min(peers.len(), max_concurrent)); 217 | 218 | for peer in peers.iter().take(max_concurrent) { 219 | let peer_info = peer.clone(); 220 | let self_clone = self.clone(); 221 | 222 | let handle = tokio::spawn(async move { 223 | match self_clone.fetch_metadata_from_peer(&peer_info).await { 224 | Ok(metadata) => Some((peer_info.clone(), metadata)), 225 | Err(_) => None, 226 | } 227 | }); 228 | handles.push(handle); 229 | } 230 | 231 | let results = join_all(handles).await; 232 | 233 | let mut valid_metadata = HashSet::new(); 234 | let mut metadata_count = HashMap::new(); 235 | 236 | for result in results { 237 | if let Ok(Some((_peer_info, metadata))) = result { 238 | let metadata_hash = { 239 | let mut hasher = Sha1::new(); 240 | Digest::update(&mut hasher, &metadata); 241 | hasher.finalize().to_vec() 242 | }; 243 | valid_metadata.insert((metadata_hash.clone(), metadata.clone())); 244 | *metadata_count.entry(metadata_hash).or_insert(0) += 1 245 | } 246 | } 247 | if let Some((most_common_hash, _)) = metadata_count.iter().max_by_key(|&(_, count)| count) { 248 | if let Some((_, metadata)) = valid_metadata 249 | .iter() 250 | .find(|(hash, _)| hash == most_common_hash) 251 | { 252 | return Ok(metadata.clone()); 253 | } 254 | } 255 | Err("failed to get consistent emtadata from 
peers".into()) 256 | } 257 | 258 | async fn send_extension_message( 259 | &self, 260 | peer: &mut Peer, 261 | extension_id: u8, 262 | payload: &[u8], 263 | ) -> Result<(), Box> { 264 | let msg_len = 2 + payload.len(); 265 | let mut message = Vec::with_capacity(4 + msg_len); 266 | 267 | message.extend_from_slice(&(msg_len as u32).to_be_bytes()); 268 | message.push(20); // Extension message ID 269 | message.push(extension_id); 270 | message.extend_from_slice(payload); 271 | 272 | peer.send_msg(&message).await?; 273 | Ok(()) 274 | } 275 | 276 | /// parses the link to extract info-hash, display-name(optiona), tracker urls and peer info! 277 | pub fn parse(magnet_url: &str) -> Result> { 278 | let url = Url::parse(magnet_url)?; 279 | 280 | // Get all query parameters, including duplicates 281 | let params: Vec<(String, String)> = url 282 | .query_pairs() 283 | .map(|(k, v)| (k.into_owned(), v.into_owned())) 284 | .collect(); 285 | 286 | let info_hash = if let Some((_, xt)) = params.iter().find(|(k, _)| k == "xt") { 287 | if let Some(hash) = xt.strip_prefix("urn:btih:") { 288 | if hash.len() == 40 { 289 | let mut result = [0u8; 20]; 290 | hex::decode_to_slice(hash, &mut result)?; 291 | result 292 | } else if hash.len() == 32 { 293 | let mut result = [0u8; 20]; 294 | let decoded = BASE32.decode(hash.as_bytes())?; 295 | result.copy_from_slice(&decoded); 296 | result 297 | } else { 298 | return Err("Invalid info hash".into()); 299 | } 300 | } else { 301 | return Err("Invalid xt parameter".into()); 302 | } 303 | } else { 304 | return Err("Missing xt parameter".into()); 305 | }; 306 | 307 | let display_name = params 308 | .iter() 309 | .find(|(k, _)| k == "dn") 310 | .map(|(_, v)| percent_decode_str(v).decode_utf8_lossy().into_owned()); 311 | 312 | // Collect all trackers (tr parameters) 313 | let trackers = params 314 | .iter() 315 | .filter(|(k, _)| k == "tr") 316 | .map(|(_, v)| percent_decode_str(v).decode_utf8_lossy().into_owned()) 317 | .collect::>(); 318 | 319 | 
println!("Found {} trackers in magnet link:", trackers.len()); 320 | for tracker in &trackers { 321 | println!(" - {}", tracker); 322 | } 323 | 324 | let peers = Some( 325 | params 326 | .iter() 327 | .filter(|(k, _)| k.starts_with("x.pe")) 328 | .map(|(_, v)| percent_decode_str(v).decode_utf8_lossy().into_owned()) 329 | .collect(), 330 | ); 331 | 332 | Ok(MagnetInfo { 333 | info_hash, 334 | display_name, 335 | trackers, 336 | peers, 337 | }) 338 | } 339 | //TODO:: Modify the magnet structure to handle offsets 340 | 341 | //pub async fn download(&self, output_dir: &str) -> Result<(), Box> { 342 | // println!("Fetching metadata from the magnet link!"); 343 | // 344 | // let metadata = self 345 | // .to_torrent_metadata() 346 | // .await 347 | // .expect("Failed to convert the link to metadata!"); 348 | // 349 | // println!("Getting the peer list...!"); 350 | // 351 | // let peers = request_peers(&metadata) 352 | // .await 353 | // .expect("Failed to request peers from magnet link metadata!"); 354 | // if peers.is_empty() { 355 | // return Err("No peer is available!".into()); 356 | // } 357 | // 358 | // tokio::fs::create_dir_all(output_dir) 359 | // .await 360 | // .expect("Failed to create a directory!"); 361 | // 362 | // let pieces_length = metadata.get_pieces_length(); 363 | // let pieces_hashes = metadata.get_pieces_hashes(); 364 | // let file_structure = metadata.get_file_structure(); 365 | // let total_pieces = pieces_hashes.len(); 366 | // 367 | // println!("Starting the download of {} pieces...!", total_pieces); 368 | // 369 | // let mut good_peer = None; 370 | // for peer in peers { 371 | // let mut tmp_peer = Peer::new(peer.ip.clone(), peer.port); 372 | // 373 | // match tmp_peer.connect().await { 374 | // Ok(_) => { 375 | // println!("Connected to {}:{}", peer.ip.clone(), peer.port); 376 | // let info_hash = metadata 377 | // .calculate_info_hash() 378 | // .expect("Failed to calculate the info hash!"); 379 | // match tmp_peer.handshake(info_hash, 
generate_peer_id()).await { 380 | // Ok(_) => { 381 | // println!("Handshake successful!"); 382 | // good_peer = Some(tmp_peer); 383 | // break; 384 | // } 385 | // Err(e) => { 386 | // println!("Handshake Failed! {}", e); 387 | // continue; 388 | // } 389 | // } 390 | // } 391 | // Err(_) => { 392 | // println!("Failed to connect to the peer!"); 393 | // continue; 394 | // } 395 | // } 396 | // } 397 | // let mut peer = good_peer.ok_or("Could not find a peer!").unwrap(); 398 | // 399 | // for (piece_index, piece_hash) in pieces_hashes.iter().enumerate() { 400 | // println!("Downloading {}/{} ... ", piece_index + 1, total_pieces); 401 | // let file_path = format!("{}/piece_{}", output_dir, piece_index); 402 | // match peer 403 | // .request_piece(piece_index as u32, pieces_length as u32, &file_path) 404 | // .await 405 | // { 406 | // Ok(_) => { 407 | // let piece_data = tokio::fs::read(file_path.as_str()) 408 | // .await 409 | // .expect("Failed to read the file"); 410 | // let mut hasher = Sha1::new(); 411 | // Digest::update(&mut hasher, &piece_data); 412 | // let downloaded_data: [u8; 20] = hasher.finalize().into(); 413 | // if &downloaded_data != piece_hash { 414 | // return Err( 415 | // format!("Piece {} hash verification failed!", piece_index).into() 416 | // ); 417 | // } 418 | // println!("Piece {} verified sucessfully!", piece_index); 419 | // } 420 | // Err(e) => { 421 | // return Err(format!("Failed to receive piece {}: {}", piece_index, e).into()); 422 | // } 423 | // } 424 | // } 425 | // println!("Reconstructing the file tree.."); 426 | // for (file_path, file_length) in file_structure { 427 | // let output_path = format!("{}/{}", output_dir, file_path); 428 | // if let Some(parent) = Path::new(&output_path).parent() { 429 | // tokio::fs::create_dir_all(parent).await?; 430 | // } 431 | // let mut output_file = tokio::fs::File::create(&output_path) 432 | // .await 433 | // .expect("Failed to create output file!"); 434 | // let mut bytes_written = 
0i64; 435 | // 436 | // while bytes_written < file_length { 437 | // let piece_index = (bytes_written / pieces_length as i64) as usize; 438 | // let piece_path = format!("{}/piece_{}", output_dir, piece_index); 439 | // let mut piece_data = tokio::fs::File::open(&piece_path).await?; 440 | // 441 | // let offset = bytes_written % pieces_length as i64; 442 | // piece_data 443 | // .seek(std::io::SeekFrom::Start(offset as u64)) 444 | // .await 445 | // .unwrap(); 446 | // 447 | // let bytes_to_write = 448 | // std::cmp::min(pieces_length as i64 - offset, file_length - bytes_written) 449 | // as usize; 450 | // let mut buffer = vec![0u8; bytes_to_write]; 451 | // piece_data.read_exact(&mut buffer).await?; 452 | // output_file.write_all(&buffer).await?; 453 | // 454 | // bytes_written += bytes_to_write as i64; 455 | // } 456 | // } 457 | // for piece_index in 0..total_pieces { 458 | // let piece_path = format!("{}/piece_{}", output_dir, piece_index); 459 | // tokio::fs::remove_file(piece_path).await?; 460 | // } 461 | // println!("Download Completed Successfully!"); 462 | // Ok(()) 463 | //} 464 | } 465 | 466 | #[cfg(test)] 467 | mod tests { 468 | 469 | use super::*; 470 | 471 | #[test] 472 | fn test_magnet_parser() { 473 | let magnet = "magnet:?xt=urn:btih:12451f81a977a2d8bb402f21cd643422c5d4c50a&dn=The.Agency.2024.S01E05.WEB.x264-TORRENTGALAXY&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Fexodus.desync.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.cyberia.is%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.torrent.eu.org%3A451%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.birkenwald.de%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.moeking.me%3A6969%2Fannounce&tr=udp%3A%2F%2Fipv4.tracker.harry.lu%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.tiny-vps.com%3A6969%2Fannounce"; 474 | 475 | let result = MagnetInfo::parse(magnet).unwrap(); 476 | 477 | assert_eq!( 478 | 
result.display_name.unwrap().as_str(), 479 | "The.Agency.2024.S01E05.WEB.x264-TORRENTGALAXY" 480 | ); 481 | } 482 | #[test] 483 | fn test_base32_magnet() { 484 | let magnet = "magnet:?xt=urn:btih:c9e15763f722f23e98a29decdfae341b98d53056&dn=Test&tr=udp%3A%2F%2Ftracker.example.org%3A6969"; 485 | let magnet_info = MagnetInfo::parse(magnet).unwrap(); 486 | assert!(magnet_info.info_hash.len() == 20); 487 | } 488 | 489 | #[tokio::test] 490 | async fn test_fetch_metadata_from_peers() { 491 | let magnet = "magnet:?xt=urn:btih:678BC6AC22A5BEFAC6BBC50834E91D4F9755DEE4&dn=Dragon%26%23039%3Bs+Dogma+2+%28Dev+Build+v1.0.0.1%2C+MULTi14%29+%5BFitGirl+Repack%2C+Selective+Download+-+from+38.4+GB%5D%5D&tr=udp%3A%2F%2Fopentor.net%3A6969&tr=udp%3A%2F%2Ftracker.torrent.eu.org%3A451%2Fannounce&tr=udp%3A%2F%2Ftracker.theoks.net%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.ccp.ovh%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=http%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=https%3A%2F%2Ftracker.tamersunion.org%3A443%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.bt4g.com%3A2095%2Fannounce&tr=udp%3A%2F%2Fbt2.archive.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fbt1.archive.org%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.filemail.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker1.bt.moack.co.kr%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.internetwarriors.net%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fcoppersurfer.tk%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.zer0day.to%3A1337%2Fannounce"; 492 | 493 | let magnet_info = MagnetInfo::parse(magnet).unwrap(); 494 | println!("Getting the peers from trackers..."); 495 | 496 | let mut peers = Vec::new(); 497 | for tracker in 
&magnet_info.trackers { 498 | match request_tracker(tracker, &magnet_info.info_hash, 0).await { 499 | Ok(peer_vec) => { 500 | println!("Got {} number of peers from {}", peer_vec.len(), tracker); 501 | peers.extend(peer_vec); 502 | } 503 | Err(e) => { 504 | println!("Failed to get peers from tracker {}, {}", tracker, e); 505 | continue; 506 | } 507 | } 508 | } 509 | peers.sort_by_key(|p| (p.ip.clone(), p.port)); 510 | peers.dedup_by_key(|p| (p.ip.clone(), p.port)); 511 | 512 | if peers.is_empty() { 513 | panic!("could not find any peers"); 514 | } 515 | println!("Testing with {} unique peers", peers.len()); 516 | 517 | match magnet_info.fetch_metadata_from_peers(&peers).await { 518 | Ok(metadata) => { 519 | println!("Fetched metadata {} bytes", metadata.len()); 520 | assert!(!metadata.is_empty(), "Metadata should not be empty"); 521 | } 522 | Err(e) => { 523 | panic!("Failed to fetch metadata {}", e); 524 | } 525 | } 526 | } 527 | } 528 | -------------------------------------------------------------------------------- /src/peer.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use std::collections::{HashMap, HashSet}; 3 | use std::hash::Hash; 4 | use std::time::Duration; 5 | use tokio::fs::OpenOptions; 6 | use tokio::io::{AsyncReadExt, AsyncWriteExt}; 7 | use tokio::net::TcpStream; 8 | use tokio::time::timeout; 9 | 10 | use crate::error::{Result, TorrentError}; 11 | use crate::mapper::TorrentMetaData; 12 | 13 | //TODO: have to modify the mapper to optionally look for some fields but otherwise ignore them if 14 | //they dont exist! 
//TODO: Go through the downloading process once again and implement multi threading
//TODO: Should start the downloading process once it has found a peer with the right bitfields
//then run the rest of the process in the background
//TODO: Add rarest first algo to download the pieces that fewer peers have first
//TODO: Refactor getting piece availability to be dynamically called instead of once at the
//beginning
//TODO: Skip pieces that fail to be downloaded- must have them saved somewhere to download them
//later on
//TODO: Add piece verification

/// Wire messages exchanged with a remote peer after the handshake.
/// Variants mirror the message IDs read in `Peer::read_message`
/// (0 = Choke … 8 = Cancel; a zero-length message is `KeepAlive`).
#[derive(Debug, Clone)]
pub enum PeerMessage {
    // Zero-length message; carries no payload.
    KeepAlive,
    // Remote peer will not answer our requests.
    Choke,
    // Remote peer will answer our requests again.
    Unchoke,
    // Remote peer wants data from us.
    Interested,
    NotInterested,
    // Remote peer announces it now has this piece.
    Have {
        piece_idx: u32,
    },
    // Raw bitmap of the pieces the remote peer has, one bit per piece.
    BitField {
        bitfield: Vec<u8>,
    },
    // Request for `length` bytes starting at `begin` within piece `index`.
    Request {
        index: u32,
        begin: u32,
        length: u32,
    },
    // A block of piece data, answering a prior Request.
    Piece {
        index: u32,
        begin: u32,
        block: Vec<u8>,
    },
    // Cancels a previously sent Request with the same coordinates.
    Cancel {
        index: u32,
        begin: u32,
        length: u32,
    },
}

/// Network address of a peer as reported by a tracker.
#[derive(Debug, Deserialize, Serialize, Clone, Hash)]
pub struct PeerInfo {
    // Dotted-quad IPv4 address (kept as a String throughout this codebase).
    pub ip: String,
    pub port: u16,
}

// TODO: comparing strings is stupid!
fix it 62 | impl PartialEq for PeerInfo { 63 | fn eq(&self, other: &Self) -> bool { 64 | self.ip == other.ip 65 | } 66 | } 67 | 68 | impl Eq for PeerInfo {} 69 | 70 | #[derive(Debug)] 71 | pub struct Peer { 72 | pub peer_info: PeerInfo, 73 | stream: Option, 74 | pub bitfields: Option>, 75 | pub is_choked: bool, 76 | pub is_interested: bool, 77 | pub piece_availability: HashSet, 78 | } 79 | 80 | #[derive(Debug)] 81 | pub struct PieceDownloader { 82 | pub peers: Vec, 83 | pub current_peer_idx: usize, 84 | pub hash_info: [u8; 20], 85 | pub peer_id: [u8; 20], 86 | } 87 | 88 | impl PieceDownloader { 89 | pub fn new(peers: Vec, info_hash: [u8; 20], peer_id: [u8; 20]) -> Self { 90 | let peers = peers 91 | .into_iter() 92 | .map(|peer_info| Peer { 93 | peer_info, 94 | stream: None, 95 | bitfields: None, 96 | is_choked: true, 97 | is_interested: false, 98 | piece_availability: HashSet::new(), 99 | }) 100 | .collect(); 101 | 102 | PieceDownloader { 103 | peers, 104 | current_peer_idx: 0, 105 | hash_info: info_hash, 106 | peer_id, 107 | } 108 | } 109 | // a method to initialize all peer connections and gather piece availability 110 | // should be called first so that we could run get_piece_availability on peers 111 | pub async fn initialize_peers(&mut self) -> Result<()> { 112 | let mut successful_connections = 0; 113 | 114 | // connect to all peers 115 | for peer_idx in 0..self.peers.len() { 116 | self.current_peer_idx = peer_idx; 117 | let peer = &mut self.peers[peer_idx]; 118 | 119 | peer.connect().await?; 120 | peer.handshake(self.hash_info, self.peer_id).await?; 121 | 122 | if let Ok(()) = peer.receive_init_msg().await { 123 | successful_connections += 1; 124 | println!( 125 | "Successfully initialized peer {}:{}", 126 | peer.peer_info.ip, peer.peer_info.port 127 | ); 128 | } 129 | } 130 | if successful_connections == 0 { 131 | return Err(TorrentError::PeerError( 132 | "couldnt connect to any peer".to_string(), 133 | )); 134 | } 135 | 136 | Ok(()) 137 | } 138 | 139 | /// 
Enumerates through the peers and map the piece index to peers that offer that piece! 140 | pub fn get_piece_availability(&self, total_pieces: u32) -> HashMap> { 141 | let mut pices_with_peers: HashMap> = HashMap::new(); 142 | for piece in 0..total_pieces { 143 | let peers_with_piece: Vec = self 144 | .peers 145 | .iter() 146 | .enumerate() 147 | .filter(|(_, peer)| peer.piece_availability.contains(&piece)) 148 | .map(|(idx, _)| idx) 149 | .collect(); 150 | if !peers_with_piece.is_empty() { 151 | pices_with_peers.insert(piece, peers_with_piece); 152 | } 153 | } 154 | pices_with_peers 155 | } 156 | async fn download_piece( 157 | &mut self, 158 | index: u32, 159 | piece_length: u32, 160 | file_path: &str, 161 | total_pieces: u32, 162 | ) -> Result<()> { 163 | let mut offset = 0; 164 | let mut retries_with_same_peer = 0; 165 | const MAX_RETRIES_PER_PEER: i32 = 2; 166 | 167 | while offset < piece_length { 168 | let peers_per_piece = self.get_piece_availability(total_pieces); 169 | if let Some(peers) = peers_per_piece.get(&index) { 170 | let mut peer_idx = 0; 171 | while peer_idx < peers.len() { 172 | self.current_peer_idx = peers[peer_idx]; 173 | 174 | let peer = &mut self.peers[self.current_peer_idx]; 175 | println!( 176 | "Attempting download of piece {} from peer {}:{}", 177 | index, peer.peer_info.ip, peer.peer_info.port 178 | ); 179 | 180 | match peer 181 | .request_piece(index, piece_length, offset, file_path) 182 | .await 183 | { 184 | Ok(bytes_downloaded) => { 185 | offset += bytes_downloaded; 186 | retries_with_same_peer = 0; 187 | if offset >= piece_length { 188 | return Ok(()); 189 | } 190 | } 191 | Err(_e) => { 192 | println!( 193 | "Error downloading from peer: {}:{}", 194 | peer.peer_info.ip, peer.peer_info.port 195 | ); 196 | retries_with_same_peer += 1; 197 | if retries_with_same_peer >= MAX_RETRIES_PER_PEER { 198 | println!( 199 | "Switching to the next peer after {} retries", 200 | retries_with_same_peer 201 | ); 202 | retries_with_same_peer = 0; 203 | 
// NOTE(review): the lines below are the tail of a method whose opening lines
// lie above this chunk; they are reproduced unchanged (reformatted only).
                                peer_idx += 1;
                            } else {
                                tokio::time::sleep(Duration::from_secs(1)).await;
                            }
                        }
                    }
                }
            }
        }
        Ok(())
    }

    /// Downloads every piece of `torrent` into `output_dir`.
    ///
    /// Pieces are fetched sequentially. Each piece is tried against every
    /// peer known to advertise it (per `get_piece_availability`) until one
    /// succeeds; downloaded pieces land in `<output_dir>/temp_pieces/piece_<i>`
    /// and are assembled into the final files once all pieces are present,
    /// after which the scratch directory is removed.
    ///
    /// # Errors
    /// * `TorrentError::NoAvailablePeers` if no known peer has a piece.
    /// * `TorrentError::DownloadTimedout` if every candidate peer failed.
    /// * I/O errors from the temp-directory / filesystem operations.
    pub async fn download_torrent(
        &mut self,
        torrent: &TorrentMetaData,
        output_dir: &str,
    ) -> Result<()> {
        self.initialize_peers().await?;

        let piece_length = torrent.get_pieces_length() as u32;
        let total_pieces = torrent.calculate_total_pieces();

        // Scratch directory for verified-but-unassembled pieces.
        let temp_dir = format!("{}/temp_pieces", output_dir);
        tokio::fs::create_dir_all(&temp_dir).await?;

        let mut downloaded_pieces = HashSet::new();
        let piece_availability = self.get_piece_availability(total_pieces);

        for piece_index in 0..total_pieces {
            if downloaded_pieces.contains(&piece_index) {
                continue;
            }

            // The final piece is usually shorter than the nominal piece size:
            // use the remainder of the total length unless it divides evenly.
            let actual_piece_length = if piece_index == total_pieces - 1 {
                let total_size = torrent.get_total_size();
                let remainder = total_size % torrent.get_pieces_length();
                if remainder == 0 {
                    piece_length
                } else {
                    remainder as u32
                }
            } else {
                piece_length
            };

            let temp_piece_path = format!("{}/piece_{}", temp_dir, piece_index);

            let Some(peer_indices) = piece_availability.get(&piece_index) else {
                return Err(TorrentError::NoAvailablePeers(piece_index));
            };

            let mut success = false;
            for &peer_idx in peer_indices {
                // `download_piece` reads the current peer from `self`.
                self.current_peer_idx = peer_idx;
                match self
                    .download_piece(
                        piece_index,
                        actual_piece_length,
                        &temp_piece_path,
                        total_pieces,
                    )
                    .await
                {
                    Ok(_) => {
                        downloaded_pieces.insert(piece_index);
                        success = true;
                        println!("Successfully downloaded and verified piece {}", piece_index);
                        break;
                    }
                    Err(e) => {
                        // Try the next peer that has this piece.
                        println!("failed to download the piece {}: {}", piece_index, e);
                    }
                }
            }
            if !success {
                return Err(TorrentError::DownloadTimedout);
            }
        }

        println!("All pieces downloaded! Assembling files...");
        PieceDownloader::assemble_files(torrent, &temp_dir, output_dir).await?;
        tokio::fs::remove_dir_all(&temp_dir).await?;
        Ok(())
    }

    /// Stitches the downloaded `piece_<i>` files in `temp_dir` back into the
    /// torrent's file layout under `output_dir`.
    ///
    /// Walks the torrent's file structure in order, tracking an absolute byte
    /// offset into the concatenated piece stream; each output file is filled
    /// by copying the overlapping slices of the relevant pieces. Existing
    /// files are truncated and rewritten.
    pub async fn assemble_files(
        torrent: &TorrentMetaData,
        temp_dir: &str,
        output_dir: &str,
    ) -> Result<()> {
        let piece_length = torrent.get_pieces_length() as u64;
        let file_structure = torrent.get_file_structure();
        // Byte offset into the torrent's global (piece-concatenated) stream.
        let mut absolute_offset = 0u64;

        for (file_path, file_length) in file_structure {
            let full_path = format!("{}/{}", output_dir, file_path);

            if let Some(parent) = std::path::Path::new(&full_path).parent() {
                tokio::fs::create_dir_all(parent).await?;
            }
            let mut outputfile = tokio::fs::OpenOptions::new()
                .create(true)
                .write(true)
                .truncate(true)
                .open(&full_path)
                .await?;

            let file_length = file_length as u64;
            let mut bytes_written = 0u64;

            while bytes_written < file_length {
                // Locate the piece (and offset within it) for the current
                // position in the global stream.
                let current_piece = (absolute_offset / piece_length) as u32;
                let offset_in_piece = absolute_offset % piece_length;

                let piece_path = format!("{}/piece_{}", temp_dir, current_piece);
                let piece_data = tokio::fs::read(&piece_path).await?;

                // Copy up to the end of the piece or the end of the file,
                // whichever comes first.
                let bytes_remaining_in_piece = piece_data.len() as u64 - offset_in_piece;
                let bytes_remaining_in_file = file_length - bytes_written;
                let bytes_to_write =
                    std::cmp::min(bytes_remaining_in_piece, bytes_remaining_in_file) as usize;

                outputfile
                    .write_all(
                        &piece_data
                            [offset_in_piece as usize..(offset_in_piece as usize + bytes_to_write)],
                    )
                    .await?;
                bytes_written += bytes_to_write as u64;
                absolute_offset += bytes_to_write as u64;
            }
        }
        Ok(())
    }
}

impl Peer {
    /// Creates an unconnected peer handle for `ip:port`.
    ///
    /// The peer starts with no TCP stream, no bitfield, choked, and
    /// not-interested — the normal initial state of the BitTorrent wire
    /// protocol.
    pub fn new(ip: String, port: u16) -> Self {
        let peer_info = PeerInfo { ip, port };
        Peer {
            peer_info,
            stream: None,
            bitfields: None,
            is_choked: true,
            is_interested: false,
            piece_availability: HashSet::new(),
        }
    }

    /// Reads one length-prefixed wire-protocol message from the peer.
    ///
    /// Wire format: 4-byte big-endian length, then (unless length == 0, a
    /// keep-alive) a 1-byte message id followed by `length - 1` payload bytes.
    ///
    /// # Errors
    /// `TorrentError::PeerError` when no stream is connected,
    /// `TorrentError::InvalidMessage` on an unknown id or a length prefix too
    /// short for the claimed message type, plus any I/O error.
    async fn read_message(&mut self) -> Result<PeerMessage> {
        let stream = self
            .stream
            .as_mut()
            .ok_or_else(|| TorrentError::PeerError("No active stream".to_string()))?;

        let mut len_buf = [0u8; 4];
        stream.read_exact(&mut len_buf).await?;
        let msg_len = u32::from_be_bytes(len_buf) as usize;
        if msg_len == 0 {
            return Ok(PeerMessage::KeepAlive);
        }

        let mut msg_id = [0u8; 1];
        stream.read_exact(&mut msg_id).await?;

        match msg_id[0] {
            0 => Ok(PeerMessage::Choke),
            1 => Ok(PeerMessage::Unchoke),
            2 => Ok(PeerMessage::Interested),
            3 => Ok(PeerMessage::NotInterested),
            4 => {
                let mut piece_idx = [0u8; 4];
                stream.read_exact(&mut piece_idx).await?;
                Ok(PeerMessage::Have {
                    piece_idx: u32::from_be_bytes(piece_idx),
                })
            }
            5 => {
                // msg_len >= 1 here (the zero case returned above), so this
                // subtraction cannot underflow.
                let payload_len = msg_len - 1;
                let mut bitfield = vec![0u8; payload_len];
                stream.read_exact(&mut bitfield).await?;
                Ok(PeerMessage::BitField { bitfield })
            }
            6 => {
                let mut buff = [0u8; 12];
                stream.read_exact(&mut buff).await?;
                Ok(PeerMessage::Request {
                    index: u32::from_be_bytes(buff[0..4].try_into().unwrap()),
                    begin: u32::from_be_bytes(buff[4..8].try_into().unwrap()),
                    length: u32::from_be_bytes(buff[8..12].try_into().unwrap()),
                })
            }
            7 => {
                // Piece message: 1-byte id + 4-byte index + 4-byte begin + block.
                let mut index = [0u8; 4];
                let mut begin = [0u8; 4];
                stream.read_exact(&mut index).await?;
                stream.read_exact(&mut begin).await?;

                // Guard against a malformed length prefix (< 9) which would
                // otherwise underflow and panic / allocate absurdly.
                let block_len = msg_len.checked_sub(9).ok_or_else(|| {
                    TorrentError::InvalidMessage(format!(
                        "Piece message length too short: {msg_len}"
                    ))
                })?;
                let mut block = vec![0u8; block_len];
                stream.read_exact(&mut block).await?;

                Ok(PeerMessage::Piece {
                    index: u32::from_be_bytes(index),
                    begin: u32::from_be_bytes(begin),
                    block,
                })
            }
            8 => {
                // Cancel message
                let mut buf = [0u8; 12];
                stream.read_exact(&mut buf).await?;
                Ok(PeerMessage::Cancel {
                    index: u32::from_be_bytes(buf[0..4].try_into().unwrap()),
                    begin: u32::from_be_bytes(buf[4..8].try_into().unwrap()),
                    length: u32::from_be_bytes(buf[8..12].try_into().unwrap()),
                })
            }
            id => {
                // Unknown message — drain the payload so the stream stays
                // framed, then report it.
                let payload_len = msg_len - 1;
                let mut payload = vec![0u8; payload_len];
                stream.read_exact(&mut payload).await?;
                Err(TorrentError::InvalidMessage(format!(
                    "Unknown message ID: {id:?}",
                )))
            }
        }
    }

    /// Opens a TCP connection to the peer with a 5-second timeout and stores
    /// the stream on success.
    pub async fn connect(&mut self) -> Result<()> {
        let address = format!("{}:{}", self.peer_info.ip, self.peer_info.port);
        let connect_future = TcpStream::connect(&address);
        match timeout(Duration::from_secs(5), connect_future).await {
            Ok(Ok(stream)) => {
                self.stream = Some(stream);
                println!("Successfully connected to stream: {}", address);
                Ok(())
            }
            Ok(Err(e)) => Err(TorrentError::ConnectionFailed(
                format!("Connection error to peer {}: {}", address, e).into(),
            )),
            Err(_) => Err(TorrentError::ConnectionTimedOut(format!(
                "Connection timeout to {}",
                address
            ))),
        }
    }

    /// Performs the BitTorrent handshake (BEP 3): sends the 68-byte
    /// `<19>"BitTorrent protocol"<8 reserved><info_hash><peer_id>` message and
    /// validates the peer's response.
    ///
    /// # Errors
    /// Timeouts on send (4 s) / receive (10 s), a bad protocol string, or an
    /// `info_hash` mismatch all fail the handshake.
    pub async fn handshake(&mut self, info_hash: [u8; 20], peer_id: [u8; 20]) -> Result<()> {
        // Build the message before borrowing the stream mutably.
        let mut handshake_msg = Vec::with_capacity(68);
        handshake_msg.push(19);
        handshake_msg.extend_from_slice(b"BitTorrent protocol");
        handshake_msg.extend_from_slice(&[0u8; 8]); // reserved bytes, all zero
        handshake_msg.extend_from_slice(&info_hash);
        handshake_msg.extend_from_slice(&peer_id);

        let stream = self
            .stream
            .as_mut()
            .ok_or_else(|| TorrentError::PeerError("Stream not established yet".to_string()))?;

        match timeout(Duration::from_secs(4), stream.write_all(&handshake_msg)).await {
            Ok(result) => result?,
            Err(_) => {
                return Err(TorrentError::ConnectionTimedOut(
                    "Handshake send timeout".to_string(),
                ));
            }
        };

        println!("Handshake sent, waiting for response...");

        let mut response = vec![0u8; 68];
        timeout(Duration::from_secs(10), stream.read_exact(&mut response))
            .await
            .map_err(|_| TorrentError::ConnectionTimedOut("Handshake receive timeout".to_string()))?
            .map_err(|e| TorrentError::PeerError(format!("Failed to read handshake: {}", e)))?;

        println!("Handshake response received!");
        if response[0] != 19 || &response[1..20] != b"BitTorrent protocol" {
            return Err(TorrentError::InvalidHandshake(format!(
                "Invalid protocol string. Got: {:?}",
                &response[..20]
            )));
        }

        // Bytes 28..48 of the response carry the info hash the peer is serving.
        let peer_info_hash = &response[28..48];
        if peer_info_hash != info_hash {
            return Err(TorrentError::InvalidHandshake(format!(
                "Info hash mismatch.\nExpected: {:02x?}\nReceived: {:02x?}",
                info_hash, peer_info_hash
            )));
        }

        Ok(())
    }

    /// Consumes the first post-handshake message, which peers usually send
    /// immediately: either a `BitField` (recorded into `piece_availability`)
    /// or an early `Unchoke`.
    pub async fn receive_init_msg(&mut self) -> Result<()> {
        match self.read_message().await? {
            PeerMessage::BitField { bitfield } => {
                // Parse first, then store — avoids cloning the buffer.
                self.parse_bitfield(&bitfield);
                self.bitfields = Some(bitfield);
                Ok(())
            }
            PeerMessage::Unchoke => {
                self.is_choked = false;
                Ok(())
            }
            msg => Err(TorrentError::InvalidMessage(format!(
                "expected either bitfield or unchoke msgs, got {msg:?}"
            ))),
        }
    }

    /// Downloads piece `index` from this peer into `file_path`, in 16 KiB
    /// blocks starting at `start_offset`, returning the number of bytes
    /// downloaded.
    ///
    /// Sends `interested` once, waits to be unchoked, then request/receive
    /// loops until `piece_length` is reached. `start_offset == 0` truncates
    /// the file; a non-zero offset appends (resume).
    ///
    /// # Errors
    /// Propagates I/O failures, a 15-second per-block timeout, and
    /// `PermissionDenied` when the piece file cannot be created.
    pub async fn request_piece(
        &mut self,
        index: u32,
        piece_length: u32,
        start_offset: u32,
        file_path: &str,
    ) -> Result<u32> {
        if !self.is_interested {
            self.send_interested().await?;
            self.is_interested = true;
            println!("interested message sent!");
        }
        self.wait_for_unchoke().await?;

        println!("Requesting piece: {}", index);
        let mut file = if start_offset == 0 {
            OpenOptions::new()
                .create(true)
                .write(true)
                .truncate(true)
                .open(file_path)
                .await
                .map_err(|e| match e.kind() {
                    std::io::ErrorKind::PermissionDenied => {
                        TorrentError::PermissionDenied(file_path.to_string())
                    }
                    _ => TorrentError::IoError(e),
                })?
        } else {
            OpenOptions::new()
                .create(true)
                .append(true)
                .open(file_path)
                .await
                .map_err(TorrentError::from)?
        };

        // Standard BitTorrent block size (BEP 3 suggests 16 KiB requests).
        const BLOCK_SIZE: u32 = 16 * 1024;
        let mut offset = start_offset;
        let mut bytes_downloaded = 0;

        while offset < piece_length {
            let block_length = std::cmp::min(BLOCK_SIZE, piece_length - offset);
            self.send_piece_req(index, offset, block_length).await?;

            // Wait for the matching piece block, bounded by a timeout so a
            // stalled peer doesn't hang the download.
            let time_dur = Duration::from_secs(15);
            let msg = timeout(time_dur, self.wait_for_piece_block(index, offset))
                .await
                .map_err(|_| {
                    TorrentError::PeerError("Timeout waiting for piece data".to_string())
                })??;

            if let PeerMessage::Piece { block, .. } = msg {
                file.write_all(&block).await?;
                offset += block_length;
                bytes_downloaded += block_length;
                println!("Received piece block at offset {}", offset);
            }
        }

        Ok(bytes_downloaded)
    }

    /// Writes a raw, pre-serialized message to the peer and flushes.
    ///
    /// # Errors
    /// `PeerError` when not connected; I/O errors are propagated instead of
    /// panicking (the previous `expect` calls would abort the whole process
    /// on a dropped connection).
    pub async fn send_msg(&mut self, message: &[u8]) -> Result<()> {
        if let Some(stream) = self.stream.as_mut() {
            stream.write_all(message).await?;
            stream.flush().await?;
            Ok(())
        } else {
            Err(TorrentError::PeerError("No stream was found!".to_string()))
        }
    }

    /// Reads one raw length-prefixed message and returns its body (id byte
    /// included) without decoding it.
    ///
    /// # Errors
    /// `PeerError` when not connected; read failures are propagated instead
    /// of panicking.
    pub async fn receive_msg(&mut self) -> Result<Vec<u8>> {
        if let Some(stream) = self.stream.as_mut() {
            let mut length_bytes = [0u8; 4];
            stream.read_exact(&mut length_bytes).await?;
            let length = u32::from_be_bytes(length_bytes);
            let mut msg = vec![0u8; length as usize];
            stream.read_exact(&mut msg).await?;
            Ok(msg)
        } else {
            Err(TorrentError::PeerError("No stream was found!".to_string()))
        }
    }

    /// Sends the fixed 5-byte `interested` message (len=1, id=2).
    async fn send_interested(&mut self) -> Result<()> {
        let interested_msg = [0u8, 0, 0, 1, 2];
        let stream = self
            .stream
            .as_mut()
            .ok_or_else(|| TorrentError::PeerError("Not connected to peer".to_string()))?;
        stream
            .write_all(&interested_msg)
            .await
            .map_err(|e| TorrentError::PeerError(format!("Failed to send interested: {}", e)))?;
        Ok(())
    }

    /// Blocks until the peer unchokes us, recording any `Have` messages seen
    /// along the way, or gives up after ~20 seconds.
    ///
    /// NOTE: the elapsed check only runs between messages; `read_message`
    /// itself is not individually timed out here.
    async fn wait_for_unchoke(&mut self) -> Result<()> {
        let start_time = std::time::Instant::now();
        let timeout = Duration::from_secs(20);
        while self.is_choked && start_time.elapsed() < timeout {
            match self.read_message().await? {
                PeerMessage::Unchoke => {
                    // BUGFIX: this previously set `is_choked = true`, so the
                    // unchoke was never recorded and every later call waited
                    // for another Unchoke until it timed out.
                    self.is_choked = false;
                    return Ok(());
                }
                PeerMessage::Have { piece_idx } => {
                    self.piece_availability.insert(piece_idx);
                }
                _ => {}
            }
        }
        if self.is_choked {
            Err(TorrentError::DownloadTimedout)
        } else {
            Ok(())
        }
    }

    /// Sends a `request` message (id=6) for `length` bytes of piece `index`
    /// starting at `begin`.
    async fn send_piece_req(&mut self, index: u32, begin: u32, length: u32) -> Result<()> {
        let mut request = Vec::with_capacity(17);
        request.extend_from_slice(&13u32.to_be_bytes()); // payload length: id + 3 * u32
        request.push(6);
        request.extend_from_slice(&index.to_be_bytes());
        request.extend_from_slice(&begin.to_be_bytes());
        request.extend_from_slice(&length.to_be_bytes());

        self.stream
            .as_mut()
            .ok_or_else(|| TorrentError::PeerError("No active stream".to_string()))?
            .write_all(&request)
            .await?;
        Ok(())
    }

    /// Reads messages until the `Piece` block matching `expected_index` /
    /// `expected_offset` arrives, skipping unrelated traffic.
    ///
    /// # Errors
    /// Fails if the peer chokes us mid-download or a read fails; the caller
    /// is expected to wrap this in a timeout.
    async fn wait_for_piece_block(
        &mut self,
        expected_index: u32,
        expected_offset: u32,
    ) -> Result<PeerMessage> {
        loop {
            match self.read_message().await? {
                msg @ PeerMessage::Piece { index, begin, .. } => {
                    if index == expected_index && begin == expected_offset {
                        return Ok(msg);
                    } else {
                        // Out-of-order / stale block: log and keep waiting.
                        println!(
                            "Received piece for wrong index/offset: expected {}/{}, got {}/{}",
                            expected_index, expected_offset, index, begin
                        );
                    }
                }
                PeerMessage::Choke => {
                    return Err(TorrentError::PeerError(
                        "Peer choked during download".to_string(),
                    ));
                }
                PeerMessage::Have { piece_idx } => {
                    println!("Received have message for piece {}", piece_idx);
                    self.piece_availability.insert(piece_idx);
                }
                PeerMessage::KeepAlive => {
                    println!("Received keep-alive");
                }
                msg => {
                    println!(
                        "Skipping unexpected message during piece download: {:?}",
                        msg
                    );
                }
            }
        }
    }

    /// Records every set bit of a BEP 3 bitfield (MSB of byte 0 == piece 0)
    /// into `piece_availability`.
    ///
    /// NOTE(review): trailing spare bits past the real piece count are also
    /// inserted if the peer sets them; harmless for lookups but worth
    /// confirming callers never iterate this set blindly.
    fn parse_bitfield(&mut self, bitfield: &[u8]) {
        for (byte_idx, &byte) in bitfield.iter().enumerate() {
            for bit_idx in 0..8 {
                // High bit first: piece index = byte_idx * 8 + bit_idx.
                if (byte & (1 << (7 - bit_idx))) != 0 {
                    let piece_idx = (byte_idx * 8 + bit_idx) as u32;
                    self.piece_availability.insert(piece_idx);
                }
            }
        }
    }
}

#[cfg(test)]
mod tests {

    use super::*;
    use crate::mapper::TorrentMetaData;
    use crate::tracker::{generate_peer_id, request_peers};
    use tokio::test;

    // NOTE(review): both tests depend on machine-specific absolute paths and
    // live network access, so they only pass on the author's machine; consider
    // `#[ignore]` or fixture torrents checked into the repo.
    #[test]
    async fn test_get_peers() {
        let path = r"/home/rusty/Rs/Torrs/Gym Manager [FitGirl Repack].torrent";
        let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap();
        println!("Got the torrent meta data");

        match request_peers(&torrent_meta_data).await {
            Ok(peers) => {
                println!("Successfully retrieved {} peers", peers.len());
                for (i, peer) in peers.iter().enumerate() {
                    println!("Peer {}: {:?}", i + 1, peer);
                }
                assert!(!peers.is_empty(), "Peer list should not be empty");
            }
            Err(e) => {
                eprintln!("Failed to retrieve peers: {:?}", e);
            }
        }
    }

    #[tokio::test]
    async fn test_download() {
        let path = "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent";
        let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap();
        let peers = request_peers(&torrent_meta_data).await.unwrap();
        let peer_id = generate_peer_id();
        let info_hash = torrent_meta_data.calculate_info_hash().unwrap();

        let mut downloader = PieceDownloader::new(peers, info_hash, peer_id);

        // Download next to the torrent file, under `downloads/`.
        let torrent_path = std::path::Path::new(path);
        let output_dir = torrent_path
            .parent()
            .unwrap()
            .join("downloads")
            .to_string_lossy()
            .to_string();

        match downloader
            .download_torrent(&torrent_meta_data, &output_dir)
            .await
        {
            Ok(()) => println!("Successfully downloaded torrent"),
            Err(e) => println!("Failed to download: {}", e),
        }
    }
}