├── .gitignore ├── Cargo.toml ├── README.md └── src ├── error.rs ├── magnet.rs ├── main.rs ├── mapper.rs ├── parser.rs ├── peer.rs └── tracker.rs /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | Cargo.lock 6 | 7 | .vscode/ 8 | *.torrent 9 | *.Identifier 10 | *.bin 11 | downloads/ 12 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "torrent" 3 | version = "0.0.0" 4 | authors = ["you"] 5 | edition = "2024" 6 | 7 | 8 | 9 | [dependencies] 10 | serde = { version = "1.0.210", features = ["derive"] } 11 | env_logger = "0.11.5" 12 | serde_json = "1.0.128" 13 | bencode-encoder = "0.1.2" 14 | linked_hash_set = "0.1.4" 15 | sha1 = "0.10.6" 16 | base64 = "0.22.1" 17 | rand = "0.8.5" 18 | url = "2.5.2" 19 | reqwest = "0.12.8" 20 | serde_bencode = "0.2.4" 21 | log = "0.4.22" 22 | tokio = { version = "1.40.0", features = ["full"] } 23 | serde_bytes = "0.11.15" 24 | hex = "0.4.3" 25 | percent-encoding = "2.3.1" 26 | data-encoding = "2.3.1" 27 | futures = "0.3.31" 28 | 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Approaching a torrent downloader from a microservice architecture perspective is an interesting way to modularize the system and potentially make it more scalable and maintainable. 
Here's a breakdown of potential microservices for a torrent downloading system: 2 | 3 | 4 | 5 | ```mermaid 6 | graph TD 7 | A[API Gateway] --> B[Torrent Parser Service] 8 | A --> C[Peer Discovery Service] 9 | A --> D[Download Manager Service] 10 | A --> E[File Storage Service] 11 | A --> F[User Management Service] 12 | C --> G[Tracker Communication Service] 13 | C --> H[DHT Service] 14 | D --> I[Peer Connection Service] 15 | D --> J[Piece Selection Service] 16 | E --> K[File Assembly Service] 17 | 18 | ``` 19 | 20 | Let's break down each of these services and their responsibilities: 21 | 22 | 1. API Gateway: 23 | - [ ] Handles external requests 24 | - [ ] Routes requests to appropriate microservices 25 | - [ ] Manages authentication and rate limiting 26 | 27 | 2. Torrent Parser Service: 28 | - [x] Parses .torrent files and magnet links 29 | - [x] Extracts metadata (file info, trackers, piece hashes) 30 | 31 | 3. Peer Discovery Service: 32 | - [x] Coordinates peer discovery methods 33 | - [ ] Interfaces with Tracker Communication and DHT services 34 | 35 | 4. Tracker Communication Service: 36 | - [x] Communicates with trackers to get peer lists 37 | - [x] Handles tracker protocol specifics 38 | 39 | 5. DHT (Distributed Hash Table) Service: 40 | - [ ] Implements DHT protocol for trackerless torrents 41 | - [ ] Manages DHT node connections and queries 42 | 43 | 6. Download Manager Service: 44 | - [x] Orchestrates the overall download process 45 | - [x] Manages download queues and priorities 46 | 47 | 7. Peer Connection Service: 48 | - [x] Establishes and manages connections to peers 49 | - [x] Implements BitTorrent protocol messaging 50 | 51 | 8. Piece Selection Service: 52 | - [x] Implements piece selection algorithms (e.g., rarest first) 53 | - [x] Tracks piece availability across peers 54 | 55 | 9. File Storage Service: 56 | - [x] Handles writing downloaded pieces to disk 57 | - [x] Manages file allocation and disk space 58 | 59 | 10. 
File Assembly Service: 60 | - [x] Assembles downloaded pieces into complete files 61 | - [x] Verifies file integrity 62 | 63 | 11. User Management Service: 64 | - [ ] Handles user accounts, if applicable 65 | - [ ] Manages user preferences and download history 66 | 67 | Each of these services could be implemented as a separate microservice, potentially in different languages or using different technologies as appropriate. They would communicate via APIs, possibly using REST or gRPC. 68 | 69 | Key considerations for this architecture: 70 | 71 | 1. Service Discovery: Implement a way for services to find and communicate with each other. 72 | 73 | 2. Data Consistency: Ensure data consistency across services, especially for shared state like download progress. 74 | 75 | 3. Fault Tolerance: Design each service to be resilient and the overall system to handle partial failures. 76 | 77 | 4. Scalability: Design services to be independently scalable based on load. 78 | 79 | 5. Monitoring and Logging: Implement comprehensive logging and monitoring across all services. 80 | 81 | 6. Security: Ensure secure communication between services and proper access controls. 82 | 83 | 7. Testing: Implement thorough unit and integration testing for each service and the system as a whole. 
84 | 85 | 86 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | #[derive(Debug)] 4 | #[allow(dead_code)] 5 | pub enum TorrentError { 6 | // Network errors 7 | ConnectionTimedOut(String), 8 | ConnectionFailed(String), 9 | TrackerError(String), 10 | PeerError(String), 11 | 12 | // protocol related errors 13 | InvalidHandshake(String), 14 | InvalidMessage(String), 15 | ProtocolViolation(String), 16 | 17 | // file or io related errors 18 | FileNotFound(String), 19 | PermissionDenied(String), 20 | DiskFull, 21 | IoError(std::io::Error), 22 | 23 | //parsing related errors 24 | InvalidTorrentFile(String), 25 | InvalidMagnetLink(String), 26 | BencodeError(serde_bencode::Error), 27 | 28 | //downloading errors 29 | PieceVerificationFailed(u32), 30 | NoAvailablePeers(u32), 31 | DownloadTimedout, 32 | InsufficientSeeds, 33 | 34 | //config errs 35 | InvalidConfigs(String), 36 | } 37 | 38 | impl fmt::Display for TorrentError { 39 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 40 | match self { 41 | TorrentError::ConnectionTimedOut(msg) => write!(f, "Connection Timed Out: {}", msg), 42 | TorrentError::ConnectionFailed(msg) => write!(f, "Connection failed: {}", msg), 43 | TorrentError::TrackerError(msg) => write!(f, "Tracker error: {}", msg), 44 | TorrentError::PeerError(msg) => write!(f, "Peer error: {}", msg), 45 | TorrentError::InvalidHandshake(msg) => write!(f, "Invalid Handshake: {}", msg), 46 | TorrentError::InvalidMessage(msg) => write!(f, "Invalid message: {}", msg), 47 | TorrentError::ProtocolViolation(msg) => write!(f, "Protocol violation: {}", msg), 48 | TorrentError::FileNotFound(path) => write!(f, "File not found: {}", path), 49 | TorrentError::PermissionDenied(path) => write!(f, "Permission denied: {}", path), 50 | TorrentError::DiskFull => write!(f, "Disk is full."), 51 | TorrentError::IoError(e) => 
write!(f, "IO error: {}", e), 52 | TorrentError::InvalidTorrentFile(msg) => write!(f, "Invalid torrent file: {}", msg), 53 | TorrentError::InvalidMagnetLink(msg) => write!(f, "Invalid magnet link: {}", msg), 54 | TorrentError::BencodeError(e) => write!(f, "Bencode error: {}", e), 55 | TorrentError::PieceVerificationFailed(piece) => { 56 | write!(f, "Piece {} verification failed", piece) 57 | } 58 | TorrentError::NoAvailablePeers(piece) => { 59 | write!(f, "No available peers for piece {}", piece) 60 | } 61 | TorrentError::DownloadTimedout => write!(f, "Download timed out"), 62 | TorrentError::InsufficientSeeds => write!(f, "Insufficient seeds"), 63 | TorrentError::InvalidConfigs(msg) => write!(f, "Invalid configurations: {}", msg), 64 | } 65 | } 66 | } 67 | 68 | impl std::error::Error for TorrentError {} 69 | 70 | pub type Result = std::result::Result; 71 | 72 | impl From for TorrentError { 73 | fn from(value: std::io::Error) -> Self { 74 | match value.kind() { 75 | std::io::ErrorKind::NotFound => TorrentError::FileNotFound(value.to_string()), 76 | std::io::ErrorKind::PermissionDenied => { 77 | TorrentError::PermissionDenied(value.to_string()) 78 | } 79 | std::io::ErrorKind::TimedOut => TorrentError::ConnectionTimedOut(value.to_string()), 80 | _ => TorrentError::IoError(value), 81 | } 82 | } 83 | } 84 | 85 | impl From for TorrentError { 86 | fn from(value: serde_bencode::Error) -> Self { 87 | TorrentError::BencodeError(value) 88 | } 89 | } 90 | 91 | impl From for TorrentError { 92 | fn from(value: reqwest::Error) -> Self { 93 | if value.is_timeout() { 94 | TorrentError::ConnectionTimedOut(value.to_string()) 95 | } else if value.is_connect() { 96 | TorrentError::ConnectionFailed(value.to_string()) 97 | } else { 98 | TorrentError::TrackerError(value.to_string()) 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/magnet.rs: -------------------------------------------------------------------------------- 1 
| use crate::{ 2 | mapper::TorrentMetaData, 3 | peer::{Peer, PeerInfo}, 4 | tracker::{generate_peer_id, request_tracker}, 5 | }; 6 | use data_encoding::BASE32; 7 | use futures::future::join_all; 8 | use percent_encoding::percent_decode_str; 9 | use serde::{Deserialize, Serialize}; 10 | use sha1::{Digest, Sha1}; 11 | use std::{ 12 | collections::{HashMap, HashSet}, 13 | time::Duration, 14 | }; 15 | use tokio::time::timeout; 16 | use url::Url; 17 | 18 | const METADATA_PIECE_SIZE: usize = 16384; //metadata is chunked into 16kb pieces! 19 | const EXTENSION_HANDSHAKE_ID: usize = 0; 20 | const METADATA_EXTENSION_ID: usize = 1; 21 | const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(10); 22 | const PIECE_TIMEOUT: Duration = Duration::from_secs(30); 23 | 24 | //TODO: Add TorrentError to this module 25 | #[derive(Debug, Clone)] 26 | #[allow(dead_code)] 27 | pub struct MagnetInfo { 28 | pub info_hash: [u8; 20], 29 | pub display_name: Option, 30 | pub trackers: Vec, 31 | pub peers: Option>, 32 | } 33 | 34 | #[derive(Debug, Serialize, Deserialize)] 35 | pub struct ExtensionHandshake { 36 | m: HashMap, 37 | metadata_size: Option, 38 | #[serde(skip_serializing_if = "Option::is_none")] 39 | v: Option, 40 | } 41 | 42 | #[derive(Debug, Serialize, Deserialize)] 43 | pub struct MetaDataMessage { 44 | msg_type: i64, 45 | piece: i64, 46 | #[serde(skip_serializing_if = "Option::is_none")] 47 | total_size: Option, 48 | } 49 | 50 | #[allow(dead_code)] 51 | impl MagnetInfo { 52 | pub async fn to_torrent_metadata(&self) -> Result> { 53 | let mut all_peers = Vec::new(); 54 | 55 | println!( 56 | "Attempting to get peers from {} trackers", 57 | self.trackers.len() 58 | ); 59 | for tracker in &self.trackers { 60 | println!("Trying tracker: {}", tracker); 61 | match request_tracker(tracker, &self.info_hash, 0).await { 62 | Ok(peers) => { 63 | println!("Got {} peers from tracker {}", peers.len(), tracker); 64 | all_peers.extend(peers); 65 | } 66 | Err(e) => { 67 | println!("Failed to get 
peers from tracker {}: {}", tracker, e); 68 | continue; 69 | } 70 | } 71 | } 72 | 73 | if all_peers.is_empty() { 74 | return Err("Could not get any peers from any tracker".into()); 75 | } 76 | 77 | println!("Total peers collected: {}", all_peers.len()); 78 | match self.fetch_metadata_from_peers(&all_peers).await { 79 | Ok(metadata_bytes) => { 80 | let torrent_metadata: TorrentMetaData = serde_bencode::from_bytes(&metadata_bytes) 81 | .map_err(|e| format!("Failed to decode metadata: {}", e))?; 82 | Ok(torrent_metadata) 83 | } 84 | Err(e) => Err(format!( 85 | "Failed to get metadata from {} peers: {}", 86 | all_peers.len(), 87 | e 88 | ) 89 | .into()), 90 | } 91 | } 92 | pub async fn fetch_metadata_from_peer( 93 | &self, 94 | peer_info: &PeerInfo, 95 | ) -> Result, Box> { 96 | let mut peer = Peer::new(peer_info.ip.clone(), peer_info.port); 97 | match peer.connect().await { 98 | Ok(_) => println!( 99 | "Connected to peer: {}:{}", 100 | peer.peer_info.ip.clone(), 101 | peer.peer_info.port 102 | ), 103 | Err(e) => println!( 104 | "Failed to connect to peer {}:{} - {}", 105 | peer.peer_info.ip.clone(), 106 | peer_info.port, 107 | e 108 | ), 109 | } 110 | 111 | let mut extension_bits = [0u8; 8]; 112 | extension_bits[5] |= 0x10; 113 | let peer_id = generate_peer_id(); 114 | 115 | match peer.handshake(self.info_hash, peer_id).await { 116 | Ok(_) => println!("Handshake successful!"), 117 | Err(e) => println!("Handshake failed: {}", e), 118 | } 119 | 120 | let mut extension_handshake = HashMap::new(); 121 | extension_handshake.insert("ut_metadata".to_string(), METADATA_EXTENSION_ID as i64); 122 | 123 | let handshake_msg = ExtensionHandshake { 124 | m: extension_handshake, 125 | metadata_size: None, 126 | v: Some("RU0001".to_string()), 127 | }; 128 | 129 | let handshake_bytes = serde_bencode::to_bytes(&handshake_msg).unwrap(); 130 | println!("Handshake Bytes: {:?}", handshake_bytes.clone()); 131 | self.send_extension_message(&mut peer, EXTENSION_HANDSHAKE_ID as u8, 
&handshake_bytes) 132 | .await?; 133 | 134 | let response = timeout(HANDSHAKE_TIMEOUT, peer.receive_msg()) 135 | .await 136 | .unwrap()?; 137 | 138 | if response[0] != 20 { 139 | return Err("Invalid Extension message".into()); 140 | } 141 | 142 | let handshake_resp: ExtensionHandshake = serde_bencode::from_bytes(&response[2..]) 143 | .map_err(|e| format!("Failed handshake mapping :{}", e)) 144 | .unwrap(); 145 | 146 | let metadata_size = handshake_resp 147 | .metadata_size 148 | .ok_or("No metadata received!") 149 | .unwrap(); 150 | let num_pieces = 151 | (metadata_size + METADATA_PIECE_SIZE as i64 - 1) / (METADATA_PIECE_SIZE as i64); 152 | 153 | // Request all metadata pieces 154 | let mut metadata = vec![0u8; metadata_size as usize]; 155 | 156 | for piece in 0..num_pieces { 157 | let msg = MetaDataMessage { 158 | msg_type: 0, // request 159 | piece, 160 | total_size: None, 161 | }; 162 | 163 | let msg_bytes = serde_bencode::to_bytes(&msg)?; 164 | self.send_extension_message(&mut peer, METADATA_EXTENSION_ID as u8, &msg_bytes) 165 | .await?; 166 | 167 | // Wait for piece response 168 | let piece_data = timeout(PIECE_TIMEOUT, peer.receive_msg()).await??; 169 | 170 | if piece_data[0] != 20 { 171 | return Err("Invalid metadata piece response".into()); 172 | } 173 | 174 | // Extract and validate piece data 175 | let start = piece as usize * METADATA_PIECE_SIZE; 176 | let end = std::cmp::min(start + METADATA_PIECE_SIZE, metadata_size as usize); 177 | metadata[start..end].copy_from_slice(&piece_data[2..end - start + 2]); 178 | } 179 | 180 | // Verify metadata hash matches info_hash 181 | let mut hasher = Sha1::new(); 182 | Digest::update(&mut hasher, &metadata); 183 | //hasher.update(&metadata); 184 | let hash: [u8; 20] = hasher.finalize().into(); 185 | 186 | if hash != self.info_hash { 187 | return Err("Metadata hash mismatch".into()); 188 | } 189 | 190 | Ok(metadata) 191 | } 192 | pub async fn fetch_metadata_from_peers( 193 | &self, 194 | peers: &[PeerInfo], 195 | ) -> 
Result, Box> { 196 | let max_concurrent = 10; 197 | 198 | let mut handles = Vec::with_capacity(std::cmp::min(peers.len(), max_concurrent)); 199 | 200 | for peer in peers.iter().take(max_concurrent) { 201 | let peer_info = peer.clone(); 202 | let self_clone = self.clone(); 203 | 204 | let handle = tokio::spawn(async move { 205 | match self_clone.fetch_metadata_from_peer(&peer_info).await { 206 | Ok(metadata) => Some((peer_info.clone(), metadata)), 207 | Err(_) => None, 208 | } 209 | }); 210 | handles.push(handle); 211 | } 212 | 213 | let results = join_all(handles).await; 214 | 215 | let mut valid_metadata = HashSet::new(); 216 | let mut metadata_count = HashMap::new(); 217 | 218 | for result in results { 219 | if let Ok(Some((_peer_info, metadata))) = result { 220 | let metadata_hash = { 221 | let mut hasher = Sha1::new(); 222 | Digest::update(&mut hasher, &metadata); 223 | hasher.finalize().to_vec() 224 | }; 225 | valid_metadata.insert((metadata_hash.clone(), metadata.clone())); 226 | *metadata_count.entry(metadata_hash).or_insert(0) += 1 227 | } 228 | } 229 | if let Some((most_common_hash, _)) = metadata_count.iter().max_by_key(|&(_, count)| count) { 230 | if let Some((_, metadata)) = valid_metadata 231 | .iter() 232 | .find(|(hash, _)| hash == most_common_hash) 233 | { 234 | return Ok(metadata.clone()); 235 | } 236 | } 237 | Err("failed to get consistent emtadata from peers".into()) 238 | } 239 | 240 | async fn send_extension_message( 241 | &self, 242 | peer: &mut Peer, 243 | extension_id: u8, 244 | payload: &[u8], 245 | ) -> Result<(), Box> { 246 | let msg_len = 2 + payload.len(); 247 | let mut message = Vec::with_capacity(4 + msg_len); 248 | 249 | message.extend_from_slice(&(msg_len as u32).to_be_bytes()); 250 | message.push(20); // Extension message ID 251 | message.push(extension_id); 252 | message.extend_from_slice(payload); 253 | 254 | peer.send_msg(&message).await?; 255 | Ok(()) 256 | } 257 | /// parses the link to extract info-hash, 
display-name(optiona), tracker urls and peer info! 258 | pub fn parse(magnet_url: &str) -> Result> { 259 | let url = Url::parse(magnet_url)?; 260 | 261 | // Get all query parameters, including duplicates 262 | let params: Vec<(String, String)> = url 263 | .query_pairs() 264 | .map(|(k, v)| (k.into_owned(), v.into_owned())) 265 | .collect(); 266 | 267 | let info_hash = if let Some((_, xt)) = params.iter().find(|(k, _)| k == "xt") { 268 | if let Some(hash) = xt.strip_prefix("urn:btih:") { 269 | if hash.len() == 40 { 270 | let mut result = [0u8; 20]; 271 | hex::decode_to_slice(hash, &mut result)?; 272 | result 273 | } else if hash.len() == 32 { 274 | let mut result = [0u8; 20]; 275 | let decoded = BASE32.decode(hash.as_bytes())?; 276 | result.copy_from_slice(&decoded); 277 | result 278 | } else { 279 | return Err("Invalid info hash".into()); 280 | } 281 | } else { 282 | return Err("Invalid xt parameter".into()); 283 | } 284 | } else { 285 | return Err("Missing xt parameter".into()); 286 | }; 287 | 288 | let display_name = params 289 | .iter() 290 | .find(|(k, _)| k == "dn") 291 | .map(|(_, v)| percent_decode_str(v).decode_utf8_lossy().into_owned()); 292 | 293 | // Collect all trackers (tr parameters) 294 | let trackers = params 295 | .iter() 296 | .filter(|(k, _)| k == "tr") 297 | .map(|(_, v)| percent_decode_str(v).decode_utf8_lossy().into_owned()) 298 | .collect::>(); 299 | 300 | println!("Found {} trackers in magnet link:", trackers.len()); 301 | for tracker in &trackers { 302 | println!(" - {}", tracker); 303 | } 304 | 305 | let peers = Some( 306 | params 307 | .iter() 308 | .filter(|(k, _)| k.starts_with("x.pe")) 309 | .map(|(_, v)| percent_decode_str(v).decode_utf8_lossy().into_owned()) 310 | .collect(), 311 | ); 312 | 313 | Ok(MagnetInfo { 314 | info_hash, 315 | display_name, 316 | trackers, 317 | peers, 318 | }) 319 | } 320 | //TODO:: Modify the magnet structure to handle offsets 321 | 322 | //pub async fn download(&self, output_dir: &str) -> Result<(), Box> { 
323 | // println!("Fetching metadata from the magnet link!"); 324 | // 325 | // let metadata = self 326 | // .to_torrent_metadata() 327 | // .await 328 | // .expect("Failed to convert the link to metadata!"); 329 | // 330 | // println!("Getting the peer list...!"); 331 | // 332 | // let peers = request_peers(&metadata) 333 | // .await 334 | // .expect("Failed to request peers from magnet link metadata!"); 335 | // if peers.is_empty() { 336 | // return Err("No peer is available!".into()); 337 | // } 338 | // 339 | // tokio::fs::create_dir_all(output_dir) 340 | // .await 341 | // .expect("Failed to create a directory!"); 342 | // 343 | // let pieces_length = metadata.get_pieces_length(); 344 | // let pieces_hashes = metadata.get_pieces_hashes(); 345 | // let file_structure = metadata.get_file_structure(); 346 | // let total_pieces = pieces_hashes.len(); 347 | // 348 | // println!("Starting the download of {} pieces...!", total_pieces); 349 | // 350 | // let mut good_peer = None; 351 | // for peer in peers { 352 | // let mut tmp_peer = Peer::new(peer.ip.clone(), peer.port); 353 | // 354 | // match tmp_peer.connect().await { 355 | // Ok(_) => { 356 | // println!("Connected to {}:{}", peer.ip.clone(), peer.port); 357 | // let info_hash = metadata 358 | // .calculate_info_hash() 359 | // .expect("Failed to calculate the info hash!"); 360 | // match tmp_peer.handshake(info_hash, generate_peer_id()).await { 361 | // Ok(_) => { 362 | // println!("Handshake successful!"); 363 | // good_peer = Some(tmp_peer); 364 | // break; 365 | // } 366 | // Err(e) => { 367 | // println!("Handshake Failed! {}", e); 368 | // continue; 369 | // } 370 | // } 371 | // } 372 | // Err(_) => { 373 | // println!("Failed to connect to the peer!"); 374 | // continue; 375 | // } 376 | // } 377 | // } 378 | // let mut peer = good_peer.ok_or("Could not find a peer!").unwrap(); 379 | // 380 | // for (piece_index, piece_hash) in pieces_hashes.iter().enumerate() { 381 | // println!("Downloading {}/{} ... 
", piece_index + 1, total_pieces); 382 | // let file_path = format!("{}/piece_{}", output_dir, piece_index); 383 | // match peer 384 | // .request_piece(piece_index as u32, pieces_length as u32, &file_path) 385 | // .await 386 | // { 387 | // Ok(_) => { 388 | // let piece_data = tokio::fs::read(file_path.as_str()) 389 | // .await 390 | // .expect("Failed to read the file"); 391 | // let mut hasher = Sha1::new(); 392 | // Digest::update(&mut hasher, &piece_data); 393 | // let downloaded_data: [u8; 20] = hasher.finalize().into(); 394 | // if &downloaded_data != piece_hash { 395 | // return Err( 396 | // format!("Piece {} hash verification failed!", piece_index).into() 397 | // ); 398 | // } 399 | // println!("Piece {} verified sucessfully!", piece_index); 400 | // } 401 | // Err(e) => { 402 | // return Err(format!("Failed to receive piece {}: {}", piece_index, e).into()); 403 | // } 404 | // } 405 | // } 406 | // println!("Reconstructing the file tree.."); 407 | // for (file_path, file_length) in file_structure { 408 | // let output_path = format!("{}/{}", output_dir, file_path); 409 | // if let Some(parent) = Path::new(&output_path).parent() { 410 | // tokio::fs::create_dir_all(parent).await?; 411 | // } 412 | // let mut output_file = tokio::fs::File::create(&output_path) 413 | // .await 414 | // .expect("Failed to create output file!"); 415 | // let mut bytes_written = 0i64; 416 | // 417 | // while bytes_written < file_length { 418 | // let piece_index = (bytes_written / pieces_length as i64) as usize; 419 | // let piece_path = format!("{}/piece_{}", output_dir, piece_index); 420 | // let mut piece_data = tokio::fs::File::open(&piece_path).await?; 421 | // 422 | // let offset = bytes_written % pieces_length as i64; 423 | // piece_data 424 | // .seek(std::io::SeekFrom::Start(offset as u64)) 425 | // .await 426 | // .unwrap(); 427 | // 428 | // let bytes_to_write = 429 | // std::cmp::min(pieces_length as i64 - offset, file_length - bytes_written) 430 | // as usize; 
431 | // let mut buffer = vec![0u8; bytes_to_write]; 432 | // piece_data.read_exact(&mut buffer).await?; 433 | // output_file.write_all(&buffer).await?; 434 | // 435 | // bytes_written += bytes_to_write as i64; 436 | // } 437 | // } 438 | // for piece_index in 0..total_pieces { 439 | // let piece_path = format!("{}/piece_{}", output_dir, piece_index); 440 | // tokio::fs::remove_file(piece_path).await?; 441 | // } 442 | // println!("Download Completed Successfully!"); 443 | // Ok(()) 444 | //} 445 | } 446 | 447 | #[cfg(test)] 448 | mod tests { 449 | 450 | use super::*; 451 | 452 | #[test] 453 | fn test_magnet_parser() { 454 | let magnet = "magnet:?xt=urn:btih:12451f81a977a2d8bb402f21cd643422c5d4c50a&dn=The.Agency.2024.S01E05.WEB.x264-TORRENTGALAXY&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Fexodus.desync.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.cyberia.is%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.torrent.eu.org%3A451%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.birkenwald.de%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.moeking.me%3A6969%2Fannounce&tr=udp%3A%2F%2Fipv4.tracker.harry.lu%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.tiny-vps.com%3A6969%2Fannounce"; 455 | 456 | let result = MagnetInfo::parse(magnet).unwrap(); 457 | 458 | assert_eq!( 459 | result.display_name.unwrap().as_str(), 460 | "The.Agency.2024.S01E05.WEB.x264-TORRENTGALAXY" 461 | ); 462 | } 463 | #[test] 464 | fn test_base32_magnet() { 465 | let magnet = "magnet:?xt=urn:btih:c9e15763f722f23e98a29decdfae341b98d53056&dn=Test&tr=udp%3A%2F%2Ftracker.example.org%3A6969"; 466 | let magnet_info = MagnetInfo::parse(magnet).unwrap(); 467 | assert!(magnet_info.info_hash.len() == 20); 468 | } 469 | 470 | #[tokio::test] 471 | async fn test_fetch_metadata_from_peers() { 472 | let magnet = 
"magnet:?xt=urn:btih:678BC6AC22A5BEFAC6BBC50834E91D4F9755DEE4&dn=Dragon%26%23039%3Bs+Dogma+2+%28Dev+Build+v1.0.0.1%2C+MULTi14%29+%5BFitGirl+Repack%2C+Selective+Download+-+from+38.4+GB%5D%5D&tr=udp%3A%2F%2Fopentor.net%3A6969&tr=udp%3A%2F%2Ftracker.torrent.eu.org%3A451%2Fannounce&tr=udp%3A%2F%2Ftracker.theoks.net%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.ccp.ovh%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=http%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=https%3A%2F%2Ftracker.tamersunion.org%3A443%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=http%3A%2F%2Ftracker.bt4g.com%3A2095%2Fannounce&tr=udp%3A%2F%2Fbt2.archive.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fbt1.archive.org%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.filemail.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker1.bt.moack.co.kr%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=http%3A%2F%2Ftracker.openbittorrent.com%3A80%2Fannounce&tr=udp%3A%2F%2Fopentracker.i2p.rocks%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.internetwarriors.net%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.leechers-paradise.org%3A6969%2Fannounce&tr=udp%3A%2F%2Fcoppersurfer.tk%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.zer0day.to%3A1337%2Fannounce"; 473 | 474 | let magnet_info = MagnetInfo::parse(magnet).unwrap(); 475 | println!("Getting the peers from trackers..."); 476 | 477 | let mut peers = Vec::new(); 478 | for tracker in &magnet_info.trackers { 479 | match request_tracker(tracker, &magnet_info.info_hash, 0).await { 480 | Ok(peer_vec) => { 481 | println!("Got {} number of peers from {}", peer_vec.len(), tracker); 482 | peers.extend(peer_vec); 483 | } 484 | Err(e) => { 485 | println!("Failed to get peers from tracker {}, {}", tracker, e); 486 | continue; 487 | } 488 | } 489 | } 490 | peers.sort_by_key(|p| (p.ip.clone(), p.port)); 491 | peers.dedup_by_key(|p| (p.ip.clone(), p.port)); 492 | 493 | if peers.is_empty() { 494 | 
panic!("could not find any peers"); 495 | } 496 | println!("Testing with {} unique peers", peers.len()); 497 | 498 | match magnet_info.fetch_metadata_from_peers(&peers).await { 499 | Ok(metadata) => { 500 | println!("Fetched metadata {} bytes", metadata.len()); 501 | assert!(!metadata.is_empty(), "Metadata should not be empty"); 502 | } 503 | Err(e) => { 504 | panic!("Failed to fetch metadata {}", e); 505 | } 506 | } 507 | } 508 | 509 | //#[tokio::test] 510 | //async fn test_magnet_download() { 511 | // let magnet = "magnet:?xt=urn:btih:c0084178f6df4d5d11f87e61f0f84c6de0c72993&dn=MomWantsToBreed%2024%2012%2020%20Alexis%20Malone%20Stepmom%20Only%20Says%20Yes%20XXX%20480p%20MP4-XXX%20[XC]&tr=udp%3A%2F%2Fopen.stealth.si%3A80%2Fannounce&tr=udp%3A%2F%2Fexodus.desync.com%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.cyberia.is%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannounce&tr=udp%3A%2F%2Ftracker.torrent.eu.org%3A451%2Fannounce&tr=udp%3A%2F%2Fexplodie.org%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.birkenwald.de%3A6969%2Fannounce&tr=udp%3A%2F%2Ftracker.moeking.me%3A6969%2Fannounce&tr=udp%3A%2F%2Fipv4.tracker.harry.lu%3A80%2Fannounce&tr=udp%3A%2F%2Ftracker.tiny-vps.com%3A6969%2Fannounce"; 512 | // 513 | // let magnet_info = MagnetInfo::parse(magnet).expect("Failed to parse magnet link"); 514 | // println!("Successfully parsed magnet link"); 515 | // println!("Display name: {:?}", magnet_info.display_name); 516 | // println!("Number of trackers: {}", magnet_info.trackers.len()); 517 | // 518 | // let output_dir = "./test_downloads"; 519 | // 520 | // match magnet_info.download(output_dir).await { 521 | // Ok(_) => println!("Download completed successfully"), 522 | // Err(e) => { 523 | // println!("Download failed: {}", e); 524 | // panic!("Download test failed"); 525 | // } 526 | // } 527 | //} 528 | } 529 | -------------------------------------------------------------------------------- /src/main.rs: 
-------------------------------------------------------------------------------- 1 | mod error; 2 | mod magnet; 3 | mod mapper; 4 | mod parser; 5 | mod peer; 6 | mod tracker; 7 | fn main() { 8 | println!("Hello!") 9 | } 10 | -------------------------------------------------------------------------------- /src/mapper.rs: -------------------------------------------------------------------------------- 1 | use crate::error::*; 2 | use linked_hash_set::LinkedHashSet; 3 | use serde::{Deserialize, Serialize}; 4 | use serde_bytes::ByteBuf; 5 | use sha1::Digest; 6 | use sha1::Sha1; 7 | use std::collections::HashMap; 8 | use std::fs::File; 9 | use std::io::BufReader; 10 | use std::path::Path; 11 | 12 | /// Stores length and path parameters in a torrent file 13 | #[derive(Serialize, Deserialize, Debug)] 14 | pub struct TorrentFile { 15 | pub length: i64, 16 | pub path: Vec, 17 | } 18 | 19 | /// Stores the torrent info present in a torrent file 20 | #[derive(Debug, Serialize, Deserialize)] 21 | pub struct TorrentInfo { 22 | pub name: String, 23 | #[serde(rename = "piece length")] 24 | pub piece_length: i64, 25 | #[serde(with = "serde_bytes")] 26 | pub pieces: ByteBuf, 27 | pub files: Option>, 28 | pub length: Option, // optional 29 | #[serde(default)] 30 | pub private: i64, 31 | } 32 | 33 | /// Stores the Actual MetaData of a torrent file 34 | #[derive(Debug, Serialize, Deserialize)] 35 | pub struct TorrentMetaData { 36 | announce: String, 37 | #[serde(rename = "announce-list")] 38 | announce_list: Option>>, 39 | azureus_properties: Option>, 40 | #[serde(rename = "created by")] 41 | created_by: String, 42 | #[serde(rename = "creation date")] 43 | creation_date: i64, 44 | encoding: Option, 45 | pub info: TorrentInfo, 46 | publisher: Option, 47 | #[serde(rename = "publisher-url")] 48 | publisher_url: Option, 49 | } 50 | #[allow(dead_code)] 51 | impl TorrentMetaData { 52 | /// Reads a json file and maps its data to a TorrentMetaData format 53 | pub fn from_json_file(path: &str) 
-> Result { 54 | let file_path = Path::new(path); 55 | let file = File::open(file_path).expect("Could not open the file"); 56 | let reader = BufReader::new(file); 57 | 58 | let torrent_meta_data = 59 | serde_json::from_reader(reader).expect("serde could not read the buffer"); 60 | Ok(torrent_meta_data) 61 | } 62 | 63 | pub fn calculate_total_pieces(&self) -> u32 { 64 | let piece_bytes = self.info.pieces.as_ref(); 65 | let total = piece_bytes.len() / 20; 66 | total as u32 67 | } 68 | /// Reads a torrent file and maps ints data to a TorrentMetaData format 69 | pub fn from_trnt_file(path: &str) -> Result { 70 | let file_path = Path::new(path); 71 | // let file = File::open(file_path).expect("failed to open the file"); 72 | let bytes = std::fs::read(file_path).expect("failed to read the file"); 73 | let torrent: TorrentMetaData = serde_bencode::from_bytes(&bytes) 74 | .map_err(|e| { 75 | format!( 76 | "Failed to decode bencode from {}: {}", 77 | file_path.display(), 78 | e 79 | ) 80 | }) 81 | .unwrap(); 82 | Ok(torrent) 83 | } 84 | 85 | /// gets tracker urls 86 | pub fn get_tracker_url(&self) -> Vec { 87 | let mut trackers = LinkedHashSet::new(); 88 | let main_url = self.announce.clone(); 89 | trackers.insert(main_url); 90 | 91 | if let Some(secondary_urls) = &self.announce_list { 92 | for sub_url in secondary_urls { 93 | for url in sub_url { 94 | trackers.insert(url.clone()); 95 | } 96 | } 97 | } else { 98 | println!("No secondary URLs found."); // Debug print 99 | } 100 | trackers.into_iter().collect() 101 | } 102 | 103 | /// Gets the pieces length for each info 104 | pub fn get_pieces_length(&self) -> i64 { 105 | self.info.piece_length 106 | } 107 | 108 | /// Gets pieces hashes stored in Info 109 | pub fn get_pieces_hashes(&self) -> Vec<[u8; 20]> { 110 | let pieces_bytes = self.info.pieces.as_ref(); 111 | println!("length of pieces data: {}", pieces_bytes.len()); 112 | if pieces_bytes.len() % 20 != 0 { 113 | panic!("The length of the pieces string is not a multiple 
of 20"); 114 | } 115 | pieces_bytes 116 | .chunks(20) 117 | .map(|chunk| { 118 | let mut hash = [0u8; 20]; 119 | hash.copy_from_slice(chunk); 120 | hash 121 | }) 122 | .collect() 123 | } 124 | 125 | /// Gets the total size of files in a torrent file 126 | pub fn get_total_size(&self) -> i64 { 127 | if let Some(files) = &self.info.files { 128 | return files.iter().map(|file| file.length).sum(); 129 | } 130 | self.info.length.unwrap_or(0) 131 | } 132 | 133 | /// Gets the file structure for later use 134 | pub fn get_file_structure(&self) -> Vec<(String, i64)> { 135 | if let Some(files) = &self.info.files { 136 | files 137 | .iter() 138 | .map(|file| (file.path.join("/"), file.length)) 139 | .collect() 140 | } else { 141 | vec![(self.info.name.clone(), self.info.length.unwrap_or(0))] 142 | } 143 | } 144 | 145 | /// Calculates the complete info hash 146 | pub fn calculate_info_hash(&self) -> Result<[u8; 20]> { 147 | let info_bencoded = serde_bencode::to_bytes(&self.info)?; 148 | let mut hasher = Sha1::new(); 149 | hasher.update(&info_bencoded); 150 | Ok(hasher.finalize().into()) 151 | } 152 | } 153 | 154 | #[cfg(test)] 155 | mod tests { 156 | use crate::tracker::urlencode; 157 | 158 | use super::*; 159 | 160 | macro_rules! 
test_torrent { 161 | ($name:ident,$method:ident) => { 162 | #[test] 163 | fn $name() { 164 | let path = "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent"; 165 | let result = TorrentMetaData::from_trnt_file(path).unwrap(); 166 | let output = result.$method(); 167 | 168 | println!("{}: {:?}", stringify!($method), output); 169 | } 170 | }; 171 | } 172 | 173 | #[test] 174 | fn test_from_file() { 175 | let path = "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent"; 176 | let result = TorrentMetaData::from_trnt_file(path).unwrap(); 177 | println!("torrent file publishe: {:?}", result.publisher) 178 | } 179 | 180 | test_torrent!(test_get_pieces_length, get_pieces_length); 181 | test_torrent!(test_get_file_structure, get_file_structure); 182 | test_torrent!(test_get_tracker_url, get_tracker_url); 183 | test_torrent!(test_get_total_size, get_total_size); 184 | test_torrent!(test_get_pieces_hashes, get_pieces_hashes); 185 | test_torrent!(test_hash_info, calculate_info_hash); 186 | #[tokio::test] 187 | async fn debug_info_hash() { 188 | let path = r"C:\Users\Lenovo\Downloads\ubuntu-24.10-desktop-amd64.iso.torrent"; 189 | let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap(); 190 | 191 | let info_hash = torrent_meta_data.calculate_info_hash().unwrap(); 192 | // println!("Raw info hash bytes: {:?}", info_hash); 193 | println!("URL encoded info hash: {}", urlencode(&info_hash)); 194 | println!( 195 | "Url encoded info hash using earlier funct: {}", 196 | urlencode(&info_hash) 197 | ); 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /src/parser.rs: -------------------------------------------------------------------------------- 1 | use crate::error::*; 2 | use crate::mapper::TorrentMetaData; 3 | use std::fs::File; 4 | use std::io::Write; 5 | use std::path::Path; 6 | 7 | /// Decodes the torrent file as a json 8 | #[allow(dead_code)] 9 | pub fn decode_json(bpath: &str, opath: &str) -> 
Result<()> {
    let input_path = Path::new(bpath);
    let output_path = Path::new(opath);

    if !input_path.exists() {
        return Err(TorrentError::FileNotFound(bpath.to_string()));
    }

    // Make sure the output directory exists before creating the file.
    if let Some(parent) = output_path.parent() {
        if !parent.exists() {
            std::fs::create_dir_all(parent).map_err(|e| {
                TorrentError::PermissionDenied(format!(
                    "Cannot create output directory {}: {}",
                    parent.display(),
                    e
                ))
            })?;
        }
    }

    let torrent_bytes = std::fs::read(input_path).map_err(|e| match e.kind() {
        std::io::ErrorKind::PermissionDenied => {
            TorrentError::PermissionDenied(format!("Cannot read file: {}", bpath))
        }
        std::io::ErrorKind::NotFound => TorrentError::FileNotFound(bpath.to_string()),
        _ => TorrentError::IoError(e),
    })?;

    let torrent: TorrentMetaData = serde_bencode::from_bytes(&torrent_bytes).map_err(|e| {
        TorrentError::InvalidTorrentFile(format!(
            "Bencode decode error in {}: {}",
            input_path.display(),
            e
        ))
    })?;

    let json_content = serde_json::to_string_pretty(&torrent).map_err(|e| {
        TorrentError::InvalidTorrentFile(format!("JSON serialization failed: {}", e))
    })?;

    // `File::create` truncates an existing file rather than failing, so the
    // former `AlreadyExists` match arm here was unreachable and was removed.
    let mut file = File::create(output_path).map_err(|e| match e.kind() {
        std::io::ErrorKind::PermissionDenied => {
            TorrentError::PermissionDenied(format!("Cannot create file: {}", output_path.display()))
        }
        _ => TorrentError::IoError(e),
    })?;

    file.write_all(json_content.as_bytes())
        .map_err(|e| match e.kind() {
            std::io::ErrorKind::WriteZero => TorrentError::DiskFull,
            std::io::ErrorKind::PermissionDenied => TorrentError::PermissionDenied(format!(
                "Cannot write to file: {}",
                output_path.display()
            )),
            _ => TorrentError::IoError(e),
        })?;

    println!(
        "Successfully decoded torrent file to JSON at: {}",
        output_path.display()
    );
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_decode_json() {
        decode_json(
            "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent",
            "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].json",
        )
        .unwrap();
    }
}

use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::time::Duration;
use tokio::fs::OpenOptions;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::time::timeout;

use crate::error::{Result, TorrentError};
use crate::mapper::TorrentMetaData;

//TODO: have to modify the mapper to optionally look for some fields but otherwise ignore them if
//they dont exist!
14 | //TODO: Go through the downloading process once again and implement multi threading 15 | //TODO: Should start the downloading process once it has found a peer with the right bitfields 16 | //then run the rest of the process in the background 17 | //TODO: Add rarest first algo to download the pieces that fewer peers have first 18 | //TODO: Refactor getting piece availability to be dynamically called instead of once at the 19 | //beginning 20 | //TODO: Skip pieces that fail to be downloaded- must have them saved somewhere to download them 21 | //later on 22 | //TODO: Add piece verification 23 | 24 | #[derive(Debug, Deserialize, Serialize, Clone)] 25 | pub struct PeerInfo { 26 | pub ip: String, 27 | pub port: u16, 28 | } 29 | 30 | #[allow(dead_code)] 31 | #[derive(Debug)] 32 | pub struct Peer { 33 | pub peer_info: PeerInfo, 34 | stream: Option, 35 | pub bitfields: Option>, 36 | pub is_choked: bool, 37 | pub is_interested: bool, 38 | pub piece_availability: HashSet, 39 | } 40 | 41 | #[allow(dead_code)] 42 | #[derive(Debug)] 43 | pub struct PieceDownloader { 44 | pub peers: Vec, 45 | pub current_peer_idx: usize, 46 | pub hash_info: [u8; 20], 47 | pub peer_id: [u8; 20], 48 | } 49 | 50 | #[allow(dead_code)] 51 | impl PieceDownloader { 52 | pub fn new(peers: Vec, info_hash: [u8; 20], peer_id: [u8; 20]) -> Self { 53 | let peers = peers 54 | .into_iter() 55 | .map(|peer_info| Peer { 56 | peer_info, 57 | stream: None, 58 | bitfields: None, 59 | is_choked: true, 60 | is_interested: false, 61 | piece_availability: HashSet::new(), 62 | }) 63 | .collect(); 64 | 65 | PieceDownloader { 66 | peers, 67 | current_peer_idx: 0, 68 | hash_info: info_hash, 69 | peer_id, 70 | } 71 | } 72 | // a method to initialize all peer connections and gather piece availability 73 | // should be called first so that we could run get_piece_availability on peers 74 | pub async fn initialize_peers(&mut self) -> Result<()> { 75 | let mut successful_connections = 0; 76 | 77 | // connect to all peers 78 
| for peer_idx in 0..self.peers.len() { 79 | self.current_peer_idx = peer_idx; 80 | let peer = &mut self.peers[peer_idx]; 81 | 82 | match peer.connect().await { 83 | Ok(()) => match peer.handshake(self.hash_info, self.peer_id).await { 84 | Ok(()) => match peer.receive_init_msg().await { 85 | Ok(()) => { 86 | successful_connections += 1; 87 | println!( 88 | "Successfully initialized peer {}:{}", 89 | peer.peer_info.ip, peer.peer_info.port 90 | ); 91 | } 92 | Err(e) => println!("Failed to receive bitfield: {}", e), 93 | }, 94 | Err(e) => println!("Handshake failed: {}", e), 95 | }, 96 | Err(e) => println!("Connection failed: {}", e), 97 | } 98 | } 99 | if successful_connections == 0 { 100 | return Err(TorrentError::PeerError( 101 | "couldnt connect to any peer".to_string(), 102 | )); 103 | } 104 | 105 | Ok(()) 106 | } 107 | 108 | /// Enumerates through the peers and finds map the piece index to peers that offer that piece! 109 | pub fn get_piece_availability(&self, total_pieces: u32) -> HashMap> { 110 | let mut pices_with_peers: HashMap> = HashMap::new(); 111 | for piece in 0..total_pieces { 112 | let peers_with_piece: Vec = self 113 | .peers 114 | .iter() 115 | .enumerate() 116 | .filter(|(_, peer)| peer.piece_availability.contains(&piece)) 117 | .map(|(idx, _)| idx) 118 | .collect(); 119 | if !peers_with_piece.is_empty() { 120 | pices_with_peers.insert(piece, peers_with_piece); 121 | } 122 | } 123 | pices_with_peers 124 | } 125 | pub async fn download_piece( 126 | &mut self, 127 | index: u32, 128 | piece_length: u32, 129 | file_path: &str, 130 | total_pieces: u32, 131 | ) -> Result<()> { 132 | let mut offset = 0; 133 | let mut retries_with_same_peer = 0; 134 | const MAX_RETRIES_PER_PEER: i32 = 2; 135 | 136 | while offset < piece_length { 137 | let peers_per_piece = self.get_piece_availability(total_pieces); 138 | if let Some(peers) = peers_per_piece.get(&index) { 139 | let mut peer_idx = 0; 140 | while peer_idx < peers.len() { 141 | self.current_peer_idx = 
peers[peer_idx]; 142 | 143 | let peer = &mut self.peers[self.current_peer_idx]; 144 | println!( 145 | "Attempting download of piece {} from peer {}:{}", 146 | index, peer.peer_info.ip, peer.peer_info.port 147 | ); 148 | 149 | match peer 150 | .request_piece(index, piece_length, offset, file_path) 151 | .await 152 | { 153 | Ok(bytes_downloaded) => { 154 | offset += bytes_downloaded; 155 | retries_with_same_peer = 0; 156 | if offset >= piece_length { 157 | return Ok(()); 158 | } 159 | } 160 | Err(_e) => { 161 | println!( 162 | "Error downloading from peer: {}:{}", 163 | peer.peer_info.ip, peer.peer_info.port 164 | ); 165 | retries_with_same_peer += 1; 166 | if retries_with_same_peer >= MAX_RETRIES_PER_PEER { 167 | println!( 168 | "Switching to the next peer after {} retries", 169 | retries_with_same_peer 170 | ); 171 | retries_with_same_peer = 0; 172 | peer_idx += 1; 173 | } else { 174 | tokio::time::sleep(Duration::from_secs(1)).await; 175 | } 176 | } 177 | } 178 | } 179 | } 180 | } 181 | Ok(()) 182 | } 183 | pub async fn download_torrent( 184 | &mut self, 185 | torrent: &TorrentMetaData, 186 | output_dir: &str, 187 | ) -> Result<()> { 188 | self.initialize_peers().await?; 189 | 190 | let piece_length = torrent.get_pieces_length() as u32; 191 | let total_pieces = torrent.calculate_total_pieces(); 192 | 193 | let temp_dir = format!("{}/temp_pieces", output_dir); 194 | tokio::fs::create_dir_all(&temp_dir).await?; 195 | 196 | let mut downloaded_pieces = HashSet::new(); 197 | 198 | let piece_availability = self.get_piece_availability(total_pieces); 199 | 200 | for piece_index in 0..total_pieces { 201 | if downloaded_pieces.contains(&piece_index) { 202 | continue; 203 | } 204 | let actual_piece_length = if piece_index == total_pieces - 1 { 205 | let total_size = torrent.get_total_size(); 206 | let remainder = total_size % torrent.get_pieces_length(); 207 | if remainder == 0 { 208 | piece_length 209 | } else { 210 | remainder as u32 211 | } 212 | } else { 213 | 
piece_length 214 | }; 215 | 216 | let temp_piece_path = format!("{}/piece_{}", temp_dir, piece_index); 217 | 218 | if let Some(peer_indices) = piece_availability.get(&piece_index) { 219 | let mut success = false; 220 | 221 | for &peer_idx in peer_indices { 222 | self.current_peer_idx = peer_idx; 223 | match self 224 | .download_piece( 225 | piece_index, 226 | actual_piece_length, 227 | &temp_piece_path, 228 | total_pieces, 229 | ) 230 | .await 231 | { 232 | Ok(_) => { 233 | downloaded_pieces.insert(piece_index); 234 | success = true; 235 | println!("Successfully downloaded and verified piece {}", piece_index); 236 | break; 237 | } 238 | Err(e) => { 239 | println!("failed to download the piece {}: {}", piece_index, e); 240 | } 241 | } 242 | } 243 | if !success { 244 | return Err(TorrentError::DownloadTimedout); 245 | } 246 | } else { 247 | return Err(TorrentError::NoAvailablePeers(piece_index)); 248 | } 249 | } 250 | println!("All pieces downloaded! Assembling files..."); 251 | PieceDownloader::assemble_files(torrent, &temp_dir, output_dir).await?; 252 | tokio::fs::remove_dir_all(&temp_dir).await?; 253 | Ok(()) 254 | } 255 | pub async fn assemble_files( 256 | torrent: &TorrentMetaData, 257 | temp_dir: &str, 258 | output_dir: &str, 259 | ) -> Result<()> { 260 | let piece_length = torrent.get_pieces_length() as u64; 261 | let file_structure = torrent.get_file_structure(); 262 | let mut absolute_offset = 0u64; 263 | 264 | for (file_path, file_length) in file_structure { 265 | let full_path = format!("{}/{}", output_dir, file_path); 266 | 267 | if let Some(parent) = std::path::Path::new(&full_path).parent() { 268 | tokio::fs::create_dir_all(parent).await?; 269 | } 270 | let mut outputfile = tokio::fs::OpenOptions::new() 271 | .create(true) 272 | .write(true) 273 | .truncate(true) 274 | .open(&full_path) 275 | .await?; 276 | 277 | let file_length = file_length as u64; 278 | let mut bytes_written = 0u64; 279 | 280 | while bytes_written < file_length { 281 | let 
current_piece = (absolute_offset / piece_length) as u32; 282 | let offset_in_piece = absolute_offset % piece_length; 283 | 284 | let piece_path = format!("{}/piece_{}", temp_dir, current_piece); 285 | let piece_data = tokio::fs::read(&piece_path).await?; 286 | 287 | let bytes_remaining_in_piece = piece_data.len() as u64 - offset_in_piece; 288 | let bytes_remaining_in_file = file_length - bytes_written; 289 | 290 | let bytes_to_write = 291 | std::cmp::min(bytes_remaining_in_piece, bytes_remaining_in_file) as usize; 292 | outputfile 293 | .write_all( 294 | &piece_data 295 | [offset_in_piece as usize..(offset_in_piece as usize + bytes_to_write)], 296 | ) 297 | .await?; 298 | bytes_written += bytes_to_write as u64; 299 | absolute_offset += bytes_to_write as u64; 300 | } 301 | } 302 | Ok(()) 303 | } 304 | } 305 | 306 | impl Peer { 307 | pub fn new(ip: String, port: u16) -> Self { 308 | let peer_info = PeerInfo { ip, port }; 309 | Peer { 310 | peer_info, 311 | stream: None, 312 | bitfields: None, 313 | is_choked: true, 314 | is_interested: false, 315 | piece_availability: HashSet::new(), 316 | } 317 | } 318 | 319 | pub async fn connect(&mut self) -> Result<()> { 320 | let address = format!("{}:{}", self.peer_info.ip, self.peer_info.port); 321 | let connect_future = TcpStream::connect(&address); 322 | match timeout(Duration::from_secs(5), connect_future).await { 323 | Ok(Ok(stream)) => { 324 | self.stream = Some(stream); 325 | println!("Successfully connected to stream: {}", address); 326 | Ok(()) 327 | } 328 | Ok(Err(e)) => Err(TorrentError::ConnectionFailed( 329 | format!("Connection error to peer {}: {}", address, e).into(), 330 | )), 331 | Err(_) => Err(TorrentError::ConnectionTimedOut(format!( 332 | "Connection timeout to {}", 333 | address 334 | ))), 335 | } 336 | } 337 | 338 | pub async fn handshake(&mut self, info_hash: [u8; 20], peer_id: [u8; 20]) -> Result<()> { 339 | if self.stream.is_none() { 340 | return Err(TorrentError::PeerError( 341 | "Stream not 
established yet".to_string(), 342 | )); 343 | } 344 | 345 | let mut handshake_msg = Vec::with_capacity(68); 346 | let stream = self.stream.as_mut().unwrap(); 347 | 348 | handshake_msg.push(19); 349 | handshake_msg.extend_from_slice(b"BitTorrent protocol"); 350 | handshake_msg.extend_from_slice(&[0u8; 8]); 351 | handshake_msg.extend_from_slice(&info_hash); 352 | handshake_msg.extend_from_slice(&peer_id); 353 | 354 | match timeout(Duration::from_secs(4), stream.write_all(&handshake_msg)).await { 355 | Ok(result) => result?, 356 | Err(_) => { 357 | return Err(TorrentError::ConnectionTimedOut( 358 | "Handshake send timeout".to_string(), 359 | )); 360 | } 361 | }; 362 | 363 | println!("Handshake sent, waiting for response..."); 364 | 365 | let mut response = vec![0u8; 68]; 366 | timeout(Duration::from_secs(10), stream.read_exact(&mut response)) 367 | .await 368 | .map_err(|_| TorrentError::ConnectionTimedOut("Handshake receive timeout".to_string()))? 369 | .map_err(|e| TorrentError::PeerError(format!("Failed to read handshake: {}", e)))?; 370 | 371 | println!("Handshake response received!"); 372 | if response[0] != 19 || &response[1..20] != b"BitTorrent protocol" { 373 | return Err(TorrentError::InvalidHandshake(format!( 374 | "Invalid protocol string. 
Got: {:?}", 375 | &response[..20] 376 | ))); 377 | } 378 | 379 | let peer_info_hash = &response[28..48]; 380 | if peer_info_hash != info_hash { 381 | return Err(TorrentError::InvalidHandshake(format!( 382 | "Info hash mismatch.\nExpected: {:02x?}\nReceived: {:02x?}", 383 | info_hash, peer_info_hash 384 | ))); 385 | } 386 | 387 | Ok(()) 388 | } 389 | 390 | pub async fn receive_init_msg(&mut self) -> Result<()> { 391 | let mut got_bitfield = false; 392 | let start_time = std::time::Instant::now(); 393 | let timeout_duration = std::time::Duration::from_secs(15); 394 | 395 | while !got_bitfield && start_time.elapsed() < timeout_duration { 396 | let mut msg_len = [0u8; 4]; 397 | let stream = self 398 | .stream 399 | .as_mut() 400 | .ok_or_else(|| TorrentError::PeerError("Not connected to peer".to_string()))?; 401 | stream.read_exact(&mut msg_len).await?; 402 | let msg_len = u32::from_be_bytes(msg_len) as usize; 403 | 404 | // Handle keep-alive message 405 | if msg_len == 0 { 406 | println!("Received keep-alive"); 407 | continue; 408 | } 409 | 410 | let mut msg_id = [0u8; 1]; 411 | stream.read_exact(&mut msg_id).await?; 412 | 413 | println!("Received message type: {}", msg_id[0]); 414 | 415 | match msg_id[0] { 416 | 0 => println!("Peer sent choke"), 417 | 1 => println!("Peer sent unchoke"), 418 | 2 => println!("Peer sent interested"), 419 | 3 => println!("Peer sent not interested"), 420 | 4 => { 421 | // Have message 422 | let mut have = [0u8; 4]; 423 | stream.read_exact(&mut have).await?; 424 | let pieces = u32::from_be_bytes(have); 425 | self.piece_availability.insert(pieces); 426 | println!( 427 | "Received have message for piece {}", 428 | u32::from_be_bytes(have) 429 | ); 430 | } 431 | 5 => { 432 | // Bitfield 433 | let mut bitfield = vec![0u8; msg_len - 1]; 434 | stream.read_exact(&mut bitfield).await?; 435 | println!("Received bitfield of length {}", bitfield.len()); 436 | self.parse_bitfield(&bitfield); 437 | self.bitfields = Some(bitfield); 438 | got_bitfield = 
true; 439 | } 440 | _ => { 441 | // Skip unknown message 442 | let mut payload = vec![0u8; msg_len - 1]; 443 | stream.read_exact(&mut payload).await?; 444 | println!("Skipping unknown message type: {}", msg_id[0]); 445 | } 446 | } 447 | } 448 | 449 | if !got_bitfield { 450 | return Err(TorrentError::ConnectionTimedOut( 451 | "Timeout waiting for bitfield".to_string(), 452 | )); 453 | } 454 | Ok(()) 455 | } 456 | pub async fn request_piece( 457 | &mut self, 458 | index: u32, 459 | piece_length: u32, 460 | start_offset: u32, 461 | file_path: &str, 462 | ) -> Result { 463 | let stream = self 464 | .stream 465 | .as_mut() 466 | .ok_or_else(|| TorrentError::PeerError("Not connected to peer".to_string()))?; 467 | //////////////////////////// Send interested message ////////////////////////////// 468 | if !self.is_interested { 469 | let interested_msg = [0u8, 0, 0, 1, 2]; 470 | stream.write_all(&interested_msg).await.map_err(|e| { 471 | TorrentError::PeerError(format!("Failed to send interested: {}", e)) 472 | })?; 473 | self.is_interested = true; 474 | println!("interested message sent!"); 475 | } 476 | //////////////////////////// Wait for unchoke message ////////////////////////////// 477 | let start_time = std::time::Instant::now(); 478 | let timeout = std::time::Duration::from_secs(20); 479 | while self.is_choked && start_time.elapsed() < timeout { 480 | let mut msg_len = [0u8; 4]; 481 | stream.read_exact(&mut msg_len).await?; 482 | let msg_len = u32::from_be_bytes(msg_len); 483 | if msg_len == 0 { 484 | println!("recevied keep-alive msg"); 485 | continue; 486 | } 487 | 488 | let mut msg_id = [0u8; 1]; 489 | stream.read_exact(&mut msg_id).await?; 490 | match msg_id[0] { 491 | 0 => println!("Received choke message"), 492 | 1 => { 493 | println!("Received unchoke message"); 494 | self.is_choked = false; 495 | } 496 | 2 => println!("Received interested message"), 497 | 3 => println!("Received not interested message"), 498 | 4 => { 499 | let mut have_payload = vec![0u8; 
msg_len as usize - 1]; 500 | stream.read_exact(&mut have_payload).await?; 501 | //TODO: Remove the println or impl the error from try into for TorrentError 502 | println!( 503 | "Received have message for piece {}", 504 | u32::from_be_bytes(have_payload[..4].try_into().unwrap()) 505 | ); 506 | } 507 | _ => { 508 | let mut payload = vec![0u8; msg_len as usize - 1]; 509 | stream.read_exact(&mut payload).await?; 510 | println!("Received unknown message type: {}", msg_id[0]); 511 | } 512 | } 513 | } 514 | if self.is_choked { 515 | return Err(TorrentError::DownloadTimedout); 516 | } 517 | 518 | //////////////////////////// Requesting Piece ////////////////////////////// 519 | println!("Requesting piece: {}", index); 520 | let mut file = if start_offset == 0 { 521 | OpenOptions::new() 522 | .create(true) 523 | .write(true) 524 | .truncate(true) 525 | .open(file_path) 526 | .await 527 | .map_err(|e| match e.kind() { 528 | std::io::ErrorKind::PermissionDenied => { 529 | TorrentError::PermissionDenied(file_path.to_string()) 530 | } 531 | _ => TorrentError::IoError(e), 532 | })? 533 | } else { 534 | OpenOptions::new() 535 | .create(true) 536 | .append(true) 537 | .open(file_path) 538 | .await 539 | .map_err(TorrentError::from)? 
540 | }; 541 | 542 | let block_size = 16 * 1024; 543 | let mut offset = start_offset; 544 | let mut bytes_downloaded = 0; 545 | 546 | while offset < piece_length { 547 | let block_length = if piece_length - offset < block_size { 548 | piece_length - offset 549 | } else { 550 | block_size 551 | }; 552 | let mut request: Vec = Vec::new(); 553 | request.extend_from_slice(&(13u32).to_be_bytes()); 554 | request.push(6); 555 | request.extend_from_slice(&index.to_be_bytes()); 556 | request.extend_from_slice(&offset.to_be_bytes()); 557 | request.extend_from_slice(&block_length.to_be_bytes()); 558 | stream.write_all(&request).await?; 559 | 560 | // Wait for piece data with timeout 561 | let mut got_piece = false; 562 | let start_time = std::time::Instant::now(); 563 | let timeout_duration = std::time::Duration::from_secs(15); 564 | 565 | while !got_piece && start_time.elapsed() < timeout_duration { 566 | let mut msg_len = [0u8; 4]; 567 | stream.read_exact(&mut msg_len).await?; 568 | let msg_len = u32::from_be_bytes(msg_len) as usize; 569 | 570 | if msg_len == 0 { 571 | println!("Received keep-alive"); 572 | continue; 573 | } 574 | 575 | let mut msg_id = [0u8; 1]; 576 | stream.read_exact(&mut msg_id).await?; 577 | 578 | println!("Received message type: {}", msg_id[0]); 579 | 580 | match msg_id[0] { 581 | 0 => { 582 | return Err(TorrentError::PeerError( 583 | "Peer sent choke message during download".to_string(), 584 | )); 585 | } 586 | 4 => { 587 | // Have message 588 | let mut have = [0u8; 4]; 589 | stream.read_exact(&mut have).await?; 590 | println!( 591 | "Received 'have' message for piece {}", 592 | u32::from_be_bytes(have) 593 | ); 594 | } 595 | 7 => { 596 | // Piece message 597 | let mut piece_index = [0u8; 4]; 598 | stream.read_exact(&mut piece_index).await?; 599 | let mut piece_offset = [0u8; 4]; 600 | stream.read_exact(&mut piece_offset).await?; 601 | 602 | let block_size = msg_len - 9; // subtract message type and index/offset 603 | let mut block = vec![0u8; 
block_size]; 604 | stream.read_exact(&mut block).await?; 605 | 606 | file.write_all(&block).await?; 607 | offset += block_length; 608 | bytes_downloaded += block_length; 609 | got_piece = true; 610 | println!("Received piece block at offset {}", offset); 611 | } 612 | _ => { 613 | // Skip unknown message types 614 | let mut payload = vec![0u8; msg_len - 1]; 615 | stream.read_exact(&mut payload).await?; 616 | println!("Skipping unknown message type: {}", msg_id[0]); 617 | } 618 | } 619 | } 620 | 621 | if !got_piece { 622 | return Err(TorrentError::PeerError( 623 | "Timeout waiting for piece data".to_string(), 624 | )); 625 | } 626 | } 627 | 628 | Ok(bytes_downloaded) 629 | } 630 | pub async fn send_msg(&mut self, message: &[u8]) -> Result<()> { 631 | if let Some(stream) = self.stream.as_mut() { 632 | stream 633 | .write_all(message) 634 | .await 635 | .expect("Failed to write the message in TCP stream!"); 636 | stream.flush().await.expect("Failed to flush the stream"); 637 | Ok(()) 638 | } else { 639 | Err(TorrentError::PeerError("No stream was found!".to_string())) 640 | } 641 | } 642 | pub async fn receive_msg(&mut self) -> Result> { 643 | if let Some(stream) = self.stream.as_mut() { 644 | let mut length_bytes = [0u8; 4]; 645 | stream 646 | .read_exact(&mut length_bytes) 647 | .await 648 | .expect("Failed to read the length bytes"); 649 | let length = u32::from_be_bytes(length_bytes); 650 | let mut msg = vec![0u8; length as usize]; 651 | stream 652 | .read_exact(&mut msg) 653 | .await 654 | .expect("Failed to read the message!"); 655 | Ok(msg) 656 | } else { 657 | Err(TorrentError::PeerError("No stream was found!".to_string())) 658 | } 659 | } 660 | fn parse_bitfield(&mut self, bitfield: &[u8]) { 661 | for (byte_idx, &byte) in bitfield.iter().enumerate() { 662 | // For each bit in the byte 663 | for bit_idx in 0..8 { 664 | // Check if the bit is set (1) 665 | if (byte & (1 << (7 - bit_idx))) != 0 { 666 | // Calculate piece index from byte_idx and bit_idx 667 | let 
piece_idx = (byte_idx * 8 + bit_idx) as u32; 668 | self.piece_availability.insert(piece_idx); 669 | } 670 | } 671 | } 672 | } 673 | } 674 | 675 | #[cfg(test)] 676 | mod tests { 677 | 678 | use super::*; 679 | use crate::mapper::TorrentMetaData; 680 | use crate::tracker::{generate_peer_id, request_peers}; 681 | use tokio::test; 682 | 683 | #[test] 684 | async fn test_get_peers() { 685 | let path = r"/home/rusty/Rs/Torrs/Gym Manager [FitGirl Repack].torrent"; 686 | let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap(); 687 | println!("Got the torrent meta data"); 688 | 689 | match request_peers(&torrent_meta_data).await { 690 | Ok(peers) => { 691 | println!("Successfully retrieved {} peers", peers.len()); 692 | for (i, peer) in peers.iter().enumerate() { 693 | println!("Peer {}: {:?}", i + 1, peer); 694 | } 695 | assert!(!peers.is_empty(), "Peer list should not be empty"); 696 | } 697 | Err(e) => { 698 | eprintln!("Failed to retrieve peers: {:?}", e); 699 | } 700 | } 701 | } 702 | //#[tokio::test] 703 | //async fn test_download() { 704 | // let path = r"/home/rusty/Codes/Fun/Torrs/The Genesis Order [FitGirl Repack].torrent"; 705 | // let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap(); 706 | // println!("Got the torrent meta data"); 707 | // // Get peers from trackers 708 | // let peers = request_peers(&torrent_meta_data).await.unwrap(); 709 | // println!("Got {} peers", peers.len()); 710 | // assert!(!peers.is_empty(), "No peers found"); 711 | // 712 | // let peer_id = generate_peer_id(); 713 | // let info_hash = torrent_meta_data.calculate_info_hash().unwrap(); 714 | // 715 | // // Create downloader with our peer list 716 | // let mut downloader = PieceDownloader::new(peers, info_hash, peer_id); 717 | // 718 | // // Initialize all peer connections first 719 | // match downloader.initialize_peers().await { 720 | // Ok(()) => println!("Successfully initialized peer connections"), 721 | // Err(e) => { 722 | // println!("Failed to 
initialize peer connections: {}", e); 723 | // return; 724 | // } 725 | // } 726 | // 727 | // // Download the files 728 | // let file_struct = torrent_meta_data.get_file_structure(); 729 | // let torrent_path = Path::new(path); 730 | // let parent_dir = torrent_path.parent().unwrap().to_string_lossy().to_string(); 731 | // let total_pieces = torrent_meta_data.calculate_total_pieces(); 732 | // 733 | // for (file_index, (file, _)) in file_struct.iter().enumerate() { 734 | // println!("Downloading file: {:?}", file); 735 | // let piece_length = torrent_meta_data.get_pieces_length(); 736 | // let file_path = format!("{}/{}", parent_dir, file); 737 | // 738 | // match downloader 739 | // .download_piece( 740 | // file_index as u32, 741 | // piece_length as u32, 742 | // &file_path, 743 | // total_pieces, 744 | // ) 745 | // .await 746 | // { 747 | // Ok(()) => println!("Successfully downloaded file: {:?}", file), 748 | // Err(e) => { 749 | // println!("Failed to download file {:?}: {}", file, e); 750 | // break; 751 | // } 752 | // } 753 | // } 754 | //} 755 | #[tokio::test] 756 | async fn test_download() { 757 | let path = "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent"; 758 | let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap(); 759 | let peers = request_peers(&torrent_meta_data).await.unwrap(); 760 | let peer_id = generate_peer_id(); 761 | let info_hash = torrent_meta_data.calculate_info_hash().unwrap(); 762 | 763 | let mut downloader = PieceDownloader::new(peers, info_hash, peer_id); 764 | 765 | let torrent_path = std::path::Path::new(path); 766 | let output_dir = torrent_path 767 | .parent() 768 | .unwrap() 769 | .join("downloads") 770 | .to_string_lossy() 771 | .to_string(); 772 | 773 | match downloader 774 | .download_torrent(&torrent_meta_data, &output_dir) 775 | .await 776 | { 777 | Ok(()) => println!("Successfully downloaded torrent"), 778 | Err(e) => println!("Failed to download: {}", e), 779 | } 780 | } 781 | } 782 | 
-------------------------------------------------------------------------------- /src/tracker.rs: -------------------------------------------------------------------------------- 1 | use crate::mapper::*; 2 | use crate::peer::PeerInfo; 3 | use rand::Rng; 4 | use serde::Deserialize; 5 | use serde_bytes::ByteBuf; 6 | use std::net::Ipv4Addr; 7 | use std::time::Duration; 8 | use tokio::task; 9 | use tokio::time::timeout; 10 | use url::Url; 11 | 12 | const UDP_TIMEOUT: Duration = Duration::from_secs(10); 13 | 14 | //TODO: Add TorrentError to this module 15 | #[allow(dead_code)] 16 | #[derive(Debug, Deserialize)] 17 | pub struct TrackerResponse { 18 | interval: Option, 19 | #[serde(default)] 20 | peer: Vec, 21 | #[serde(rename = "peers", default)] 22 | peers_binary: Option, 23 | } 24 | 25 | /// Used to generate Peer Id that is later used to request trackers 26 | pub fn generate_peer_id() -> [u8; 20] { 27 | let mut rng = rand::thread_rng(); 28 | let mut peer_id = [0u8; 20]; 29 | 30 | rng.fill(&mut peer_id); 31 | 32 | peer_id[0] = b'-'; 33 | peer_id[1..7].copy_from_slice(b"TR2940"); 34 | 35 | peer_id 36 | } 37 | 38 | /// Request Peers in order to get the Peer Info that is needed to establish connections. 
39 | #[allow(dead_code)] 40 | pub async fn request_peers( 41 | torrent: &TorrentMetaData, 42 | ) -> Result, Box> { 43 | let info_hash = torrent.calculate_info_hash()?; 44 | let trackers = torrent.get_tracker_url(); 45 | let total_length = torrent.get_total_size(); 46 | 47 | let mut handles = vec![]; 48 | 49 | for tracker in trackers { 50 | if tracker.starts_with("udp://") { 51 | println!("Connecting to UDP tracker: {}", tracker); 52 | //continue; 53 | let tracker = tracker.to_string(); 54 | 55 | let handle = task::spawn(async move { 56 | match tokio::time::timeout( 57 | Duration::from_secs(20), 58 | request_udp_tracker(&tracker, &info_hash, total_length), 59 | ) 60 | .await 61 | { 62 | Ok(Ok(peers)) => Ok((tracker, peers)), 63 | Ok(Err(e)) => Err(format!("Failed to connect to tracker {}: {:?}", tracker, e)), 64 | Err(_) => Err(format!("Timeout connecting to tracker {}", tracker)), 65 | } 66 | }); 67 | handles.push(handle); 68 | } else { 69 | let tracker = tracker.to_string(); 70 | 71 | let handle = task::spawn(async move { 72 | match tokio::time::timeout( 73 | Duration::from_secs(20), 74 | request_http_trackers(&tracker, &info_hash, total_length), 75 | ) 76 | .await 77 | { 78 | Ok(Ok(peers)) => Ok((tracker, peers)), 79 | Ok(Err(e)) => Err(format!("Failed to connect to tracker {}: {:?}", tracker, e)), 80 | Err(_) => Err(format!("Timeout connecting to tracker {}", tracker)), 81 | } 82 | }); 83 | 84 | handles.push(handle); 85 | } 86 | } 87 | 88 | for handle in handles { 89 | match handle.await { 90 | Ok(Ok((tracker, peers))) => { 91 | println!( 92 | "Successfully retrieved {} peers from tracker: {}", 93 | peers.len(), 94 | tracker 95 | ); 96 | return Ok(peers); 97 | } 98 | Ok(Err(e)) => eprintln!("{}", e), 99 | Err(e) => eprintln!("Task failed: {:?}", e), 100 | } 101 | } 102 | 103 | Err("Failed to retrieve peers from any tracker".into()) 104 | } 105 | 106 | /// Sending connection request with a socker 107 | pub async fn send_connection_request( 108 | socket: 
&tokio::net::UdpSocket, 109 | ) -> Result> { 110 | let protocol_id: u64 = 0x0000041727101980; // Fixed protocol ID 111 | let action: u32 = 0; // connect 112 | let transaction_id: u32 = rand::random(); 113 | 114 | let mut request = Vec::with_capacity(16); 115 | request.extend_from_slice(&protocol_id.to_be_bytes()); 116 | request.extend_from_slice(&action.to_be_bytes()); 117 | request.extend_from_slice(&transaction_id.to_be_bytes()); 118 | 119 | socket.send(&request).await?; 120 | 121 | let mut response = vec![0u8; 16]; 122 | let size = socket.recv(&mut response).await?; 123 | 124 | if size != 16 { 125 | return Err(format!("Invalid connection response size: {}", size).into()); 126 | } 127 | 128 | let resp_action = u32::from_be_bytes(response[0..4].try_into()?); 129 | let resp_transaction_id = u32::from_be_bytes(response[4..8].try_into()?); 130 | 131 | if resp_action != 0 { 132 | return Err(format!("Invalid action in connection response: {}", resp_action).into()); 133 | } 134 | if resp_transaction_id != transaction_id { 135 | return Err("Transaction ID mismatch in connection response".into()); 136 | } 137 | 138 | Ok(u64::from_be_bytes(response[8..16].try_into()?)) 139 | } 140 | /// Request Trakcers using udp links! 
141 | pub async fn request_udp_tracker( 142 | announce: &str, 143 | info_hash: &[u8; 20], 144 | total_length: i64, 145 | ) -> Result, Box> { 146 | let url = Url::parse(announce)?; 147 | let host = url.host_str().ok_or("No host in tracker URL")?; 148 | let port = url.port().unwrap_or(80); 149 | 150 | // Bind to an IPv4 address specifically 151 | let socket = tokio::net::UdpSocket::bind("0.0.0.0:0").await?; 152 | 153 | let mut addrs = tokio::net::lookup_host((host, port)).await?; 154 | let addr = addrs 155 | .find(|addr| addr.is_ipv4()) 156 | .ok_or("No IPv4 address found for tracker")?; 157 | 158 | match timeout(UDP_TIMEOUT, socket.connect(addr)).await { 159 | Ok(result) => result?, 160 | Err(_) => return Err("UDP tracker connection timeout".into()), 161 | } 162 | 163 | let mut retries = 2; 164 | let mut connection_id = None; 165 | 166 | while retries > 0 && connection_id.is_none() { 167 | match timeout(UDP_TIMEOUT, send_connection_request(&socket)).await { 168 | Ok(Ok(id)) => { 169 | connection_id = Some(id); 170 | break; 171 | } 172 | Ok(Err(e)) => { 173 | println!("Connection request failed, retries left {}: {}", retries, e); 174 | retries -= 1; 175 | } 176 | Err(_) => { 177 | println!("Connection request timed out, retries left {}", retries); 178 | retries -= 1; 179 | } 180 | } 181 | } 182 | 183 | let connection_id = connection_id.ok_or("Failed to get connection ID after retries")?; 184 | 185 | let transaction_id: u32 = rand::random(); 186 | let peer_id = generate_peer_id(); 187 | 188 | let mut request = Vec::with_capacity(98); 189 | request.extend_from_slice(&connection_id.to_be_bytes()); // 8 bytes 190 | request.extend_from_slice(&1_u32.to_be_bytes()); // 4 bytes - action (1 for announce) 191 | request.extend_from_slice(&transaction_id.to_be_bytes()); // 4 bytes 192 | request.extend_from_slice(info_hash); // 20 bytes 193 | request.extend_from_slice(&peer_id); // 20 bytes 194 | request.extend_from_slice(&0_i64.to_be_bytes()); // 8 bytes - downloaded 195 | 
request.extend_from_slice(&total_length.to_be_bytes()); // 8 bytes - left 196 | request.extend_from_slice(&0_i64.to_be_bytes()); // 8 bytes - uploaded 197 | request.extend_from_slice(&0_i32.to_be_bytes()); // 4 bytes - event 198 | request.extend_from_slice(&0_u32.to_be_bytes()); // 4 bytes - IP address 199 | request.extend_from_slice(&0_u32.to_be_bytes()); // 4 bytes - key 200 | request.extend_from_slice(&(-1_i32).to_be_bytes()); // 4 bytes - num_want 201 | request.extend_from_slice(&6881_u16.to_be_bytes()); // 2 bytes - port 202 | 203 | retries = 2; 204 | while retries > 0 { 205 | // Send announce 206 | match timeout(UDP_TIMEOUT, socket.send(&request)).await { 207 | Ok(Ok(_)) => {} 208 | Ok(Err(e)) => { 209 | println!("Failed to send announce, retries left {}: {}", retries, e); 210 | retries -= 1; 211 | continue; 212 | } 213 | Err(_) => { 214 | println!("Announce send timed out, retries left {}", retries); 215 | retries -= 1; 216 | continue; 217 | } 218 | } 219 | 220 | let mut response = vec![0u8; 1024]; 221 | match timeout(UDP_TIMEOUT, socket.recv(&mut response)).await { 222 | Ok(Ok(size)) => { 223 | response.truncate(size); 224 | if size < 8 { 225 | println!("Response too short: {} bytes", size); 226 | retries -= 1; 227 | continue; 228 | } 229 | 230 | let action = u32::from_be_bytes(response[0..4].try_into()?); 231 | let resp_transaction_id = u32::from_be_bytes(response[4..8].try_into()?); 232 | println!( 233 | "Response action: {}, transaction_id: {}", 234 | action, resp_transaction_id 235 | ); 236 | 237 | if resp_transaction_id != transaction_id { 238 | println!("Transaction ID mismatch"); 239 | retries -= 1; 240 | continue; 241 | } 242 | 243 | match action { 244 | 1 => { 245 | let interval = u32::from_be_bytes(response[8..12].try_into()?); 246 | let leechers = u32::from_be_bytes(response[12..16].try_into()?); 247 | let seeders = u32::from_be_bytes(response[16..20].try_into()?); 248 | 249 | println!( 250 | "Success! 
Interval: {}s, Leechers: {}, Seeders: {}", 251 | interval, leechers, seeders 252 | ); 253 | 254 | let mut peers = Vec::new(); 255 | for chunk in response[20..].chunks(6) { 256 | if chunk.len() == 6 { 257 | let ip = 258 | format!("{}.{}.{}.{}", chunk[0], chunk[1], chunk[2], chunk[3]); 259 | let port = u16::from_be_bytes([chunk[4], chunk[5]]); 260 | peers.push(PeerInfo { ip, port }); 261 | } 262 | } 263 | return Ok(peers); 264 | } 265 | 2 => { 266 | // Scrape response 267 | println!("Got scrape response, trying announce again..."); 268 | 269 | let mut announce_request = Vec::with_capacity(98); 270 | announce_request.extend_from_slice(&connection_id.to_be_bytes()); 271 | announce_request.extend_from_slice(&1_u32.to_be_bytes()); // Action 1 for announce 272 | announce_request.extend_from_slice(&transaction_id.to_be_bytes()); 273 | 274 | // Convert info_hash to proper network byte order 275 | let mut formatted_hash = [0u8; 20]; 276 | for i in 0..20 { 277 | formatted_hash[i] = info_hash[19 - i]; 278 | } 279 | announce_request.extend_from_slice(&formatted_hash); 280 | 281 | announce_request.extend_from_slice(&peer_id); 282 | announce_request.extend_from_slice(&0_i64.to_be_bytes()); // downloaded 283 | announce_request.extend_from_slice(&total_length.to_be_bytes()); // left 284 | announce_request.extend_from_slice(&0_i64.to_be_bytes()); // uploaded 285 | announce_request.extend_from_slice(&0_i32.to_be_bytes()); // event 286 | announce_request.extend_from_slice(&0_u32.to_be_bytes()); // IP 287 | announce_request.extend_from_slice(&0_u32.to_be_bytes()); // key 288 | announce_request.extend_from_slice(&(-1_i32).to_be_bytes()); // num_want 289 | announce_request.extend_from_slice(&6881_u16.to_be_bytes()); // port 290 | 291 | socket.send(&announce_request).await?; 292 | 293 | let mut retry_response = vec![0u8; 1024]; 294 | let retry_size = socket.recv(&mut retry_response).await?; 295 | retry_response.truncate(retry_size); 296 | 297 | if retry_size < 20 { 298 | return Err("Retry 
size too short for announce".into()); 299 | } 300 | let retry_action = u32::from_be_bytes(retry_response[0..4].try_into()?); 301 | if retry_action == 1 { 302 | let mut peers = Vec::new(); 303 | for chunk in retry_response[20..].chunks(6) { 304 | if chunk.len() == 6 { 305 | let ip = format!( 306 | "{}.{}.{}.{}", 307 | chunk[0], chunk[1], chunk[2], chunk[3] 308 | ); 309 | let port = u16::from_be_bytes([chunk[4], chunk[5]]); 310 | if &ip != "0.0.0.0" && port != 0 { 311 | peers.push(PeerInfo { ip, port }); 312 | } 313 | } 314 | } 315 | return Ok(peers); 316 | } else { 317 | return Err( 318 | "Failed to get proper announce response after scrape".into() 319 | ); 320 | }; 321 | } 322 | 3 => { 323 | // Error 324 | let error_msg = String::from_utf8_lossy(&response[8..]); 325 | println!("Got error response: {}", error_msg); 326 | retries -= 1; 327 | continue; 328 | } 329 | _ => { 330 | println!("Got unexpected action: {}", action); 331 | retries -= 1; 332 | continue; 333 | } 334 | } 335 | } 336 | Ok(Err(e)) => { 337 | println!( 338 | "Failed to receive announce response, retries left {}: {}", 339 | retries, e 340 | ); 341 | retries -= 1; 342 | } 343 | Err(_) => { 344 | println!("Announce receive timed out, retries left {}", retries); 345 | retries -= 1; 346 | } 347 | } 348 | } 349 | 350 | Err("Failed to get valid response after retries".into()) 351 | } 352 | /// Request trackers from http and udp origins 353 | pub async fn request_tracker( 354 | announce: &str, 355 | info_hash: &[u8; 20], 356 | total_length: i64, 357 | ) -> Result, Box> { 358 | if announce.starts_with("udp://") { 359 | return request_udp_tracker(announce, info_hash, total_length).await; 360 | } else if announce.starts_with("http://") || announce.starts_with("https://") { 361 | return request_http_trackers(announce, info_hash, total_length).await; 362 | } 363 | Err("Unsupported tracker protocol".into()) 364 | } 365 | /// Request Trackers based on the info that has been parsed from torrent file. 
366 | pub async fn request_http_trackers( 367 | announce: &str, 368 | info_hash: &[u8; 20], 369 | total_length: i64, 370 | ) -> Result, Box> { 371 | let url = Url::parse(announce)?; 372 | let peer_id = generate_peer_id(); 373 | 374 | let q = format!( 375 | "?info_hash={}&peer_id={}&port=6881&uploaded=0&downloaded=0&compact=1&left={}", 376 | urlencode(info_hash), 377 | urlencode(&peer_id), 378 | total_length 379 | ); 380 | let full_url = format!("{}{}", url.as_str().trim_end_matches('/'), q); 381 | 382 | let response = match reqwest::get(full_url.clone()).await { 383 | Ok(bytes) => match bytes.bytes().await { 384 | Ok(byte) => byte, 385 | Err(e) => return Err(format!("Failed to get a response: {}", e).into()), 386 | }, 387 | Err(e) => return Err(format!("Failed to connect to {}:{}", url, e).into()), 388 | }; 389 | 390 | if response.starts_with(b"<") { 391 | return Err("Tracker returned HTML instead of bencoded data".into()); 392 | } 393 | 394 | let tracker_response: TrackerResponse = serde_bencode::de::from_bytes(&response) 395 | .map_err(|e| format!("failed to decode the bytes {}", e))?; 396 | let peers = if !tracker_response.peer.is_empty() { 397 | tracker_response.peer 398 | } else if let Some(binary_peer) = tracker_response.peers_binary { 399 | parse_binary_peers(&binary_peer) 400 | } else { 401 | return Err("No peers found in response".into()); 402 | }; 403 | 404 | if peers.is_empty() { 405 | Err("Tracker returned no peers".into()) 406 | } else { 407 | Ok(peers) 408 | } 409 | } 410 | 411 | pub fn parse_binary_peers(binary: &[u8]) -> Vec { 412 | binary 413 | .chunks(6) 414 | .filter_map(|chunk| { 415 | if chunk.len() == 6 { 416 | let ip = Ipv4Addr::new(chunk[0], chunk[1], chunk[2], chunk[3]).to_string(); 417 | let port = u16::from_be_bytes([chunk[4], chunk[5]]); 418 | Some(PeerInfo { ip, port }) 419 | } else { 420 | None 421 | } 422 | }) 423 | .collect() 424 | } 425 | 426 | /// Encodes url to a String 427 | pub fn urlencode(bytes: &[u8]) -> String { 428 | bytes 
429 | .iter() 430 | .map(|&b| format!("%{:02x}", b)) 431 | .collect::() 432 | } 433 | 434 | #[cfg(test)] 435 | mod tests { 436 | 437 | use super::*; 438 | #[tokio::test] 439 | async fn test_request_http_tracker() { 440 | let path = "/home/rusty/Codes/Fun/Torrs/Violet [FitGirl Repack].torrent"; 441 | 442 | let torrent_meta_data = TorrentMetaData::from_trnt_file(path).unwrap(); 443 | println!("Got the torrent meta data"); 444 | 445 | let info_hash = torrent_meta_data.calculate_info_hash().unwrap(); 446 | let trackers = torrent_meta_data.get_tracker_url(); 447 | let total_length = torrent_meta_data.get_total_size(); 448 | 449 | let mut any_success = false; 450 | 451 | for tracker in trackers { 452 | println!("\nTrying tracker: {}", tracker); 453 | let result = if tracker.starts_with("udp") { 454 | match request_udp_tracker(&tracker, &info_hash, total_length).await { 455 | Ok(peers) => { 456 | println!("Successfully got {} peers from UDP tracker", peers.len()); 457 | any_success = true; 458 | Ok(peers) 459 | } 460 | Err(e) => { 461 | println!("UDP tracker failed: {}", e); 462 | Err(e) 463 | } 464 | } 465 | } else { 466 | match request_http_trackers(&tracker, &info_hash, total_length).await { 467 | Ok(peers) => { 468 | println!("Successfully got {} peers from HTTP tracker", peers.len()); 469 | any_success = true; 470 | Ok(peers) 471 | } 472 | Err(e) => { 473 | println!("HTTP tracker failed: {}", e); 474 | Err(e) 475 | } 476 | } 477 | }; 478 | 479 | // Print peer info if successful 480 | if let Ok(peers) = result { 481 | println!("First 5 peers from tracker {}:", tracker); 482 | for (i, peer) in peers.iter().take(5).enumerate() { 483 | println!(" Peer {}: {}:{}", i + 1, peer.ip, peer.port); 484 | } 485 | if peers.len() > 5 { 486 | println!(" ... 
and {} more peers", peers.len() - 5); 487 | } 488 | } 489 | } 490 | 491 | // Test passes if at least one tracker worked 492 | assert!(any_success, "No trackers successfully returned peers"); 493 | } 494 | } 495 | --------------------------------------------------------------------------------