├── .github └── workflows │ └── rust.yaml ├── .gitignore ├── CODEOWNERS ├── Cargo.toml ├── LICENSE ├── MakeFile ├── README.md ├── bencoder ├── Cargo.toml └── src │ ├── bencode.rs │ └── lib.rs ├── configs ├── config1.cfg └── config2.cfg ├── dtorrent ├── Cargo.toml ├── README.md └── src │ ├── bt_server │ ├── mod.rs │ └── server.rs │ ├── config │ ├── cfg.rs │ ├── constants.rs │ └── mod.rs │ ├── lib.rs │ ├── main.rs │ ├── peer │ ├── bt_peer.rs │ ├── handshake.rs │ ├── message_handler.rs │ ├── mod.rs │ ├── peer_message │ │ ├── bitfield.rs │ │ ├── message.rs │ │ ├── mod.rs │ │ └── request.rs │ ├── peer_session.rs │ └── session_status.rs │ ├── storage_manager │ ├── manager.rs │ └── mod.rs │ ├── torrent_handler │ ├── handler.rs │ ├── mod.rs │ └── status.rs │ ├── torrent_parser │ ├── info.rs │ ├── mod.rs │ ├── parser.rs │ └── torrent.rs │ └── tracker │ ├── http │ ├── http_handler.rs │ ├── mod.rs │ ├── query_params.rs │ └── url_parser.rs │ ├── mod.rs │ ├── tracker_handler.rs │ └── tracker_response.rs ├── dtracker ├── Cargo.toml ├── README.md ├── config.cfg ├── src │ ├── announce │ │ ├── announce_request.rs │ │ ├── announce_request_error.rs │ │ ├── announce_response.rs │ │ └── mod.rs │ ├── bt_tracker │ │ ├── mod.rs │ │ └── tracker.rs │ ├── http │ │ ├── http_method.rs │ │ ├── http_parser.rs │ │ ├── http_status.rs │ │ └── mod.rs │ ├── http_server │ │ ├── mod.rs │ │ ├── request_handler.rs │ │ ├── server.rs │ │ └── thread_pool │ │ │ ├── mod.rs │ │ │ ├── pool.rs │ │ │ └── worker.rs │ ├── lib.rs │ ├── main.rs │ ├── stats │ │ ├── mod.rs │ │ ├── stats_response.rs │ │ └── stats_updater.rs │ ├── torrent_swarm │ │ ├── mod.rs │ │ └── swarm.rs │ ├── tracker_peer │ │ ├── event.rs │ │ ├── mod.rs │ │ ├── peer.rs │ │ └── peer_status.rs │ └── tracker_status │ │ ├── atomic_tracker_status.rs │ │ ├── current_tracker_stats.rs │ │ └── mod.rs └── templates │ ├── graph.js │ └── index.html ├── torrents ├── file1.torrent ├── file2.torrent └── file3.torrent └── url_encoder ├── Cargo.toml └── src ├── 
lib.rs └── url_encoder.rs /.github/workflows/rust.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | pull_request: 6 | 7 | name: Rust 8 | 9 | jobs: 10 | tests-stable: 11 | name: Tests (Stable) 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout sources 15 | uses: actions/checkout@v2 16 | 17 | - name: Install toolchain 18 | uses: actions-rs/toolchain@v1 19 | with: 20 | toolchain: stable 21 | profile: minimal 22 | override: true 23 | 24 | - uses: Swatinem/rust-cache@v1 25 | with: 26 | cache-on-failure: true 27 | 28 | - name: cargo test 29 | run: cargo test --all --all-features 30 | 31 | lint: 32 | runs-on: ubuntu-latest 33 | steps: 34 | - name: Checkout sources 35 | uses: actions/checkout@v2 36 | 37 | - name: Install toolchain 38 | uses: actions-rs/toolchain@v1 39 | with: 40 | toolchain: nightly 41 | profile: minimal 42 | components: rustfmt, clippy 43 | override: true 44 | 45 | - uses: Swatinem/rust-cache@v1 46 | with: 47 | cache-on-failure: true 48 | 49 | - name: cargo fmt 50 | run: cargo fmt --all -- --check 51 | 52 | - name: cargo clippy 53 | run: cargo clippy --all --all-features -- -D warnings -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | **/*.log 12 | **/.DS_Store -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @Oppen @HermanObst @juanbono @Jrigada @edg-l 
@azteca1998 @unbalancedparentheses @igaray 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "dtorrent", 5 | "dtracker", 6 | "bencoder", 7 | "url_encoder" 8 | ] 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Lambdaclass 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MakeFile: -------------------------------------------------------------------------------- 1 | .PHONY: build run check clippy docs test 2 | 3 | build: 4 | cargo build --release 5 | 6 | check: 7 | cargo check 8 | 9 | clippy: 10 | cargo clippy -- -D warnings 11 | 12 | docs: 13 | cargo doc --verbose --release --locked --no-deps 14 | 15 | test: 16 | cargo test 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # libtorrent-rs 2 | 3 | A Rust implementation of the [BitTorrent V2](http://bittorrent.org/beps/bep_0052.html) protocol. For now only V1 is implemented but we're working on V2. 4 | 5 | ## Dependencies 6 | - Rust 7 | - Cargo 8 | 9 | ## Running 10 | To run the program there needs to be a `.cfg` file in the `configs` directory of the project. We provide two with default values as an example. 11 | Then run the program with `cargo` followed by the directory containing the `.torrent` files, and the directory and name of the `.cfg` file as shown below: 12 | ```bash 13 | $ cargo run --bin dtorrent -- --file ./torrents/file_name --config ./configs/config_file 14 | ``` 15 | On startup the client gets all the `.torrent` files on the specified directory and immediately starts the download & upload. 
16 | 17 | ## Testing 18 | Run the test suite: 19 | ```bash 20 | make test 21 | ``` 22 | 23 | ## BitTorrent Specification 24 | 25 | - [Index of BitTorrent Enhancement Proposals](http://bittorrent.org/beps/bep_0000.html) 26 | - [The BitTorrent Protocol Specification v2](http://bittorrent.org/beps/bep_0052.html) 27 | - [DHT Protocol](http://bittorrent.org/beps/bep_0005.html) 28 | -------------------------------------------------------------------------------- /bencoder/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bencoder" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | -------------------------------------------------------------------------------- /bencoder/src/bencode.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | #[derive(PartialEq, Debug, Clone)] 4 | pub enum Bencode { 5 | BNumber(i64), 6 | BString(Vec), 7 | BList(Vec), 8 | BDict(BTreeMap, Bencode>), 9 | } 10 | 11 | #[derive(PartialEq, Eq, Debug)] 12 | pub enum BencodeError { 13 | InvalidBencode, 14 | InvalidBencodeType, 15 | InvalidBencodeNumber, 16 | InvalidBencodeString, 17 | InvalidBencodeList, 18 | InvalidBencodeDict, 19 | } 20 | 21 | pub trait ToBencode { 22 | fn to_bencode(&self) -> Bencode; 23 | } 24 | 25 | impl ToBencode for String { 26 | fn to_bencode(&self) -> Bencode { 27 | Bencode::BString(self.as_bytes().to_vec()) 28 | } 29 | } 30 | 31 | impl ToBencode for i64 { 32 | fn to_bencode(&self) -> Bencode { 33 | Bencode::BNumber(*self) 34 | } 35 | } 36 | 37 | impl ToBencode for u64 { 38 | fn to_bencode(&self) -> Bencode { 39 | Bencode::BNumber((*self) as i64) 40 | } 41 | } 42 | 43 | impl ToBencode for u32 { 44 | fn to_bencode(&self) -> Bencode { 45 | Bencode::BNumber((*self).into()) 46 | } 47 | } 48 | 49 | impl ToBencode for u16 
{ 50 | fn to_bencode(&self) -> Bencode { 51 | Bencode::BNumber((*self).into()) 52 | } 53 | } 54 | 55 | impl ToBencode for i32 { 56 | fn to_bencode(&self) -> Bencode { 57 | Bencode::BNumber((*self).into()) 58 | } 59 | } 60 | 61 | impl ToBencode for Vec { 62 | fn to_bencode(&self) -> Bencode { 63 | Bencode::BString(self.clone()) 64 | } 65 | } 66 | 67 | impl ToBencode for BTreeMap, Bencode> { 68 | fn to_bencode(&self) -> Bencode { 69 | Bencode::BDict(self.clone()) 70 | } 71 | } 72 | 73 | impl ToBencode for Vec { 74 | fn to_bencode(&self) -> Bencode { 75 | Bencode::BList(self.iter().map(|s| s.to_bencode()).collect()) 76 | } 77 | } 78 | 79 | impl Bencode { 80 | /// Parses a bencoded vec of bytes into a Bencode enum. 81 | /// 82 | /// # Example 83 | /// 84 | /// ```rust 85 | /// use bencoder::bencode::Bencode; 86 | /// 87 | /// // String 88 | /// let data = b"5:hello"; 89 | /// let bencode = Bencode::decode(&data.to_vec()).unwrap(); 90 | /// 91 | /// assert_eq!(bencode, Bencode::BString(b"hello".to_vec())); 92 | /// 93 | /// // Integer 94 | /// let data = b"i123e"; 95 | /// let bencode = Bencode::decode(&data.to_vec()).unwrap(); 96 | /// 97 | /// assert_eq!(bencode, Bencode::BNumber(123)); 98 | /// ``` 99 | pub fn decode(data: &[u8]) -> Result { 100 | let (bencode, _) = Bencode::do_decode(&data[0..])?; 101 | Ok(bencode) 102 | } 103 | 104 | fn do_decode(data: &[u8]) -> Result<(Bencode, usize), BencodeError> { 105 | if data.is_empty() { 106 | return Err(BencodeError::InvalidBencode); 107 | }; 108 | match data[0] { 109 | b'i' => Bencode::decode_number(data), 110 | b'l' => Bencode::decode_list(data), 111 | b'd' => Bencode::decode_dict(data), 112 | b'0'..=b'9' => Bencode::decode_string(data), 113 | _ => Err(BencodeError::InvalidBencode), 114 | } 115 | } 116 | 117 | fn decode_string(data: &[u8]) -> Result<(Bencode, usize), BencodeError> { 118 | let mut i = 0; 119 | while data[i] != b':' { 120 | i += 1; 121 | } 122 | let length = 123 | 
std::str::from_utf8(&data[0..i]).map_err(|_| BencodeError::InvalidBencodeString)?; 124 | let length: usize = length 125 | .parse() 126 | .map_err(|_| BencodeError::InvalidBencodeString)?; 127 | 128 | i += 1; 129 | 130 | let string: Vec = data.iter().skip(i).take(length).cloned().collect(); 131 | let len = string.len(); 132 | 133 | Ok((Bencode::BString(string), i + len)) 134 | } 135 | 136 | fn decode_number(data: &[u8]) -> Result<(Bencode, usize), BencodeError> { 137 | let mut i = 1; 138 | 139 | while data[i] != b'e' { 140 | i += 1; 141 | } 142 | 143 | let number = 144 | std::str::from_utf8(&data[1..i]).map_err(|_| BencodeError::InvalidBencodeNumber)?; 145 | let number: i64 = number 146 | .parse() 147 | .map_err(|_| BencodeError::InvalidBencodeNumber)?; 148 | 149 | Ok((Bencode::BNumber(number), i + 1)) 150 | } 151 | 152 | fn decode_list(data: &[u8]) -> Result<(Bencode, usize), BencodeError> { 153 | let mut i = 1; 154 | let mut list = Vec::new(); 155 | while data[i] != b'e' { 156 | let (value, size) = Bencode::do_decode(&data[i..])?; 157 | list.push(value); 158 | i += size; 159 | } 160 | Ok((Bencode::BList(list), i + 1)) 161 | } 162 | 163 | fn decode_dict(data: &[u8]) -> Result<(Bencode, usize), BencodeError> { 164 | let mut i = 1; 165 | let mut dict = BTreeMap::new(); 166 | while data[i] != b'e' { 167 | let (key, size) = Bencode::do_decode(&data[i..])?; 168 | i += size; 169 | let (value, size) = Bencode::do_decode(&data[i..])?; 170 | i += size; 171 | match key { 172 | Bencode::BString(key) => dict.insert(key, value), 173 | _ => return Err(BencodeError::InvalidBencodeDict), 174 | }; 175 | } 176 | Ok((Bencode::BDict(dict), i + 1)) 177 | } 178 | 179 | /// Encodes a Bencode enum into a bencoded vec of bytes. 
180 | /// 181 | /// # Example 182 | /// 183 | /// ```rust 184 | /// use bencoder::bencode::Bencode; 185 | /// 186 | /// // String 187 | /// let data = String::from("spam"); 188 | /// let encoded = Bencode::encode(&data); 189 | /// 190 | /// assert_eq!(encoded, b"4:spam"); 191 | /// 192 | /// // Integer 193 | /// let data = 123; 194 | /// let encoded = Bencode::encode(&data); 195 | /// 196 | /// assert_eq!(encoded, b"i123e"); 197 | /// ``` 198 | pub fn encode(bencode: &dyn ToBencode) -> Vec { 199 | let bencode = bencode.to_bencode(); 200 | Bencode::do_encode(&bencode) 201 | } 202 | 203 | fn do_encode(bencode: &Bencode) -> Vec { 204 | match bencode { 205 | Bencode::BNumber(n) => Bencode::encode_number(*n), 206 | Bencode::BString(s) => Bencode::encode_string(s), 207 | Bencode::BList(l) => Bencode::encode_list(l), 208 | Bencode::BDict(d) => Bencode::encode_dict(d), 209 | } 210 | } 211 | 212 | fn encode_number(n: i64) -> Vec { 213 | let mut encoded = vec![b'i']; 214 | encoded.extend(n.to_string().into_bytes()); 215 | encoded.push(b'e'); 216 | encoded 217 | } 218 | 219 | fn encode_string(s: &[u8]) -> Vec { 220 | let mut encoded = Vec::new(); 221 | encoded.extend(s.len().to_string().into_bytes()); 222 | encoded.push(b':'); 223 | encoded.extend(s); 224 | encoded 225 | } 226 | 227 | fn encode_list(l: &[Bencode]) -> Vec { 228 | let mut encoded = vec![b'l']; 229 | for bencode in l { 230 | encoded.extend(Bencode::do_encode(bencode)); 231 | } 232 | encoded.push(b'e'); 233 | encoded 234 | } 235 | 236 | fn encode_dict(d: &BTreeMap, Bencode>) -> Vec { 237 | let mut encoded = vec![b'd']; 238 | for (key, value) in d { 239 | encoded.extend(Bencode::do_encode(&key.to_bencode())); 240 | encoded.extend(Bencode::do_encode(value)); 241 | } 242 | encoded.push(b'e'); 243 | encoded 244 | } 245 | } 246 | 247 | #[cfg(test)] 248 | mod tests { 249 | use super::*; 250 | 251 | #[test] 252 | fn test_decode_empty_data() { 253 | let data: &[u8; 0] = &[]; 254 | assert_eq!(Bencode::decode(data), 
Err(BencodeError::InvalidBencode)); 255 | } 256 | 257 | #[test] 258 | fn test_decode_string() { 259 | let data = b"4:spam"; 260 | 261 | assert_eq!( 262 | Bencode::decode(data).unwrap(), 263 | Bencode::BString(b"spam".to_vec()) 264 | ); 265 | } 266 | 267 | #[test] 268 | fn test_decode_empty_string() { 269 | let data = b"0:"; 270 | assert_eq!( 271 | Bencode::decode(data).unwrap(), 272 | Bencode::BString(b"".to_vec()) 273 | ); 274 | } 275 | 276 | #[test] 277 | fn test_decode_positive_integer() { 278 | let data = b"i3e"; 279 | assert_eq!(Bencode::decode(data).unwrap(), Bencode::BNumber(3)); 280 | } 281 | 282 | #[test] 283 | fn test_decode_negative_integer() { 284 | let data = b"i-3e"; 285 | assert_eq!(Bencode::decode(data).unwrap(), Bencode::BNumber(-3)); 286 | } 287 | 288 | #[test] 289 | fn test_decode_list() { 290 | let data = b"l4:spam4:eggse"; 291 | assert_eq!( 292 | Bencode::decode(data).unwrap(), 293 | Bencode::BList(vec![ 294 | Bencode::BString(b"spam".to_vec()), 295 | Bencode::BString(b"eggs".to_vec()), 296 | ]) 297 | ); 298 | } 299 | 300 | #[test] 301 | fn test_decode_empty_list() { 302 | let data = b"le"; 303 | assert_eq!(Bencode::decode(data).unwrap(), Bencode::BList(vec![])); 304 | } 305 | 306 | #[test] 307 | fn test_decode_nested_list() { 308 | let data = b"ll3:fooee"; 309 | assert_eq!( 310 | Bencode::decode(data).unwrap(), 311 | Bencode::BList(vec![Bencode::BList(vec![Bencode::BString( 312 | b"foo".to_vec() 313 | )])]) 314 | ); 315 | } 316 | 317 | #[test] 318 | fn test_decode_nested_with_2_lists() { 319 | let data = b"ll3:fooel3:baree"; 320 | 321 | assert_eq!( 322 | Bencode::decode(data).unwrap(), 323 | Bencode::BList(vec![ 324 | Bencode::BList(vec![Bencode::BString(b"foo".to_vec())]), 325 | Bencode::BList(vec![Bencode::BString(b"bar".to_vec())]) 326 | ]) 327 | ); 328 | } 329 | 330 | #[test] 331 | fn test_decode_dict() { 332 | let data = b"d3:cow3:moo4:spam4:eggse"; 333 | let mut dict = BTreeMap::new(); 334 | dict.insert(b"cow".to_vec(), 
Bencode::BString(b"moo".to_vec())); 335 | dict.insert(b"spam".to_vec(), Bencode::BString(b"eggs".to_vec())); 336 | 337 | assert_eq!(Bencode::decode(data).unwrap(), Bencode::BDict(dict)); 338 | } 339 | 340 | #[test] 341 | fn test_decode_dict_with_list() { 342 | let data = b"d4:spaml1:a1:bee"; 343 | let mut dict = BTreeMap::new(); 344 | dict.insert( 345 | b"spam".to_vec(), 346 | Bencode::BList(vec![ 347 | Bencode::BString(b"a".to_vec()), 348 | Bencode::BString(b"b".to_vec()), 349 | ]), 350 | ); 351 | 352 | assert_eq!(Bencode::decode(data).unwrap(), Bencode::BDict(dict)); 353 | } 354 | 355 | #[test] 356 | fn test_decode_longer_dict() { 357 | let data = 358 | b"d9:publisher3:bob17:publisher-webpage15:www.example.com18:publisher.location4:homee"; 359 | let mut dict = BTreeMap::new(); 360 | dict.insert(b"publisher".to_vec(), Bencode::BString(b"bob".to_vec())); 361 | dict.insert( 362 | b"publisher-webpage".to_vec(), 363 | Bencode::BString(b"www.example.com".to_vec()), 364 | ); 365 | dict.insert( 366 | b"publisher.location".to_vec(), 367 | Bencode::BString(b"home".to_vec()), 368 | ); 369 | 370 | assert_eq!(Bencode::decode(data).unwrap(), Bencode::BDict(dict)); 371 | } 372 | 373 | #[test] 374 | fn test_decode_empty_dict() { 375 | let data = b"de"; 376 | let dict = BTreeMap::new(); 377 | 378 | assert_eq!(Bencode::decode(data).unwrap(), Bencode::BDict(dict)); 379 | } 380 | 381 | #[test] 382 | fn test_decode_dict_with_number_and_string() { 383 | let data = b"d3:fooi42e3:bar5:thinge"; 384 | let mut dict = BTreeMap::new(); 385 | dict.insert(b"bar".to_vec(), Bencode::BString(b"thing".to_vec())); 386 | dict.insert(b"foo".to_vec(), Bencode::BNumber(42)); 387 | 388 | assert_eq!(Bencode::decode(data).unwrap(), Bencode::BDict(dict)); 389 | } 390 | 391 | #[test] 392 | fn test_encode_string() { 393 | let data = String::from("spam"); 394 | assert_eq!(Bencode::encode(&data), b"4:spam"); 395 | } 396 | 397 | #[test] 398 | fn test_encode_empty_string() { 399 | let data = String::from(""); 
400 | assert_eq!(Bencode::encode(&data), b"0:"); 401 | } 402 | 403 | #[test] 404 | fn test_encode_positive_integer() { 405 | let data = 3; 406 | assert_eq!(Bencode::encode(&data), b"i3e"); 407 | } 408 | 409 | #[test] 410 | fn test_encode_negative_integer() { 411 | let data = -3; 412 | assert_eq!(Bencode::encode(&data), b"i-3e"); 413 | } 414 | 415 | #[test] 416 | fn test_encode_vec_of_bytes() { 417 | let data = b"spam".to_vec(); 418 | assert_eq!(Bencode::encode(&data), b"4:spam"); 419 | } 420 | 421 | #[test] 422 | fn test_encode_vec_of_strings() { 423 | let data = vec![String::from("spam"), String::from("eggs")]; 424 | assert_eq!(Bencode::encode(&data), b"l4:spam4:eggse"); 425 | } 426 | 427 | #[test] 428 | fn test_encode_vec_of_integers() { 429 | let data = vec![1, 2, 3]; 430 | assert_eq!(Bencode::encode(&data), b"li1ei2ei3ee"); 431 | } 432 | 433 | #[test] 434 | fn test_encode_nested_list() { 435 | let data = vec![vec![String::from("spam"), String::from("eggs")]]; 436 | assert_eq!(Bencode::encode(&data), b"ll4:spam4:eggsee"); 437 | } 438 | 439 | #[test] 440 | fn test_encode_empty_list() { 441 | let data: Vec = vec![]; 442 | assert_eq!(Bencode::encode(&data), b"le"); 443 | } 444 | } 445 | -------------------------------------------------------------------------------- /bencoder/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod bencode; 2 | -------------------------------------------------------------------------------- /configs/config1.cfg: -------------------------------------------------------------------------------- 1 | TCP_PORT=6969 2 | LOG_DIRECTORY=./dtorrent_logs 3 | DOWNLOAD_DIRECTORY=./downloads 4 | PIPELINING_SIZE=5 5 | READ_WRITE_SECONDS_TIMEOUT=20 6 | MAX_PEERS_PER_TORRENT=20 7 | MAX_LOG_FILE_KB_SIZE=100000 8 | -------------------------------------------------------------------------------- /configs/config2.cfg: -------------------------------------------------------------------------------- 1 | 
TCP_PORT=6969 2 | LOG_DIRECTORY=./dtorrent_logs 3 | DOWNLOAD_DIRECTORY=./downloads 4 | PIPELINING_SIZE=5 5 | READ_WRITE_SECONDS_TIMEOUT=20 6 | MAX_PEERS_PER_TORRENT=20 7 | MAX_LOG_FILE_KB_SIZE=100000 8 | -------------------------------------------------------------------------------- /dtorrent/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dtorrent" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | sha-1 = '0.10.0' 10 | native-tls = "0.2" 11 | chrono = "0.4" 12 | rand = "0.8.5" 13 | bencoder = { path = "../bencoder" } 14 | tokio = { version = "1", features = ["rt-multi-thread","macros"]} 15 | url_encoder = { path = "../url_encoder" } 16 | clap = { version = "4.1.1", features = ["derive"] } 17 | tracing = "0.1" 18 | tracing-subscriber = "0.3" 19 | -------------------------------------------------------------------------------- /dtorrent/README.md: -------------------------------------------------------------------------------- 1 | ## Running 2 | To run the program there needs to be a `config.cfg` file in the `configs` directory of the project. We provide two with default values as an example. 3 | Then run the program with `cargo` followed by the directory containing the `.torrent` files, and the directory and name of the `.cfg` file as shown below: 4 | ```bash 5 | $ cargo run --bin dtorrent -- --file ./torrents/file_name --config ./configs/config_file 6 | ``` 7 | On startup the client gets all the .torrent files on the specified directory and immediately starts the download & upload. 
8 | 9 | ## Tests 10 | Run tests with `cargo`: 11 | ```bash 12 | $ cargo test --package dtorrent 13 | ``` -------------------------------------------------------------------------------- /dtorrent/src/bt_server/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod server; 2 | -------------------------------------------------------------------------------- /dtorrent/src/bt_server/server.rs: -------------------------------------------------------------------------------- 1 | use crate::config::cfg::Cfg; 2 | use crate::peer::bt_peer::{BtPeer, BtPeerError}; 3 | use crate::peer::peer_session::{PeerSession, PeerSessionError}; 4 | use crate::torrent_handler::status::{AtomicTorrentStatus, AtomicTorrentStatusError}; 5 | use crate::torrent_parser::torrent::Torrent; 6 | use std::collections::HashMap; 7 | use std::net::{TcpListener, TcpStream}; 8 | use std::sync::Arc; 9 | use std::thread; 10 | use std::time::Duration; 11 | use tracing::{error, info, warn}; 12 | 13 | /// Struct for handling the server side. 14 | /// 15 | /// To create a new `BtServer`, use BtServer::new(torrent, config). 16 | #[derive(Debug)] 17 | pub struct BtServer { 18 | config: Cfg, 19 | torrents_with_status: HashMap>, 20 | client_peer_id: String, 21 | } 22 | 23 | /// Posible BtServer errors. 24 | #[derive(Debug)] 25 | pub enum BtServerError { 26 | TorrentStatusError(AtomicTorrentStatusError), 27 | OpeningListenerError(std::io::Error), 28 | HandleConnectionError(std::io::Error), 29 | PeerSessionError(PeerSessionError), 30 | BtPeerError(BtPeerError), 31 | TorrentNotFound(String), 32 | ErrorSettingStreamTimeout, 33 | MaxPeersConnectedReached(String), 34 | } 35 | 36 | impl BtServer { 37 | /// Creates a new `BtServer` from a `HashMap` containing a torrent with its `AtomicTorrentStatus` and `Config`. 
38 | pub fn new( 39 | torrents_with_status: HashMap>, 40 | config: Cfg, 41 | client_peer_id: String, 42 | ) -> Self { 43 | Self { 44 | config, 45 | torrents_with_status, 46 | client_peer_id, 47 | } 48 | } 49 | 50 | /// Starts the server and starts listening for connections. 51 | /// 52 | /// # Errors 53 | /// - `OpeningListenerError` if the TcpLister couldn't be opened. 54 | pub fn init(&mut self) -> Result<(), BtServerError> { 55 | let listener = TcpListener::bind(format!("0.0.0.0:{}", self.config.tcp_port)) 56 | .map_err(BtServerError::OpeningListenerError)?; 57 | 58 | info!("Server started, listening for connections"); 59 | 60 | for stream in listener.incoming() { 61 | match stream { 62 | Ok(stream) => match self.handle_connection(stream) { 63 | Ok(_) => (), 64 | Err(e) => warn!("Couldn't handle incoming connection: {:?}", e), 65 | }, 66 | Err(e) => warn!("Couldn't handle incoming connection: {:?}", e), 67 | } 68 | } 69 | 70 | Ok(()) 71 | } 72 | 73 | fn handle_connection(&self, mut stream: TcpStream) -> Result<(), BtServerError> { 74 | let addr = stream 75 | .peer_addr() 76 | .map_err(BtServerError::HandleConnectionError)?; 77 | 78 | // set timeouts 79 | self.set_stream_timeouts(&mut stream)?; 80 | 81 | let mut peer = BtPeer::new(addr.ip().to_string(), addr.port() as i64); 82 | 83 | let info_hash = peer.receive_handshake(&mut stream).map_err(|err| { 84 | warn!("{:?} for peer: {}:{}", err, addr.ip(), addr.port() as i64); 85 | BtServerError::BtPeerError(err) 86 | })?; 87 | 88 | // See if the torrent is in the list of torrents. 89 | let (torrent, torrent_status) = match self.find_torrent_and_status(info_hash) { 90 | Ok(value) => value, 91 | Err(value) => return value, 92 | }; 93 | 94 | let current_peers = torrent_status.all_current_peers(); 95 | // if we reached the max number of peers, we can't accept any more connections. 
96 | if current_peers >= self.config.max_peers_per_torrent as usize { 97 | return Err(BtServerError::MaxPeersConnectedReached(torrent.name())); 98 | } 99 | 100 | let mut peer_session = self.create_peer_session(&peer, torrent, torrent_status)?; 101 | 102 | match peer_session.handshake_incoming_leecher(&mut stream) { 103 | Ok(_) => { 104 | self.unchoke_peer(peer_session, peer, stream, torrent.clone(), torrent_status)?; 105 | } 106 | Err(err) => { 107 | warn!("{:?}", err) 108 | } 109 | } 110 | Ok(()) 111 | } 112 | 113 | fn find_torrent_and_status( 114 | &self, 115 | info_hash: Vec, 116 | ) -> Result<(&Torrent, &Arc), Result<(), BtServerError>> { 117 | let (torrent, torrent_status) = 118 | match self.torrents_with_status.iter().find(|(torrent, _)| { 119 | match torrent.get_info_hash_as_bytes() { 120 | Ok(info_hash_bytes) => info_hash_bytes == info_hash, 121 | Err(_) => false, 122 | } 123 | }) { 124 | Some((torrent, torrent_status)) => (torrent, torrent_status), 125 | None => { 126 | return Err(Err(BtServerError::TorrentNotFound( 127 | String::from_utf8_lossy(&info_hash).to_string(), 128 | ))) 129 | } 130 | }; 131 | Ok((torrent, torrent_status)) 132 | } 133 | 134 | fn create_peer_session( 135 | &self, 136 | peer: &BtPeer, 137 | torrent: &Torrent, 138 | torrent_status: &Arc, 139 | ) -> Result { 140 | let peer_session = PeerSession::new( 141 | peer.clone(), 142 | torrent.clone(), 143 | torrent_status.clone(), 144 | self.config.clone(), 145 | self.client_peer_id.clone(), 146 | ) 147 | .map_err(BtServerError::PeerSessionError)?; 148 | Ok(peer_session) 149 | } 150 | 151 | /// Sets read and write timeouts for the stream. 
152 | fn set_stream_timeouts(&self, stream: &mut TcpStream) -> Result<(), BtServerError> { 153 | stream 154 | .set_read_timeout(Some(Duration::from_secs( 155 | self.config.read_write_seconds_timeout, 156 | ))) 157 | .map_err(|_| BtServerError::ErrorSettingStreamTimeout)?; 158 | 159 | stream 160 | .set_write_timeout(Some(Duration::from_secs( 161 | self.config.read_write_seconds_timeout, 162 | ))) 163 | .map_err(|_| BtServerError::ErrorSettingStreamTimeout)?; 164 | Ok(()) 165 | } 166 | 167 | fn unchoke_peer( 168 | &self, 169 | mut peer_session: PeerSession, 170 | peer: BtPeer, 171 | mut stream: TcpStream, 172 | torrent: Torrent, 173 | torrent_status: &Arc, 174 | ) -> Result<(), BtServerError> { 175 | torrent_status.peer_connecting(); 176 | let peer_name = format!("{}:{}", peer.ip, peer.port); 177 | 178 | let builder = thread::Builder::new().name(format!( 179 | "Torrent: {} / Peer: {}", 180 | torrent.info.name, peer_name 181 | )); 182 | 183 | let join = 184 | builder.spawn( 185 | move || match peer_session.unchoke_incoming_leecher(&mut stream) { 186 | Ok(_) => (), 187 | Err(err) => { 188 | warn!("{:?}", err); 189 | } 190 | }, 191 | ); 192 | match join { 193 | Ok(_) => (), 194 | Err(err) => { 195 | error!("{:?}", err); 196 | } 197 | } 198 | Ok(()) 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /dtorrent/src/config/cfg.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io; 3 | use std::io::BufRead; 4 | use std::io::BufReader; 5 | use std::str::FromStr; 6 | 7 | use super::constants; 8 | 9 | /// `Cfg` struct containing the config file information, previusly created with `Cfg::new`. 
10 | /// 11 | /// - `tcp_port`: port to listen for incoming connections, 12 | /// - `log_directory`: directory where the log files will be stored, 13 | /// - `download_directory`: directory where the downloaded files will be stored, 14 | /// - `pipelining_size`: number of request sent to a peer before waiting for the response, 15 | /// - `read_write_seconds_timeout`: timeout in seconds for the read and write operations to a peer, 16 | /// - `max_peers_per_torrent`: maximum number of simultaneous peers that a torrent can have, 17 | /// - `max_log_file_kb_size`: max file size in kilobytes the log can have, 18 | #[derive(Debug, Clone)] 19 | pub struct Cfg { 20 | pub tcp_port: u16, 21 | pub log_directory: String, 22 | pub download_directory: String, 23 | pub pipelining_size: u32, 24 | pub read_write_seconds_timeout: u64, 25 | pub max_peers_per_torrent: u32, 26 | pub max_log_file_kb_size: u32, 27 | } 28 | 29 | impl Cfg { 30 | /// Builds a Cfg struct containing the config file information by the given path. 31 | /// The format of the config file must be: {config_name}={config_value} (without brackets). 32 | /// In case of success it returns a Cfg struct. 33 | /// 34 | /// It returns an io::Error if: 35 | /// - The path to the config file does not exist or could not be open/readed. 36 | /// - The confing file has wrong format. 37 | /// - A wrong config_name was in the config file. 38 | /// - tcp_port setting is not a valid number in the config file. 39 | /// - pipelining_size setting is not a valid number in the config file. 40 | /// - read_write_timeout setting is not a valid number in the config file. 41 | /// - max_peers_per_torrent setting is not a valid number in the config file. 42 | /// - max_log_file_size setting is not a valid number in the config file. 43 | /// - Minimum number of correct settings were not reached. 
44 | pub fn new(path: &str) -> io::Result { 45 | let mut cfg = Self { 46 | tcp_port: 0, 47 | log_directory: String::from(""), 48 | download_directory: String::from(""), 49 | pipelining_size: 0, 50 | read_write_seconds_timeout: 0, 51 | max_peers_per_torrent: 0, 52 | max_log_file_kb_size: 0, 53 | }; 54 | 55 | let file = File::open(path)?; 56 | let reader = BufReader::new(file); 57 | 58 | let mut settings_loaded = 0; 59 | 60 | for line in reader.lines() { 61 | let current_line = line?; 62 | let setting: Vec<&str> = current_line.split('=').collect(); 63 | 64 | if setting.len() != 2 { 65 | return Err(io::Error::new( 66 | io::ErrorKind::InvalidInput, 67 | format!("Invalid config input: {}", current_line), 68 | )); 69 | } 70 | cfg = Self::load_setting(cfg, setting[0], setting[1])?; 71 | settings_loaded += 1; 72 | } 73 | if settings_loaded < constants::MIN_SETTINGS { 74 | return Err(io::Error::new( 75 | io::ErrorKind::InvalidInput, 76 | format!( 77 | "Minimum number of correct settings were not reached: {}", 78 | settings_loaded 79 | ), 80 | )); 81 | } 82 | Ok(cfg) 83 | } 84 | 85 | fn load_setting(mut self, name: &str, value: &str) -> io::Result { 86 | match name { 87 | constants::TCP_PORT => { 88 | self.tcp_port = self.parse_value(value, constants::TCP_PORT)?; 89 | } 90 | constants::LOG_DIRECTORY => self.log_directory = String::from(value), 91 | 92 | constants::DOWNLOAD_DIRECTORY => self.download_directory = String::from(value), 93 | 94 | constants::PIPELINING_SIZE => { 95 | self.pipelining_size = self.parse_value(value, constants::PIPELINING_SIZE)?; 96 | } 97 | 98 | constants::READ_WRITE_SECONDS_TIMEOUT => { 99 | self.read_write_seconds_timeout = 100 | self.parse_value(value, constants::READ_WRITE_SECONDS_TIMEOUT)?; 101 | } 102 | 103 | constants::MAX_PEERS_PER_TORRENT => { 104 | self.max_peers_per_torrent = 105 | self.parse_value(value, constants::MAX_PEERS_PER_TORRENT)?; 106 | } 107 | 108 | constants::MAX_LOG_FILE_KB_SIZE => { 109 | self.max_log_file_kb_size = 110 | 
self.parse_value(value, constants::MAX_LOG_FILE_KB_SIZE)?; 111 | } 112 | 113 | _ => { 114 | return Err(io::Error::new( 115 | io::ErrorKind::InvalidInput, 116 | format!("Invalid config setting name: {}", name), 117 | )) 118 | } 119 | } 120 | Ok(self) 121 | } 122 | 123 | fn parse_value(&self, value: &str, setting: &str) -> io::Result 124 | where 125 | F: FromStr, 126 | { 127 | let parse = value.parse::(); 128 | match parse { 129 | Err(_) => { 130 | return Err(io::Error::new( 131 | io::ErrorKind::InvalidInput, 132 | format!( 133 | "Invalid setting: {}, is not a valid type: {}", 134 | setting, value 135 | ), 136 | )); 137 | } 138 | Ok(parse) => Ok(parse), 139 | } 140 | } 141 | } 142 | 143 | #[cfg(test)] 144 | mod tests { 145 | use super::*; 146 | use std::{fs, io::Write}; 147 | 148 | // tests: 149 | // 1- test todo ok 150 | // 2- test archivo de config no existe 151 | // 3- test archivo vacio 152 | // 4- test setting que no existe 153 | // 5- test solo 2 settings 154 | // 6- test tcp_port no es numero 155 | // 7- test no importa el orden de los settings en el archivo 156 | // 8- test mal formato 157 | 158 | #[test] 159 | fn test_good_config() { 160 | let path = "./test_good_config.cfg"; 161 | let contents = b"TCP_PORT=1000\nLOG_DIRECTORY=./log\nDOWNLOAD_DIRECTORY=./download\nPIPELINING_SIZE=5\nREAD_WRITE_SECONDS_TIMEOUT=120\nMAX_PEERS_PER_TORRENT=5\nMAX_LOG_FILE_KB_SIZE=100"; 162 | create_and_write_file(path, contents); 163 | 164 | create_and_assert_config_is_ok(path, 1000, "./log", "./download", 5, 120, 5, 100); 165 | } 166 | 167 | #[test] 168 | fn test_bad_path() { 169 | let path = "bad path"; 170 | let config = Cfg::new(path); 171 | assert!(config.is_err()); 172 | } 173 | 174 | #[test] 175 | fn test_empty_file() { 176 | let path = "./test_empty_file.cfg"; 177 | let contents = b""; 178 | create_and_write_file(path, contents); 179 | 180 | create_and_assert_config_is_not_ok(path); 181 | } 182 | 183 | #[test] 184 | fn test_setting_doesnt_exist() { 185 | let path = 
"./test_setting_doesnt_exist.cfg"; 186 | let contents = b"WRONG_SETTING=1000"; 187 | create_and_write_file(path, contents); 188 | 189 | create_and_assert_config_is_not_ok(path); 190 | } 191 | 192 | #[test] 193 | fn test_bad_number_of_settings() { 194 | let path = "./test_bad_number_of_settings.cfg"; 195 | let contents = b"TCP_PORT=1000\nLOG_DIRECTORY=./log"; 196 | create_and_write_file(path, contents); 197 | 198 | create_and_assert_config_is_not_ok(path); 199 | } 200 | 201 | #[test] 202 | fn test_tcp_port_not_a_number() { 203 | let path = "./test_tcp_port_not_a_number.cfg"; 204 | let contents = b"TCP_PORT=abcd\nLOG_DIRECTORY=./log\nDOWNLOAD_DIRECTORY=./download\nPIPELINING_SIZE=5\nREAD_WRITE_SECONDS_TIMEOUT=120\nMAX_PEERS_PER_TORRENT=5\nMAX_LOG_FILE_KB_SIZE=100"; 205 | create_and_write_file(path, contents); 206 | 207 | create_and_assert_config_is_not_ok(path); 208 | } 209 | 210 | #[test] 211 | fn test_read_write_timeout_not_a_number() { 212 | let path = "./test_read_write_timeout_not_a_number.cfg"; 213 | let contents = b"TCP_PORT=1000\nLOG_DIRECTORY=./log\nDOWNLOAD_DIRECTORY=./download\nPIPELINING_SIZE=5\nREAD_WRITE_SECONDS_TIMEOUT=2segundos\nMAX_PEERS_PER_TORRENT=5\nMAX_LOG_FILE_KB_SIZE=100"; 214 | create_and_write_file(path, contents); 215 | 216 | create_and_assert_config_is_not_ok(path); 217 | } 218 | 219 | #[test] 220 | fn test_pipelining_not_a_number() { 221 | let path = "./test_pipelining_not_a_number.cfg"; 222 | let contents = b"TCP_PORT=1000\nLOG_DIRECTORY=./log\nDOWNLOAD_DIRECTORY=./download\nPIPELINING_SIZE=muy_grande\nREAD_WRITE_SECONDS_TIMEOUT=120\nMAX_PEERS_PER_TORRENT=5\nMAX_LOG_FILE_KB_SIZE=100"; 223 | create_and_write_file(path, contents); 224 | 225 | create_and_assert_config_is_not_ok(path); 226 | } 227 | 228 | #[test] 229 | fn test_max_peers_not_a_number() { 230 | let path = "./test_max_peers_not_a_number.cfg"; 231 | let contents = 
b"TCP_PORT=1000\nLOG_DIRECTORY=./log\nDOWNLOAD_DIRECTORY=./download\nPIPELINING_SIZE=5\nREAD_WRITE_SECODS_TIMEOUT=120\nMAX_PEERS_PER_TORRENT=un_millon\nMAX_LOG_FILE_KB_SIZE=100"; 232 | create_and_write_file(path, contents); 233 | 234 | create_and_assert_config_is_not_ok(path); 235 | } 236 | 237 | #[test] 238 | fn test_max_log_file_size() { 239 | let path = "./test_max_log_file_size.cfg"; 240 | let contents = b"TCP_PORT=1000\nLOG_DIRECTORY=./log\nDOWNLOAD_DIRECTORY=./download\nPIPELINING_SIZE=5\nREAD_WRITE_SECONDS_TIMEOUT=120\nMAX_PEERS_PER_TORRENT=100\nMAX_LOG_FILE_KB_SIZE=abc"; 241 | create_and_write_file(path, contents); 242 | 243 | create_and_assert_config_is_not_ok(path); 244 | } 245 | 246 | #[test] 247 | fn test_order_doesnt_matter() { 248 | let path = "./test_order_doesnt_matter.cfg"; 249 | let contents = b"LOG_DIRECTORY=./log2\nDOWNLOAD_DIRECTORY=./download2\nTCP_PORT=2500\nREAD_WRITE_SECONDS_TIMEOUT=10\nMAX_PEERS_PER_TORRENT=1\nPIPELINING_SIZE=10\nMAX_LOG_FILE_KB_SIZE=100"; 250 | create_and_write_file(path, contents); 251 | 252 | create_and_assert_config_is_ok(path, 2500, "./log2", "./download2", 10, 10, 1, 100); 253 | } 254 | 255 | #[test] 256 | fn test_bad_format() { 257 | let path = "./test_bad_format.cfg"; 258 | let contents = b"TCP_PORT=abcd=1234\nLOG_DIRECTORY=./log\nDOWNLOAD_DIRECTORY=./download\nPIPELINING_SIZE=5\nREAD_WRITE_SECONDS_TIMEOUT=120\nMAX_PEERS_PER_TORRENT=5"; 259 | create_and_write_file(path, contents); 260 | 261 | create_and_assert_config_is_not_ok(path); 262 | } 263 | 264 | // Auxiliary functions 265 | 266 | fn create_and_write_file(path: &str, contents: &[u8]) -> () { 267 | let mut file = 268 | File::create(path).expect(&format!("Error creating file in path: {}", &path)); 269 | file.write_all(contents) 270 | .expect(&format!("Error writing file in path: {}", &path)); 271 | } 272 | 273 | fn create_and_assert_config_is_ok( 274 | path: &str, 275 | tcp_port: u16, 276 | log_directory: &str, 277 | download_directory: &str, 278 | 
pipelining_size: u32, 279 | read_write_timeout: u64, 280 | max_peers_per_torrent: u32, 281 | max_log_file_size: u32, 282 | ) { 283 | let config = Cfg::new(path); 284 | 285 | assert!(config.is_ok()); 286 | 287 | let config = config.expect(&format!("Error creating config in path: {}", &path)); 288 | 289 | assert_eq!(config.tcp_port, tcp_port); 290 | assert_eq!(config.log_directory, log_directory); 291 | assert_eq!(config.download_directory, download_directory); 292 | assert_eq!(config.pipelining_size, pipelining_size); 293 | assert_eq!(config.read_write_seconds_timeout, read_write_timeout); 294 | assert_eq!(config.max_peers_per_torrent, max_peers_per_torrent); 295 | assert_eq!(config.max_log_file_kb_size, max_log_file_size); 296 | 297 | fs::remove_file(path).expect(&format!("Error removing file in path: {}", &path)); 298 | } 299 | 300 | fn create_and_assert_config_is_not_ok(path: &str) { 301 | let config = Cfg::new(path); 302 | assert!(config.is_err()); 303 | fs::remove_file(path).expect(&format!("Error removing file in path: {}", &path)); 304 | } 305 | } 306 | -------------------------------------------------------------------------------- /dtorrent/src/config/constants.rs: -------------------------------------------------------------------------------- 1 | pub const TCP_PORT: &str = "TCP_PORT"; 2 | pub const LOG_DIRECTORY: &str = "LOG_DIRECTORY"; 3 | pub const DOWNLOAD_DIRECTORY: &str = "DOWNLOAD_DIRECTORY"; 4 | pub const PIPELINING_SIZE: &str = "PIPELINING_SIZE"; 5 | pub const READ_WRITE_SECONDS_TIMEOUT: &str = "READ_WRITE_SECONDS_TIMEOUT"; 6 | pub const MAX_PEERS_PER_TORRENT: &str = "MAX_PEERS_PER_TORRENT"; 7 | pub const MAX_LOG_FILE_KB_SIZE: &str = "MAX_LOG_FILE_KB_SIZE"; 8 | 9 | pub const MIN_SETTINGS: i8 = 7; 10 | -------------------------------------------------------------------------------- /dtorrent/src/config/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cfg; 2 | pub mod constants; 3 | 
-------------------------------------------------------------------------------- /dtorrent/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod bt_server; 2 | pub mod config; 3 | pub mod peer; 4 | pub mod storage_manager; 5 | pub mod torrent_handler; 6 | pub mod torrent_parser; 7 | pub mod tracker; 8 | -------------------------------------------------------------------------------- /dtorrent/src/main.rs: -------------------------------------------------------------------------------- 1 | use clap::Parser; 2 | use dtorrent::{ 3 | bt_server::server::BtServer, config::cfg::Cfg, torrent_handler::status::AtomicTorrentStatus, 4 | torrent_parser::parser::TorrentParser, 5 | }; 6 | use std::collections::HashMap; 7 | use std::env; 8 | use std::path::PathBuf; 9 | use std::sync::Arc; 10 | use tracing::info; 11 | 12 | #[derive(Parser, Debug)] 13 | struct Args { 14 | #[arg(short, long)] 15 | file: String, 16 | #[arg(short, long)] 17 | config: String, 18 | } 19 | 20 | #[tokio::main] 21 | async fn main() { 22 | 23 | // Reads the filepath from the command line argument (Check README) 24 | let args = Args::parse(); 25 | let file_path = PathBuf::from(args.file.trim()); 26 | let config_path = args.config.trim(); 27 | 28 | // install global collector configured based on RUST_LOG env var. 
29 | tracing_subscriber::fmt::init(); 30 | 31 | // Initializes the server 32 | let parsed = TorrentParser::parse(&file_path).expect("parser could not find the file"); 33 | let config = Cfg::new(&config_path).expect("Config file not found or incomplete"); 34 | let (status, _status_reciever) = AtomicTorrentStatus::new(&parsed, config.clone()); 35 | let mut torrent_with_status = HashMap::new(); 36 | torrent_with_status.insert(parsed, Arc::new(status)); 37 | let client_peer_id = "client_peer_id".to_string(); 38 | let mut server = BtServer::new(torrent_with_status, config, client_peer_id); 39 | info!("Initializing server ..."); 40 | server.init().expect("Failed to initialize server"); 41 | } 42 | -------------------------------------------------------------------------------- /dtorrent/src/peer/bt_peer.rs: -------------------------------------------------------------------------------- 1 | use bencoder::bencode::Bencode; 2 | use std::io::Read; 3 | use std::io::Write; 4 | use std::net::TcpStream; 5 | 6 | use super::handshake::Handshake; 7 | 8 | /// `BtPeer` struct containing individual BtPeer information. 9 | /// 10 | /// To create a new `BtPeer` use the method builder `from()`. 11 | #[derive(Debug, Clone)] 12 | pub struct BtPeer { 13 | pub peer_id: Option>, 14 | pub ip: String, 15 | pub port: i64, 16 | pub info_hash: Option>, 17 | } 18 | 19 | impl PartialEq for BtPeer { 20 | fn eq(&self, other: &Self) -> bool { 21 | self.ip == other.ip && self.port == other.port 22 | } 23 | } 24 | 25 | impl Eq for BtPeer {} 26 | 27 | impl std::hash::Hash for BtPeer { 28 | fn hash(&self, state: &mut H) { 29 | self.ip.hash(state); 30 | self.port.hash(state); 31 | } 32 | } 33 | 34 | /// Posible `BtPeer` errors 35 | #[derive(Debug)] 36 | pub enum BtPeerError { 37 | InvalidPeerId, 38 | InvalidIp, 39 | InvalidPort, 40 | NotADict, 41 | HandshakeError, 42 | } 43 | 44 | impl BtPeer { 45 | /// Builds a new `BtPeer` decoding a bencoded Vec cointaining the BtPeer information. 
46 | pub fn new(ip: String, port: i64) -> Self { 47 | Self { 48 | peer_id: None, 49 | ip, 50 | port, 51 | info_hash: None, 52 | } 53 | } 54 | 55 | /// Builds a new `BtPeer` from a bencoded peer from the tracker response peer list. 56 | /// 57 | /// 58 | /// It returns an `BtPeerError` if: 59 | /// - The peer ID is invalid. 60 | /// - The peer IP is invalid. 61 | /// - The peer Port is invalid. 62 | /// - The bencoded peer is not a Dict. 63 | pub fn from(bencode: Bencode) -> Result { 64 | let mut peer_id: Vec = Vec::new(); 65 | let mut ip: String = String::new(); 66 | let mut port: i64 = 0; 67 | 68 | let d = match bencode { 69 | Bencode::BDict(d) => d, 70 | _ => return Err(BtPeerError::NotADict), 71 | }; 72 | 73 | for (k, v) in d.iter() { 74 | if k == b"peer id" { 75 | peer_id = Self::create_peer_id(v)?; 76 | } else if k == b"ip" { 77 | ip = Self::create_ip(v)?; 78 | } else if k == b"port" { 79 | port = Self::create_port(v)?; 80 | } 81 | } 82 | 83 | Ok(BtPeer { 84 | peer_id: Some(peer_id), 85 | ip, 86 | port, 87 | info_hash: None, 88 | }) 89 | } 90 | 91 | fn create_peer_id(bencode: &Bencode) -> Result, BtPeerError> { 92 | let peer_id = match bencode { 93 | Bencode::BString(s) => s.clone(), 94 | _ => return Err(BtPeerError::InvalidPeerId), 95 | }; 96 | 97 | Ok(peer_id) 98 | } 99 | 100 | fn create_ip(bencode: &Bencode) -> Result { 101 | let ip = match bencode { 102 | Bencode::BString(s) => s, 103 | _ => return Err(BtPeerError::InvalidIp), 104 | }; 105 | 106 | let ip = match String::from_utf8(ip.to_vec()) { 107 | Ok(s) => s, 108 | Err(_) => return Err(BtPeerError::InvalidIp), 109 | }; 110 | 111 | Ok(ip) 112 | } 113 | 114 | fn create_port(bencode: &Bencode) -> Result { 115 | let port = match bencode { 116 | Bencode::BNumber(n) => *n, 117 | _ => return Err(BtPeerError::InvalidPort), 118 | }; 119 | 120 | Ok(port) 121 | } 122 | 123 | /// Reads a handshake from the peer and returns the info hash. 
124 | /// 125 | /// It returns an error if the handshake could not be read or the handshake was not successful. 126 | pub fn receive_handshake(&mut self, stream: &mut TcpStream) -> Result, BtPeerError> { 127 | let mut buffer = [0; 68]; 128 | stream 129 | .read_exact(&mut buffer) 130 | .map_err(|_| BtPeerError::HandshakeError)?; 131 | 132 | let handshake = Handshake::from_bytes(&buffer).map_err(|_| BtPeerError::HandshakeError)?; 133 | 134 | self.info_hash = Some(handshake.info_hash.clone()); 135 | self.peer_id = Some(handshake.peer_id); 136 | 137 | Ok(handshake.info_hash) 138 | } 139 | 140 | /// Sends a handshake to the peer. 141 | /// 142 | /// It returns an error if the handshake could not be sent or the handshake was not successful. 143 | pub fn send_handshake( 144 | &mut self, 145 | stream: &mut TcpStream, 146 | info_hash: Vec, 147 | client_peer_id: String, 148 | ) -> Result<(), BtPeerError> { 149 | let handshake = Handshake::new(info_hash, client_peer_id.as_bytes().to_vec()); 150 | stream 151 | .write_all(&handshake.as_bytes()) 152 | .map_err(|_| BtPeerError::HandshakeError)?; 153 | Ok(()) 154 | } 155 | } 156 | 157 | #[cfg(test)] 158 | mod tests { 159 | use super::*; 160 | use std::collections::BTreeMap; 161 | 162 | #[test] 163 | fn test_from_bt_peer() { 164 | let mut dict = BTreeMap::new(); 165 | dict.insert(b"peer id".to_vec(), Bencode::BString(b"peer id".to_vec())); 166 | dict.insert(b"ip".to_vec(), Bencode::BString(b"127.0.0.1".to_vec())); 167 | dict.insert(b"port".to_vec(), Bencode::BNumber(6868)); 168 | 169 | let bencode = Bencode::BDict(dict); 170 | 171 | let bt_peer = BtPeer::from(bencode).unwrap(); 172 | 173 | assert_eq!(bt_peer.peer_id, Some(b"peer id".to_vec())); 174 | assert_eq!(bt_peer.ip, "127.0.0.1"); 175 | assert_eq!(bt_peer.port, 6868); 176 | } 177 | 178 | #[test] 179 | fn test_new_peer() { 180 | let bt_peer = BtPeer::new("127.0.0.1".to_string(), 6868); 181 | 182 | assert_eq!(bt_peer.peer_id, None); 183 | assert_eq!(bt_peer.ip, "127.0.0.1"); 
184 | assert_eq!(bt_peer.port, 6868); 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /dtorrent/src/peer/handshake.rs: -------------------------------------------------------------------------------- 1 | #[derive(Debug)] 2 | pub enum FromHandshakeError { 3 | InvalidHandshake, 4 | } 5 | 6 | /// Represents a handshake message. 7 | /// Is the first message sent to start a connection with a peer. 8 | #[derive(Debug)] 9 | pub struct Handshake { 10 | pub pstrlen: u8, 11 | pub pstr: String, 12 | pub reserved: [u8; 8], 13 | pub info_hash: Vec, 14 | pub peer_id: Vec, 15 | } 16 | 17 | const PSTR: &str = "BitTorrent protocol"; 18 | 19 | impl Handshake { 20 | /// Creates a new `Handshake` message. 21 | pub fn new(info_hash: Vec, peer_id: Vec) -> Self { 22 | Self { 23 | pstrlen: 19, 24 | pstr: PSTR.to_string(), 25 | reserved: [0; 8], 26 | info_hash, 27 | peer_id, 28 | } 29 | } 30 | 31 | /// Converts a `Handshake` message to a byte array. 32 | pub fn as_bytes(&self) -> Vec { 33 | let mut bytes = vec![self.pstrlen]; 34 | bytes.extend(self.pstr.as_bytes()); 35 | bytes.extend(&self.reserved); 36 | bytes.extend(&self.info_hash); 37 | bytes.extend(&self.peer_id); 38 | bytes 39 | } 40 | 41 | /// Parses a byte array into a `Handshake` message. 
42 | pub fn from_bytes(bytes: &[u8]) -> Result { 43 | if bytes.len() != 68 { 44 | return Err(FromHandshakeError::InvalidHandshake); 45 | } 46 | 47 | let pstrlen = bytes[0]; 48 | if pstrlen != 19 { 49 | return Err(FromHandshakeError::InvalidHandshake); 50 | } 51 | 52 | let pstr = String::from_utf8(bytes[1..pstrlen as usize + 1].to_vec()) 53 | .map_err(|_| FromHandshakeError::InvalidHandshake)?; 54 | let reserved = &bytes[pstrlen as usize + 1..pstrlen as usize + 9]; 55 | let info_hash = &bytes[pstrlen as usize + 9..pstrlen as usize + 29]; 56 | let peer_id = &bytes[pstrlen as usize + 29..]; 57 | 58 | Ok(Self { 59 | pstrlen, 60 | pstr, 61 | reserved: [ 62 | reserved[0], 63 | reserved[1], 64 | reserved[2], 65 | reserved[3], 66 | reserved[4], 67 | reserved[5], 68 | reserved[6], 69 | reserved[7], 70 | ], 71 | info_hash: info_hash.to_vec(), 72 | peer_id: peer_id.to_vec(), 73 | }) 74 | } 75 | } 76 | 77 | #[cfg(test)] 78 | mod tests { 79 | use super::*; 80 | 81 | #[test] 82 | fn test_as_bytes() { 83 | let expected_handshake_len = 68; 84 | let expected_pstrlen = 19; 85 | let expected_pstr = b"BitTorrent protocol".to_vec(); 86 | let expected_reserved = [0; 8]; 87 | 88 | let info_hash: Vec = (1..=20).collect(); 89 | let peer_id: Vec = (21..=40).collect(); 90 | let handshake = Handshake::new(info_hash.clone(), peer_id.clone()); 91 | 92 | let bytes = handshake.as_bytes(); 93 | 94 | assert_eq!(bytes.len(), expected_handshake_len); 95 | assert_eq!(bytes[0], expected_pstrlen); 96 | assert_eq!(bytes[1..20], expected_pstr); 97 | assert_eq!(bytes[20..28], expected_reserved); 98 | assert_eq!(bytes[28..48], info_hash); 99 | assert_eq!(bytes[48..], peer_id); 100 | } 101 | 102 | #[test] 103 | fn test_from_bytes() { 104 | let expected_pstrlen = 19; 105 | let expected_pstr = "BitTorrent protocol"; 106 | let expected_reserved = [0; 8]; 107 | 108 | let info_hash: Vec = (1..=20).collect(); 109 | let peer_id: Vec = (21..=40).collect(); 110 | let handshake = Handshake::new(info_hash.clone(), 
peer_id.clone()); 111 | let bytes = handshake.as_bytes(); 112 | 113 | let handshake = Handshake::from_bytes(&bytes).unwrap(); 114 | 115 | assert_eq!(handshake.pstrlen, expected_pstrlen); 116 | assert_eq!(handshake.pstr, expected_pstr); 117 | assert_eq!(handshake.reserved, expected_reserved); 118 | assert_eq!(handshake.info_hash, info_hash); 119 | assert_eq!(handshake.peer_id, peer_id); 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /dtorrent/src/peer/message_handler.rs: -------------------------------------------------------------------------------- 1 | use std::{io::Write, net::TcpStream, sync::Arc}; 2 | use tracing::info; 3 | 4 | use crate::{ 5 | torrent_handler::status::{AtomicTorrentStatus, AtomicTorrentStatusError}, 6 | torrent_parser::torrent::Torrent, 7 | }; 8 | 9 | use super::{ 10 | handshake::Handshake, 11 | peer_message::{Bitfield, Message, MessageId, Request}, 12 | }; 13 | 14 | #[derive(Debug)] 15 | pub enum MessageHandlerError { 16 | ErrorGettingBitfield(AtomicTorrentStatusError), 17 | ErrorGettingPiece(AtomicTorrentStatusError), 18 | HandshakeError, 19 | MessageError(MessageId), 20 | } 21 | 22 | /// Message handler for a peer session. 23 | /// 24 | /// It handles the handshake as well as the sending and receiving of messages from a peer. 25 | pub struct MessageHandler { 26 | torrent: Torrent, 27 | torrent_status: Arc, 28 | client_peer_id: String, 29 | } 30 | 31 | impl MessageHandler { 32 | pub fn new( 33 | torrent: Torrent, 34 | torrent_status: Arc, 35 | client_peer_id: String, 36 | ) -> MessageHandler { 37 | Self { 38 | torrent, 39 | torrent_status, 40 | client_peer_id, 41 | } 42 | } 43 | 44 | /// ------------------------------------------------------------------------------------------------ 45 | /// Receiving messages 46 | 47 | /// Handles a bitfield message received from the peer. 
48 | pub fn handle_bitfield(&mut self, message: Message) -> Bitfield { 49 | Bitfield::new(message.payload) 50 | } 51 | 52 | /// Handles a piece message received from the peer. 53 | pub fn handle_piece(&mut self, message: Message) -> Vec { 54 | let block = &message.payload[8..]; 55 | block.to_vec() 56 | } 57 | 58 | // Returns the received piece index 59 | pub fn handle_have(&mut self, message: Message) -> u32 { 60 | let mut index: [u8; 4] = [0; 4]; 61 | index.copy_from_slice(&message.payload[0..4]); 62 | u32::from_be_bytes(index) 63 | } 64 | 65 | /// ------------------------------------------------------------------------------------------------ 66 | /// Sending messages 67 | 68 | /// Sends a piece message to the peer. 69 | pub fn send_piece( 70 | &mut self, 71 | index: u32, 72 | begin: u32, 73 | block: &[u8], 74 | stream: &mut TcpStream, 75 | ) -> Result<(), MessageHandlerError> { 76 | let mut payload = vec![]; 77 | payload.extend(index.to_be_bytes()); 78 | payload.extend(begin.to_be_bytes()); 79 | payload.extend(block); 80 | 81 | let piece_msg = Message::new(MessageId::Piece, payload); 82 | self.send(stream, piece_msg)?; 83 | 84 | info!("Sent piece: {} / Offset: {}", index, begin); 85 | 86 | Ok(()) 87 | } 88 | 89 | /// Sends a unchoked message to the peer. 90 | pub fn send_unchoked(&mut self, stream: &mut TcpStream) -> Result<(), MessageHandlerError> { 91 | let unchoked_msg = Message::new(MessageId::Unchoke, vec![]); 92 | self.send(stream, unchoked_msg)?; 93 | Ok(()) 94 | } 95 | 96 | /// Sends a bitfield message to the peer. 97 | pub fn send_bitfield(&mut self, stream: &mut TcpStream) -> Result<(), MessageHandlerError> { 98 | let bitfield = self 99 | .torrent_status 100 | .get_bitfield() 101 | .map_err(MessageHandlerError::ErrorGettingBitfield)?; 102 | 103 | let bitfield_msg = Message::new(MessageId::Bitfield, bitfield.get_vec()); 104 | self.send(stream, bitfield_msg)?; 105 | Ok(()) 106 | } 107 | 108 | /// Sends a request message to the peer. 
109 | pub fn send_request( 110 | &self, 111 | index: u32, 112 | begin: u32, 113 | length: u32, 114 | stream: &mut TcpStream, 115 | ) -> Result<(), MessageHandlerError> { 116 | let payload = Request::new(index, begin, length).as_bytes(); 117 | 118 | let request_msg = Message::new(MessageId::Request, payload); 119 | self.send(stream, request_msg)?; 120 | Ok(()) 121 | } 122 | 123 | /// Sends an interested message to the peer. 124 | pub fn send_interested(&mut self, stream: &mut TcpStream) -> Result<(), MessageHandlerError> { 125 | let interested_msg = Message::new(MessageId::Interested, vec![]); 126 | self.send(stream, interested_msg)?; 127 | Ok(()) 128 | } 129 | 130 | /// Sends a cancel message to the peer. 131 | pub fn send_cancel( 132 | &mut self, 133 | index: u32, 134 | begin: u32, 135 | length: u32, 136 | stream: &mut TcpStream, 137 | ) -> Result<(), MessageHandlerError> { 138 | let mut payload = vec![]; 139 | payload.extend(index.to_be_bytes()); 140 | payload.extend(begin.to_be_bytes()); 141 | payload.extend(length.to_be_bytes()); 142 | 143 | let cancel_msg = Message::new(MessageId::Cancel, payload); 144 | self.send(stream, cancel_msg)?; 145 | 146 | info!("Cancel piece: {} / Offset: {}", index, begin); 147 | 148 | Ok(()) 149 | } 150 | 151 | pub fn send_have( 152 | &mut self, 153 | index: u32, 154 | stream: &mut TcpStream, 155 | ) -> Result<(), MessageHandlerError> { 156 | let mut payload = vec![]; 157 | payload.extend(index.to_be_bytes()); 158 | 159 | let have_msg = Message::new(MessageId::Have, payload); 160 | self.send(stream, have_msg)?; 161 | 162 | Ok(()) 163 | } 164 | 165 | /// Generic sending function. 
166 | fn send(&self, stream: &mut TcpStream, message: Message) -> Result<(), MessageHandlerError> { 167 | stream 168 | .write_all(&message.as_bytes()) 169 | .map_err(|_| MessageHandlerError::MessageError(message.id))?; 170 | Ok(()) 171 | } 172 | 173 | /// ------------------------------------------------------------------------------------------------ 174 | /// Handshake 175 | 176 | /// Sends a handshake to the peer. 177 | /// 178 | /// It returns an error if the handshake could not be sent or the handshake was not successful. 179 | pub fn send_handshake(&mut self, stream: &mut TcpStream) -> Result<(), MessageHandlerError> { 180 | let info_hash = self 181 | .torrent 182 | .get_info_hash_as_bytes() 183 | .map_err(|_| MessageHandlerError::HandshakeError)?; 184 | 185 | let handshake = Handshake::new(info_hash, self.client_peer_id.as_bytes().to_vec()); 186 | stream 187 | .write_all(&handshake.as_bytes()) 188 | .map_err(|_| MessageHandlerError::HandshakeError)?; 189 | Ok(()) 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /dtorrent/src/peer/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod bt_peer; 2 | mod handshake; 3 | mod message_handler; 4 | pub mod peer_message; 5 | pub mod peer_session; 6 | pub mod session_status; 7 | -------------------------------------------------------------------------------- /dtorrent/src/peer/peer_message/bitfield.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use crate::torrent_handler::status::PieceStatus; 4 | 5 | /// Represents a Bitfield. 6 | /// 7 | /// It contains information about the pieces that the peer has. 
8 | #[derive(Debug, Clone)] 9 | pub struct Bitfield { 10 | pub bitfield: Vec, 11 | } 12 | 13 | impl Bitfield { 14 | pub fn new(bitfield: Vec) -> Bitfield { 15 | Bitfield { bitfield } 16 | } 17 | 18 | /// Returns whether the bitfield has the piece with the given index. 19 | pub fn has_piece(&self, index: u32) -> bool { 20 | let byte_index = (index / 8) as usize; 21 | let byte = self.bitfield[byte_index]; 22 | 23 | let bit_index = 7 - (index % 8); // Gets the bit index in the byte (from the right) 24 | 25 | // Moves the corresponding bit to the rightmost side of the byte 26 | // and then checks if that last bit is 1 or 0 27 | let bit = (byte >> bit_index) & 1; 28 | bit != 0 29 | } 30 | 31 | // Returns whether the bitfield has all the pieces. 32 | pub fn is_complete(&self) -> bool { 33 | self.bitfield.iter().all(|byte| *byte == 0b1111_1111) 34 | } 35 | 36 | /// Creates a bitfield from pieces status 37 | pub fn from(pieces_status: &HashMap) -> Bitfield { 38 | let bytes_count = (pieces_status.len() + 7) / 8; 39 | let mut bitfield = vec![0; bytes_count]; 40 | 41 | for (piece_index, status) in pieces_status { 42 | if status == &PieceStatus::Finished { 43 | let byte_index = (piece_index / 8) as usize; 44 | let byte = bitfield[byte_index]; 45 | 46 | let bit_index = 7 - (piece_index % 8); // Gets the bit index in the byte (from the right) 47 | let bit = 1 << bit_index; // Shifts 1 to the left bit_index times 48 | 49 | bitfield[byte_index] = byte | bit; 50 | } 51 | } 52 | 53 | Self::new(bitfield) 54 | } 55 | 56 | /// Returns the indices difference between two bitfields of the same size. 
57 | pub fn diff(&self, other: &Bitfield) -> Vec { 58 | let mut diff = vec![]; 59 | 60 | for (index, byte) in self.bitfield.iter().enumerate() { 61 | let other_byte = other.bitfield[index]; 62 | 63 | for bit_index in 0..8 { 64 | let bit = 1 << (7 - bit_index); 65 | let our_bit = (byte & bit) != 0; 66 | let other_bit = (other_byte & bit) != 0; 67 | 68 | if our_bit != other_bit { 69 | diff.push(index * 8 + bit_index as usize); 70 | } 71 | } 72 | } 73 | diff 74 | } 75 | 76 | /// Sets the indexth bit to the given value. 77 | pub fn set_bit(&mut self, index: u32, value: bool) { 78 | let byte_index = (index / 8) as usize; 79 | let byte = self.bitfield[byte_index]; 80 | 81 | let bit_index = 7 - (index % 8); // Gets the bit index in the byte (from the right) 82 | let bit = 1 << bit_index; // Shifts 1 to the left bit_index times 83 | 84 | if value { 85 | self.bitfield[byte_index] = byte | bit; 86 | } else { 87 | self.bitfield[byte_index] = byte & !bit; 88 | } 89 | } 90 | 91 | pub fn get_vec(&self) -> Vec { 92 | self.bitfield.clone() 93 | } 94 | } 95 | 96 | #[cfg(test)] 97 | mod tests { 98 | use super::*; 99 | 100 | #[test] 101 | fn test_bitfield_has_all_pieces() { 102 | let bitfield = Bitfield::new(vec![0b11111111, 0b11111111, 0b11111111, 0b11111111]); 103 | 104 | assert!(bitfield.has_piece(4)); 105 | } 106 | 107 | #[test] 108 | fn test_bitfield_has_one_piece() { 109 | let bitfield = Bitfield::new(vec![0b00000000, 0b00000010, 0b00000000, 0b00000000]); 110 | 111 | assert!(bitfield.has_piece(14)); 112 | } 113 | 114 | #[test] 115 | fn test_bitfield_not_has_piece() { 116 | let bitfield = Bitfield::new(vec![0b11111111, 0b11111111, 0b11111101, 0b11111111]); 117 | 118 | assert!(!bitfield.has_piece(22)); 119 | } 120 | 121 | #[test] 122 | fn test_bitfield_from_one_piece_finished() { 123 | let mut pieces_status = HashMap::new(); 124 | for i in 0..8 { 125 | pieces_status.insert(i, PieceStatus::Free); 126 | } 127 | 128 | pieces_status.insert(0, PieceStatus::Finished); 129 | 130 | let 
bitfield = Bitfield::from(&pieces_status); 131 | 132 | assert_eq!(bitfield.get_vec(), vec![0b1000_0000]); 133 | } 134 | 135 | #[test] 136 | fn test_bitfield_from_one_piece_finished_in_the_middle() { 137 | let mut pieces_status = HashMap::new(); 138 | for i in 0..8 { 139 | pieces_status.insert(i, PieceStatus::Free); 140 | } 141 | 142 | pieces_status.insert(3, PieceStatus::Finished); 143 | 144 | let bitfield = Bitfield::from(&pieces_status); 145 | 146 | assert_eq!(bitfield.get_vec(), vec![0b0001_0000]); 147 | } 148 | 149 | #[test] 150 | fn test_bitfield_from_all_pieces_finished() { 151 | let mut pieces_status = HashMap::new(); 152 | for i in 0..8 { 153 | pieces_status.insert(i, PieceStatus::Finished); 154 | } 155 | 156 | let bitfield = Bitfield::from(&pieces_status); 157 | 158 | assert_eq!(bitfield.get_vec(), vec![0b1111_1111]); 159 | } 160 | 161 | #[test] 162 | fn test_from_two_bytes() { 163 | let mut pieces_status = HashMap::new(); 164 | for i in 0..9 { 165 | pieces_status.insert(i, PieceStatus::Finished); 166 | } 167 | 168 | let bitfield = Bitfield::from(&pieces_status); 169 | 170 | assert_eq!(bitfield.get_vec(), vec![0b1111_1111, 0b1000_0000]); 171 | } 172 | 173 | #[test] 174 | fn test_from_two_bytes_complete() { 175 | let mut pieces_status = HashMap::new(); 176 | for i in 0..16 { 177 | pieces_status.insert(i, PieceStatus::Finished); 178 | } 179 | 180 | let bitfield = Bitfield::from(&pieces_status); 181 | 182 | assert_eq!(bitfield.get_vec(), vec![0b1111_1111, 0b1111_1111]); 183 | } 184 | 185 | #[test] 186 | fn test_diff() { 187 | let bitfield1 = Bitfield::new(vec![0b11111100, 0b11111111]); 188 | let bitfield2 = Bitfield::new(vec![0b00011100, 0b00111111]); 189 | 190 | assert_eq!(bitfield2.diff(&bitfield1), vec![0, 1, 2, 8, 9]); 191 | } 192 | 193 | #[test] 194 | fn test_equal_diff() { 195 | let bitfield1 = Bitfield::new(vec![0b11111100, 0b11111111]); 196 | let bitfield2 = Bitfield::new(vec![0b11111100, 0b11111111]); 197 | 198 | 
/// IDs of the messages defined in the peer wire protocol (BEP 3).
///
/// `KeepAlive` carries no ID byte on the wire (a keep-alive is a
/// zero-length message); the -1 discriminant is an in-process sentinel only.
#[derive(PartialEq, Debug, Clone)]
pub enum MessageId {
    KeepAlive = -1,
    Choke = 0,
    Unchoke = 1,
    Interested = 2,
    NotInterested = 3,
    Have = 4,
    Bitfield = 5,
    Request = 6,
    Piece = 7,
    Cancel = 8,
    Port = 9,
}

/// A peer-wire message: a message ID plus its (possibly empty) payload.
#[derive(Debug)]
pub struct Message {
    pub id: MessageId,
    pub payload: Vec<u8>,
}

/// Errors produced while parsing a message from raw bytes.
#[derive(Debug)]
pub enum MessageError {
    /// The buffer was empty or its first byte is not a known message ID.
    InvalidMessage,
}

impl Message {
    /// Creates a new `Message` from a message ID and a payload.
    pub fn new(id: MessageId, payload: Vec<u8>) -> Self {
        Self { id, payload }
    }

    /// Parses a byte buffer (`<id byte><payload bytes...>`) into a `Message`.
    ///
    /// # Errors
    ///
    /// Returns `MessageError::InvalidMessage` when the buffer is empty
    /// (the previous version panicked on the `payload[0]` index) or when
    /// the leading byte is not a known message ID.
    pub fn from_bytes(payload: &[u8]) -> Result<Self, MessageError> {
        // Fix: guard against an empty buffer instead of indexing into it.
        let (&id_byte, rest) = payload
            .split_first()
            .ok_or(MessageError::InvalidMessage)?;

        let id = match id_byte {
            0 => MessageId::Choke,
            1 => MessageId::Unchoke,
            2 => MessageId::Interested,
            3 => MessageId::NotInterested,
            4 => MessageId::Have,
            5 => MessageId::Bitfield,
            6 => MessageId::Request,
            7 => MessageId::Piece,
            8 => MessageId::Cancel,
            9 => MessageId::Port,
            _ => return Err(MessageError::InvalidMessage),
        };

        Ok(Self {
            id,
            payload: rest.to_vec(),
        })
    }

    /// Serializes the message as `<length (4 BE bytes)><id byte><payload>`,
    /// where length counts the ID byte plus the payload.
    ///
    /// NOTE(review): `MessageId::KeepAlive as u8` would wrap -1 to 255;
    /// keep-alives appear to never be serialized through here — confirm.
    pub fn as_bytes(&self) -> Vec<u8> {
        let len = self.payload.len() + 1; // +1 for the ID byte
        let mut bytes = Vec::with_capacity(4 + len);
        bytes.extend_from_slice(&(len as u32).to_be_bytes());
        bytes.push(self.id.clone() as u8);
        bytes.extend_from_slice(&self.payload);
        bytes
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_message_from_empty_bytes_is_invalid() {
        // Regression test for the empty-buffer panic.
        assert!(Message::from_bytes(&[]).is_err());
    }

    #[test]
    fn test_message_unchoke_from_bytes() {
        let payload = 1u8.to_be_bytes();
        let msg = Message::from_bytes(&payload).unwrap();

        assert_eq!(msg.id, MessageId::Unchoke);
        assert_eq!(msg.payload, vec![]);
    }

    #[test]
    fn test_message_interested_from_bytes() {
        let payload = 2u8.to_be_bytes();
        let msg = Message::from_bytes(&payload).unwrap();

        assert_eq!(msg.id, MessageId::Interested);
        assert_eq!(msg.payload, vec![]);
    }

    #[test]
    fn test_message_request_as_bytes() {
        let index = 0u32.to_be_bytes();
        let begin = 0u32.to_be_bytes();
        let length = 16384u32.to_be_bytes();
        let payload = [index, begin, length].concat();
        let msg = Message::new(MessageId::Request, payload.clone());

        let bytes = msg.as_bytes();

        let len = 13u32.to_be_bytes();
        let msg_type = 6u8.to_be_bytes();
        let mut expected = vec![];
        expected.extend(&len);
        expected.extend(&msg_type);
        expected.extend(&payload);

        assert_eq!(bytes, expected);
    }

    #[test]
    fn test_message_interested_as_bytes() {
        let msg = Message::new(MessageId::Interested, vec![]);

        let bytes = msg.as_bytes();

        let len = 1u32.to_be_bytes();
        let msg_type = 2u8.to_be_bytes();
        let mut expected = vec![];
        expected.extend(&len);
        expected.extend(&msg_type);

        assert_eq!(bytes, expected);
    }
}
/// Payload of a `Request` message: asks a peer for `length` bytes
/// starting at `begin` within piece `index`.
#[derive(Debug)]
pub struct Request {
    index: u32,
    begin: u32,
    length: u32,
}

impl Request {
    /// Builds a request for the given piece / offset / length triple.
    pub fn new(index: u32, begin: u32, length: u32) -> Self {
        Self {
            index,
            begin,
            length,
        }
    }

    /// Serializes the request as three big-endian `u32`s
    /// (index, begin, length) — 12 bytes in total.
    pub fn as_bytes(&self) -> Vec<u8> {
        [
            self.index.to_be_bytes(),
            self.begin.to_be_bytes(),
            self.length.to_be_bytes(),
        ]
        .concat()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_request_as_bytes() {
        let (index, begin, length) = (0u32, 0u32, 16384u32);
        let request = Request::new(index, begin, length);

        let bytes = request.as_bytes();

        let mut expected = vec![];
        expected.extend(&index.to_be_bytes());
        expected.extend(&begin.to_be_bytes());
        expected.extend(&length.to_be_bytes());

        assert_eq!(bytes, expected);
    }
}
4 | #[derive(Debug, Clone)] 5 | pub struct SessionStatus { 6 | /// We are choked 7 | pub choked: bool, 8 | /// We are interested 9 | pub interested: bool, 10 | /// The other peer is choked by us 11 | pub peer_choked: bool, 12 | /// The other peer is interested in us 13 | pub peer_interested: bool, 14 | pub bitfield: Bitfield, 15 | pub download_speed: f64, 16 | pub upload_speed: f64, 17 | } 18 | 19 | impl SessionStatus { 20 | pub fn new(bitfield: Bitfield) -> Self { 21 | Self { 22 | choked: true, 23 | interested: false, 24 | peer_choked: true, 25 | peer_interested: false, 26 | bitfield, 27 | download_speed: 0.0, 28 | upload_speed: 0.0, 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /dtorrent/src/storage_manager/manager.rs: -------------------------------------------------------------------------------- 1 | use crate::config::cfg::Cfg; 2 | use std::fs::{self, File, OpenOptions}; 3 | use std::io::{Read, Seek, SeekFrom, Write}; 4 | use std::path::Path; 5 | 6 | trait WriteWithOffset { 7 | fn write_all_at(&mut self, buf: &[u8], offset: u64) -> Result<(), std::io::Error>; 8 | } 9 | 10 | impl WriteWithOffset for File { 11 | fn write_all_at(&mut self, buf: &[u8], offset: u64) -> Result<(), std::io::Error> { 12 | self.seek(SeekFrom::Start(offset))?; 13 | self.write_all(buf) 14 | } 15 | } 16 | 17 | trait ReadWithOffset { 18 | fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> Result<(), std::io::Error>; 19 | } 20 | 21 | impl ReadWithOffset for File { 22 | fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> Result<(), std::io::Error> { 23 | self.seek(SeekFrom::Start(offset))?; 24 | self.read_exact(buf) 25 | } 26 | } 27 | 28 | pub fn save_piece( 29 | name: String, 30 | piece: &[u8], 31 | piece_offset: u64, 32 | config: Cfg, 33 | ) -> Result<(), std::io::Error> { 34 | let save_directory = config.download_directory; 35 | if !Path::new(&save_directory).exists() { 36 | 
fs::create_dir_all(save_directory.clone())?; 37 | } 38 | let mut file = OpenOptions::new() 39 | .read(true) 40 | .write(true) 41 | .create(true) 42 | .open(save_directory + "/" + &name)?; 43 | 44 | file.write_all_at(piece, piece_offset)?; 45 | 46 | Ok(()) 47 | } 48 | 49 | /// Retrieves a block of data from a file at a given offset. 50 | /// 51 | /// # Arguments 52 | /// * `filename` - the name of the file to retrieve the data from. 53 | /// * `offset` - integer specifying the offset in bytes from the start of the file 54 | /// * `length` - integer specifying the requested length 55 | /// * `config` - the configuration of the application 56 | pub fn retrieve_block( 57 | filename: String, 58 | offset: u64, 59 | length: usize, 60 | config: Cfg, 61 | ) -> Result, std::io::Error> { 62 | let file_directory = config.download_directory; 63 | 64 | let mut file = OpenOptions::new() 65 | .read(true) 66 | .open(file_directory + "/" + &filename)?; 67 | 68 | let mut buffer = vec![0; length]; 69 | file.read_exact_at(&mut buffer, offset)?; 70 | 71 | Ok(buffer) 72 | } 73 | 74 | #[cfg(test)] 75 | mod tests { 76 | use std::fs::File; 77 | use std::io::Write; 78 | use std::path::Path; 79 | 80 | use super::*; 81 | 82 | const CONFIG_PATH: &str = "config.cfg"; 83 | 84 | #[test] 85 | fn retrieve_block_with_offset_zero() { 86 | let config = Cfg::new(CONFIG_PATH).unwrap(); 87 | 88 | let filename = "test_retrieve_block_01.txt"; 89 | let filepath = format!("{}/{}", config.download_directory, filename); 90 | let contents = "Hello, world!".as_bytes(); 91 | create_and_write_file(&config, filepath.as_str(), contents); 92 | 93 | let offset = 0; 94 | let length = 5; 95 | 96 | let block = retrieve_block(String::from(filename), offset, length, config) 97 | .map_err(|err| { 98 | fs::remove_file(&filepath).unwrap(); 99 | err 100 | }) 101 | .unwrap(); 102 | 103 | fs::remove_file(filepath).unwrap(); 104 | 105 | assert_eq!(5, block.len()); 106 | assert_eq!("Hello".as_bytes(), &block[..]); 107 | } 108 | 109 
| #[test] 110 | fn retrieve_block_with_offset_in_the_middle() { 111 | let config = Cfg::new(CONFIG_PATH).unwrap(); 112 | 113 | let filename = "test_retrieve_block_02.txt"; 114 | let filepath = format!("{}/{}", config.download_directory, filename); 115 | let contents = "Hello, world!".as_bytes(); 116 | create_and_write_file(&config, filepath.as_str(), contents); 117 | 118 | let offset = 4; 119 | let length = 7; 120 | 121 | let block = retrieve_block(String::from(filename), offset, length, config) 122 | .map_err(|err| { 123 | fs::remove_file(&filepath).unwrap(); 124 | err 125 | }) 126 | .unwrap(); 127 | 128 | fs::remove_file(filepath).unwrap(); 129 | 130 | assert_eq!(7, block.len()); 131 | assert_eq!("o, worl".as_bytes(), &block[..]); 132 | } 133 | 134 | #[test] 135 | fn retrieve_block_with_offset_zero_and_length_equal_to_length_of_file() { 136 | let config = Cfg::new(CONFIG_PATH).unwrap(); 137 | 138 | let filename = "test_retrieve_block_03.txt"; 139 | let filepath = format!("{}/{}", config.download_directory, filename); 140 | let contents = "Hello, world!".as_bytes(); 141 | create_and_write_file(&config, filepath.as_str(), contents); 142 | 143 | let offset = 0; 144 | let length = contents.len(); 145 | 146 | let block = retrieve_block(String::from(filename), offset, length, config) 147 | .map_err(|err| { 148 | fs::remove_file(&filepath).unwrap(); 149 | err 150 | }) 151 | .unwrap(); 152 | 153 | fs::remove_file(filepath).unwrap(); 154 | 155 | assert_eq!(length, block.len()); 156 | assert_eq!("Hello, world!".as_bytes(), &block[..]); 157 | } 158 | 159 | #[test] 160 | fn retrieve_block_with_offset_zero_and_length_more_than_file_length() { 161 | let config = Cfg::new(CONFIG_PATH).unwrap(); 162 | 163 | let filename = "test_retrieve_block_04.txt"; 164 | let filepath = format!("{}/{}", config.download_directory, filename); 165 | let contents = "Hello, world!".as_bytes(); 166 | create_and_write_file(&config, filepath.as_str(), contents); 167 | 168 | let offset = 0; 169 | let 
length = contents.len() + 1; 170 | 171 | let io_error = retrieve_block(String::from(filename), offset, length, config).unwrap_err(); 172 | 173 | fs::remove_file(filepath).unwrap(); 174 | 175 | assert_eq!(io_error.kind(), std::io::ErrorKind::UnexpectedEof); 176 | } 177 | 178 | #[test] 179 | fn retrieve_block_with_offset_in_middle_and_length_more_than_file_length() { 180 | let config = Cfg::new(CONFIG_PATH).unwrap(); 181 | 182 | let filename = "test_retrieve_block_05.txt"; 183 | let filepath = format!("{}/{}", config.download_directory, filename); 184 | let contents = "Hello, world!".as_bytes(); 185 | create_and_write_file(&config, filepath.as_str(), contents); 186 | 187 | let offset = 0; 188 | let length = contents.len() + 1; 189 | 190 | let io_error = retrieve_block(String::from(filename), offset, length, config).unwrap_err(); 191 | 192 | fs::remove_file(filepath).unwrap(); 193 | 194 | assert_eq!(io_error.kind(), std::io::ErrorKind::UnexpectedEof); 195 | } 196 | 197 | #[test] 198 | fn retrieve_block_with_offset_zero_and_length_zero() { 199 | let config = Cfg::new(CONFIG_PATH).unwrap(); 200 | 201 | let filename = "test_retrieve_block_06.txt"; 202 | let filepath = format!("{}/{}", config.download_directory, filename); 203 | let contents = "Hello, world!".as_bytes(); 204 | create_and_write_file(&config, filepath.as_str(), contents); 205 | 206 | let offset = 0; 207 | let length = 0; 208 | 209 | let block = retrieve_block(String::from(filename), offset, length, config) 210 | .map_err(|err| { 211 | fs::remove_file(&filepath).unwrap(); 212 | err 213 | }) 214 | .unwrap(); 215 | 216 | fs::remove_file(filepath).unwrap(); 217 | 218 | assert_eq!(length, block.len()); 219 | assert_eq!("".as_bytes(), &block[..]); 220 | } 221 | 222 | #[test] 223 | fn retrieve_block_and_directory_does_not_exist() { 224 | let config = Cfg::new(CONFIG_PATH).unwrap(); 225 | 226 | let filename = "test_retrieve_block_07.txt"; 227 | 228 | let offset = 0; 229 | let length = 6; 230 | 231 | let io_error = 
retrieve_block(String::from(filename), offset, length, config).unwrap_err(); 232 | 233 | assert_eq!(io_error.kind(), std::io::ErrorKind::NotFound); 234 | } 235 | 236 | #[test] 237 | fn retrieve_block_and_file_does_not_exist() { 238 | let config = Cfg::new(CONFIG_PATH).unwrap(); 239 | 240 | let filename = "test_retrieve_block_08.txt"; 241 | create_downloads_dir_if_necessary(config.download_directory.as_str()); 242 | 243 | let offset = 0; 244 | let length = 5; 245 | 246 | let io_error = retrieve_block(String::from(filename), offset, length, config).unwrap_err(); 247 | 248 | assert_eq!(io_error.kind(), std::io::ErrorKind::NotFound); 249 | } 250 | 251 | fn create_and_write_file(config: &Cfg, path: &str, contents: &[u8]) { 252 | create_downloads_dir_if_necessary(config.download_directory.as_str()); 253 | 254 | let mut file = File::create(path).unwrap(); 255 | file.write_all(contents).unwrap(); 256 | } 257 | 258 | // ------------------------------------------------------------------------------------- 259 | 260 | #[test] 261 | fn save_file_creates_file_if_it_does_not_exist() { 262 | let file_name = "test_file_01.txt".to_string(); 263 | let config = Cfg::new(CONFIG_PATH).unwrap(); 264 | let path = format!("{}/{}", config.download_directory, &file_name); 265 | 266 | assert!(!Path::new(&path).exists()); 267 | assert!(save_piece( 268 | file_name, 269 | &[0x50u8, 0x65u8, 0x72u8, 0xF3u8, 0x6Eu8], 270 | 0, 271 | config 272 | ) 273 | .is_ok()); 274 | assert!(Path::new(&path).exists()); 275 | fs::remove_file(path).unwrap(); 276 | } 277 | 278 | #[test] 279 | fn write_in_nonexistent_file() { 280 | let file_name = "test_file_02.txt".to_string(); 281 | let config = Cfg::new(CONFIG_PATH).unwrap(); 282 | let path = format!("{}/{}", config.download_directory, &file_name); 283 | 284 | create_downloads_dir_if_necessary(config.download_directory.as_str()); 285 | 286 | assert!(!Path::new(&path).exists()); 287 | 288 | let content_to_write = vec![0x50u8, 0x65u8, 0x72u8, 0xF3u8, 0x6Eu8]; 289 | 
assert!(save_piece(file_name, &content_to_write, 0, config).is_ok()); 290 | assert!(Path::new(&path).exists()); 291 | 292 | read_file_and_assert_its_content_equals_expected_content(content_to_write, &path); 293 | 294 | fs::remove_file(path).unwrap(); 295 | } 296 | 297 | #[test] 298 | fn write_in_existing_file() { 299 | let file_name = "test_file_03.txt".to_string(); 300 | let config = Cfg::new(CONFIG_PATH).unwrap(); 301 | let path = format!("{}/{}", config.download_directory, &file_name); 302 | 303 | create_downloads_dir_if_necessary(config.download_directory.as_str()); 304 | 305 | File::create(&path).unwrap(); 306 | 307 | let content_to_write = vec![0x50u8, 0x65u8, 0x72u8, 0xF3u8, 0x6Eu8]; 308 | assert!(save_piece(file_name, &content_to_write, 0, config).is_ok()); 309 | 310 | read_file_and_assert_its_content_equals_expected_content(content_to_write, &path); 311 | 312 | fs::remove_file(path).unwrap(); 313 | } 314 | 315 | #[test] 316 | fn write_at_the_end_of_existing_file_that_already_has_contents() { 317 | let file_name = "test_file_04.txt".to_string(); 318 | let config = Cfg::new(CONFIG_PATH).unwrap(); 319 | let path = format!("{}/{}", config.download_directory, &file_name); 320 | 321 | create_downloads_dir_if_necessary(config.download_directory.as_str()); 322 | 323 | let mut file = File::create(&path).unwrap(); 324 | let previous_content = vec![0x56u8, 0x69u8, 0x76u8, 0x61u8, 0x20u8]; 325 | file.write_all(&previous_content).unwrap(); 326 | 327 | let content_to_write = vec![0x50u8, 0x65u8, 0x72u8, 0xF3u8, 0x6Eu8]; 328 | assert!(save_piece(file_name, &content_to_write, 5, config).is_ok()); 329 | 330 | read_file_and_assert_its_content_equals_expected_content( 331 | vec![ 332 | 0x56u8, 0x69u8, 0x76u8, 0x61u8, 0x20u8, 0x50u8, 0x65u8, 0x72u8, 0xF3u8, 0x6Eu8, 333 | ], 334 | &path, 335 | ); 336 | 337 | fs::remove_file(path).unwrap(); 338 | } 339 | 340 | #[test] 341 | fn write_between_pieces_of_existing_file_that_already_has_contents() { 342 | let file_name = 
"test_file_05.txt".to_string(); 343 | let config = Cfg::new(CONFIG_PATH).unwrap(); 344 | let path = format!("{}/{}", config.download_directory, &file_name); 345 | 346 | create_downloads_dir_if_necessary(config.download_directory.as_str()); 347 | 348 | let mut file = File::create(&path).unwrap(); 349 | let first_piece = vec![0x56u8, 0x69u8, 0x76u8, 0x61u8]; 350 | let second_piece = vec![0x20, 0x50u8, 0x65u8]; 351 | let third_piece = vec![0x72u8, 0xF3u8, 0x6Eu8]; 352 | 353 | file.write_all(&first_piece).unwrap(); 354 | file.write_all_at(&third_piece, 7).unwrap(); 355 | 356 | assert!(save_piece(file_name, &second_piece, 4, config).is_ok()); 357 | 358 | read_file_and_assert_its_content_equals_expected_content( 359 | vec![ 360 | 0x56u8, 0x69u8, 0x76u8, 0x61u8, 0x20u8, 0x50u8, 0x65u8, 0x72u8, 0xF3u8, 0x6Eu8, 361 | ], 362 | &path, 363 | ); 364 | 365 | fs::remove_file(path).unwrap(); 366 | } 367 | 368 | fn read_file_and_assert_its_content_equals_expected_content( 369 | expected_content: Vec, 370 | file_name: &str, 371 | ) { 372 | let content = fs::read(file_name).unwrap(); 373 | assert_eq!(content, expected_content); 374 | } 375 | 376 | fn create_downloads_dir_if_necessary(path: &str) { 377 | if !Path::new(path).exists() { 378 | fs::create_dir_all(path).unwrap(); 379 | } 380 | } 381 | } 382 | -------------------------------------------------------------------------------- /dtorrent/src/storage_manager/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod manager; 2 | -------------------------------------------------------------------------------- /dtorrent/src/torrent_handler/handler.rs: -------------------------------------------------------------------------------- 1 | use super::status::{AtomicTorrentStatus, AtomicTorrentStatusError}; 2 | use crate::{ 3 | config::cfg::Cfg, 4 | peer::{ 5 | bt_peer::BtPeer, 6 | peer_session::{PeerSession, PeerSessionError}, 7 | }, 8 | torrent_parser::torrent::Torrent, 9 | tracker::{ 10 | 
tracker_handler::{TrackerHandler, TrackerHandlerError}, 11 | tracker_response::TrackerResponse, 12 | }, 13 | }; 14 | use std::{ 15 | sync::{ 16 | mpsc::{self, Receiver}, 17 | Arc, 18 | }, 19 | thread, 20 | time::Duration, 21 | }; 22 | use tracing::{error, info, warn}; 23 | 24 | /// Struct for handling the torrent download. 25 | /// 26 | /// To create a new `TorrentHandler`, use TorrentHandler::new(torrent, config, logger_sender). 27 | #[derive(Debug)] 28 | pub struct TorrentHandler { 29 | torrent: Torrent, 30 | config: Cfg, 31 | torrent_status: Arc, 32 | torrent_status_receiver: Receiver, 33 | client_peer_id: String, 34 | } 35 | 36 | /// Posible torrent handler errors. 37 | #[derive(Debug)] 38 | pub enum TorrentHandlerError { 39 | TrackerError(TrackerHandlerError), 40 | TorrentStatusError(AtomicTorrentStatusError), 41 | PeerSessionError(PeerSessionError), 42 | TorrentStatusRecvError(mpsc::RecvError), 43 | } 44 | 45 | impl TorrentHandler { 46 | /// Creates a new `TorrentHandler` from a torrent, a config and a logger sender. 47 | pub fn new(torrent: Torrent, config: Cfg, client_peer_id: String) -> Self { 48 | let (torrent_status, torrent_status_receiver) = 49 | AtomicTorrentStatus::new(&torrent, config.clone()); 50 | 51 | Self { 52 | torrent_status: Arc::new(torrent_status), 53 | torrent, 54 | config, 55 | torrent_status_receiver, 56 | client_peer_id, 57 | } 58 | } 59 | 60 | /// Starts the torrent download. 61 | /// 62 | /// First it connects to the tracker and gets the peers. Then it connects to each peer and starts the download. 63 | /// 64 | /// # Errors 65 | /// 66 | /// - `TrackerErr` if there was a problem connecting to the tracker or getting the peers. 67 | /// - `TorrentStatusError` if there was a problem using the `Torrent Status`. 68 | /// - `TorrentStatusRecvError` if there was a problem receiving from the receiver of `Torrent Status`. 
69 | pub fn handle(&mut self) -> Result<(), TorrentHandlerError> { 70 | let tracker_handler = TrackerHandler::new( 71 | self.torrent.clone(), 72 | self.config.tcp_port.into(), 73 | self.client_peer_id.clone(), 74 | ) 75 | .map_err(TorrentHandlerError::TrackerError)?; 76 | info!("Connected to tracker."); 77 | 78 | while !self.torrent_status.is_finished() { 79 | let peer_list = self.get_peers_list(&tracker_handler)?; 80 | info!("Tracker peer list obtained."); 81 | 82 | // Start connection with each peer 83 | for peer in peer_list { 84 | let current_peers = self.torrent_status.all_current_peers(); 85 | 86 | // If we reached the maximum number of simultaneous peers, wait until the status tells us that one disconnected. 87 | if current_peers >= self.config.max_peers_per_torrent as usize { 88 | // This while loop is done to prevent creating more peers than allowed when multiple peers are disconnected at the same time. 89 | self.torrent_status_receiver 90 | .recv() 91 | .map_err(TorrentHandlerError::TorrentStatusRecvError)?; 92 | while self 93 | .torrent_status_receiver 94 | .recv_timeout(Duration::from_nanos(1)) 95 | .is_ok() 96 | { 97 | continue; 98 | } 99 | } 100 | if self.torrent_status.is_finished() { 101 | break; 102 | } 103 | 104 | let connected_peers = self 105 | .torrent_status 106 | .get_connected_peers() 107 | .map_err(TorrentHandlerError::TorrentStatusError)?; 108 | 109 | // Avoid connecting to the same peer twice. 110 | if connected_peers.contains_key(&peer) { 111 | continue; 112 | } 113 | 114 | let current_peers = self.torrent_status.all_current_peers(); 115 | if current_peers < self.config.max_peers_per_torrent as usize { 116 | self.connect_to_peer(peer)?; 117 | } 118 | } 119 | } 120 | info!("Torrent download finished."); 121 | Ok(()) 122 | } 123 | 124 | /// Gets the status of the torrent. 
125 | pub fn status(&self) -> Arc { 126 | self.torrent_status.clone() 127 | } 128 | 129 | fn get_peers_list( 130 | &self, 131 | tracker_handler: &TrackerHandler, 132 | ) -> Result, TorrentHandlerError> { 133 | let tracker_response = tracker_handler 134 | .get_peers_list() 135 | .map_err(TorrentHandlerError::TrackerError)?; 136 | 137 | self.update_total_peers(&tracker_response); 138 | 139 | Ok(tracker_response.peers) 140 | } 141 | 142 | /// Updates the torrent status with the number of total peers. 143 | /// 144 | /// If the tracker response did not contain the number of total peers, it will be set to the number of peers in the response. 145 | fn update_total_peers(&self, tracker_response: &TrackerResponse) { 146 | if tracker_response.complete == 0 && tracker_response.incomplete == 0 { 147 | self.torrent_status 148 | .update_total_peers(tracker_response.peers.len(), 0); 149 | } else { 150 | self.torrent_status.update_total_peers( 151 | tracker_response.complete as usize, 152 | tracker_response.incomplete as usize, 153 | ); 154 | } 155 | } 156 | 157 | fn connect_to_peer(&mut self, peer: BtPeer) -> Result<(), TorrentHandlerError> { 158 | self.torrent_status.peer_connecting(); 159 | let peer_name = format!("{}:{}", peer.ip, peer.port); 160 | 161 | let mut peer_session = PeerSession::new( 162 | peer.clone(), 163 | self.torrent.clone(), 164 | self.torrent_status.clone(), 165 | self.config.clone(), 166 | self.client_peer_id.clone(), 167 | ) 168 | .map_err(TorrentHandlerError::PeerSessionError)?; 169 | 170 | let builder = thread::Builder::new().name(format!( 171 | "Torrent: {} / Peer: {}", 172 | self.torrent.info.name, peer_name 173 | )); 174 | 175 | let join = builder.spawn(move || match peer_session.start_outgoing_seeder() { 176 | Ok(_) => (), 177 | Err(err) => { 178 | warn!("{:?}", err); 179 | } 180 | }); 181 | match join { 182 | Ok(_) => (), 183 | Err(err) => { 184 | error!("{:?}", err); 185 | self.torrent_status 186 | .peer_disconnected(&peer) 187 | 
.map_err(TorrentHandlerError::TorrentStatusError)?; 188 | } 189 | } 190 | Ok(()) 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /dtorrent/src/torrent_handler/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod handler; 2 | pub mod status; 3 | -------------------------------------------------------------------------------- /dtorrent/src/torrent_parser/info.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use bencoder::bencode::{Bencode, ToBencode}; 4 | 5 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] 6 | pub struct Info { 7 | pub length: i64, 8 | pub name: String, 9 | pub piece_length: i64, 10 | pub pieces: Vec, 11 | } 12 | 13 | #[derive(Debug, PartialEq)] 14 | pub enum FromInfoError { 15 | MissingLength, 16 | MissingName, 17 | MissingPieceLength, 18 | MissingPieces, 19 | NotADict, 20 | MultipleFilesNotSupported, 21 | } 22 | 23 | impl Info { 24 | pub fn from(bencode: &Bencode) -> Result { 25 | let mut name = String::new(); 26 | let mut length = 0; 27 | let mut piece_length = 0; 28 | let mut pieces = Vec::new(); 29 | 30 | let d = match bencode { 31 | Bencode::BDict(s) => s, 32 | _ => return Err(FromInfoError::NotADict), 33 | }; 34 | 35 | for (k, v) in d.iter() { 36 | if k == b"name" { 37 | name = Info::create_name(v)?; 38 | } else if k == b"length" { 39 | length = Info::create_length(v)?; 40 | } else if k == b"piece length" { 41 | piece_length = Info::create_piece_length(v)?; 42 | } else if k == b"pieces" { 43 | pieces = Info::create_pieces(v)?; 44 | } else if k == b"files" { 45 | return Err(FromInfoError::MultipleFilesNotSupported); 46 | } 47 | } 48 | 49 | Ok(Info { 50 | length, 51 | name, 52 | piece_length, 53 | pieces, 54 | }) 55 | } 56 | 57 | fn create_name(bencode: &Bencode) -> Result { 58 | let c = match bencode { 59 | &Bencode::BString(ref s) => s, 60 | _ => 
return Err(FromInfoError::MissingName), 61 | }; 62 | 63 | let name = match String::from_utf8(c.to_vec()) { 64 | Ok(s) => s, 65 | Err(_) => return Err(FromInfoError::MissingName), 66 | }; 67 | 68 | Ok(name) 69 | } 70 | 71 | fn create_length(bencode: &Bencode) -> Result { 72 | let c = match bencode { 73 | &Bencode::BNumber(ref s) => s, 74 | _ => return Err(FromInfoError::MissingLength), 75 | }; 76 | Ok(*c) 77 | } 78 | 79 | fn create_piece_length(bencode: &Bencode) -> Result { 80 | let c = match bencode { 81 | &Bencode::BNumber(ref s) => s, 82 | _ => return Err(FromInfoError::MissingPieceLength), 83 | }; 84 | Ok(*c) 85 | } 86 | 87 | fn create_pieces(bencode: &Bencode) -> Result, FromInfoError> { 88 | let c = match bencode { 89 | &Bencode::BString(ref s) => s, 90 | _ => return Err(FromInfoError::MissingPieces), 91 | }; 92 | Ok(c.to_vec()) 93 | } 94 | } 95 | 96 | impl ToBencode for Info { 97 | fn to_bencode(&self) -> Bencode { 98 | let mut info = BTreeMap::new(); 99 | info.insert(b"length".to_vec(), self.length.to_bencode()); 100 | info.insert(b"name".to_vec(), self.name.to_bencode()); 101 | info.insert(b"piece length".to_vec(), self.piece_length.to_bencode()); 102 | info.insert(b"pieces".to_vec(), self.pieces.to_bencode()); 103 | Bencode::BDict(info) 104 | } 105 | } 106 | 107 | #[cfg(test)] 108 | mod tests { 109 | use super::*; 110 | 111 | #[test] 112 | fn test_from_info_empty() { 113 | let bencode = Bencode::BDict(BTreeMap::new()); 114 | let info = Info::from(&bencode).unwrap(); 115 | assert_eq!(info.length, 0); 116 | assert_eq!(info.name, String::new()); 117 | assert_eq!(info.piece_length, 0); 118 | assert_eq!(info.pieces, Vec::new()); 119 | } 120 | 121 | #[test] 122 | fn test_from_info_full() { 123 | let mut info = BTreeMap::new(); 124 | info.insert(b"length".to_vec(), Bencode::BNumber(1)); 125 | info.insert(b"name".to_vec(), Bencode::BString(b"test1".to_vec())); 126 | info.insert(b"piece length".to_vec(), Bencode::BNumber(2)); 127 | info.insert(b"pieces".to_vec(), 
Bencode::BString(b"test2".to_vec())); 128 | let bencode = Bencode::BDict(info); 129 | 130 | let response = Info::from(&bencode).unwrap(); 131 | assert_eq!(response.length, 1); 132 | assert_eq!(response.name, "test1"); 133 | assert_eq!(response.piece_length, 2); 134 | assert_eq!(response.pieces, b"test2"); 135 | } 136 | 137 | #[test] 138 | fn test_from_info_with_multiple_files() { 139 | let mut info = BTreeMap::new(); 140 | info.insert(b"name".to_vec(), Bencode::BString(b"test1".to_vec())); 141 | info.insert(b"piece length".to_vec(), Bencode::BNumber(2)); 142 | info.insert(b"pieces".to_vec(), Bencode::BString(b"test2".to_vec())); 143 | info.insert(b"files".to_vec(), Bencode::BList(vec![])); 144 | let bencode = Bencode::BDict(info); 145 | 146 | let response = Info::from(&bencode).unwrap_err(); 147 | assert_eq!(response, FromInfoError::MultipleFilesNotSupported); 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /dtorrent/src/torrent_parser/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod info; 2 | pub mod parser; 3 | pub mod torrent; 4 | -------------------------------------------------------------------------------- /dtorrent/src/torrent_parser/parser.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::File, 3 | io::{BufReader, Error, Read}, 4 | path::Path, 5 | }; 6 | 7 | use super::torrent::{FromTorrentError, Torrent}; 8 | use bencoder::bencode::{Bencode, BencodeError}; 9 | 10 | #[derive(Debug)] 11 | pub enum ParseError { 12 | IoError(Error), 13 | BencodeError(BencodeError), 14 | FromTorrentError(FromTorrentError), 15 | } 16 | 17 | pub struct TorrentParser; 18 | 19 | impl TorrentParser { 20 | /// Given a path to a torrent file, it parses the file and returns a Torrent struct. 
21 | /// 22 | /// # Errors 23 | /// 24 | /// * `ParseError::IoError` - An error occurred while reading the file 25 | /// * `ParseError::BencodeError` - An error occurred while parsing the bencode 26 | /// * `ParseError::FromTorrentError` - An error occurred while creating the Torrent struct 27 | pub fn parse(filepath: &Path) -> Result { 28 | let buffer = match TorrentParser::read_file(filepath) { 29 | Ok(buffer) => buffer, 30 | Err(e) => return Err(ParseError::IoError(e)), 31 | }; 32 | 33 | let bencode = match Bencode::decode(&buffer) { 34 | Ok(bencode) => bencode, 35 | Err(e) => return Err(ParseError::BencodeError(e)), 36 | }; 37 | 38 | let torrent = match Torrent::from(bencode) { 39 | Ok(torrent) => torrent, 40 | Err(e) => return Err(ParseError::FromTorrentError(e)), 41 | }; 42 | 43 | Ok(torrent) 44 | } 45 | 46 | fn read_file(filepath: &Path) -> Result, Error> { 47 | let file = File::open(filepath)?; 48 | let mut reader = BufReader::new(file); 49 | let mut buffer = Vec::new(); 50 | 51 | reader.read_to_end(&mut buffer)?; 52 | 53 | Ok(buffer) 54 | } 55 | } 56 | 57 | #[cfg(test)] 58 | mod tests { 59 | use super::*; 60 | use std::{fs, io::Write}; 61 | 62 | #[test] 63 | fn test_parse_torrent() { 64 | let filepath = "./test_parse_torrent.torrent"; 65 | let contents = 66 | b"d8:announce35:https://torrent.ubuntu.com/announce4:infod6:lengthi3654957056e4:name30:ubuntu-22.04-desktop-amd64.iso12:piece lengthi262144e6:pieces64:BC 07 C0 6A 9D BC 07 C0 6A 9D BC 07 C0 6A 9D BC 07 C0 6A 9Dee"; 67 | create_and_write_file(filepath, contents); 68 | 69 | let torrent = match TorrentParser::parse(Path::new(filepath)) { 70 | Ok(torrent) => torrent, 71 | Err(e) => { 72 | remove_file(filepath); 73 | panic!("{:?}", e); 74 | } 75 | }; 76 | 77 | assert_eq!(torrent.announce_url, "https://torrent.ubuntu.com/announce",); 78 | assert_eq!(torrent.info.length, 3654957056); 79 | assert_eq!(torrent.info.name, "ubuntu-22.04-desktop-amd64.iso"); 80 | assert_eq!(torrent.info.piece_length, 262144); 81 | 
assert_eq!( 82 | torrent.info_hash, 83 | "48442ddee1900ed8c8101bb8b2bd955060f1eabc" 84 | ); 85 | remove_file(filepath); 86 | } 87 | 88 | fn create_and_write_file(path: &str, contents: &[u8]) { 89 | let mut file = File::create(path).unwrap(); 90 | file.write_all(contents).unwrap(); 91 | } 92 | 93 | fn remove_file(path: &str) { 94 | fs::remove_file(path).unwrap(); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /dtorrent/src/torrent_parser/torrent.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Write; 2 | use std::{collections::BTreeMap, num::ParseIntError}; 3 | 4 | use sha1::{Digest, Sha1}; 5 | 6 | use bencoder::bencode::{Bencode, ToBencode}; 7 | 8 | use super::info::{FromInfoError, Info}; 9 | 10 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] 11 | pub struct Torrent { 12 | pub announce_url: String, 13 | pub info: Info, 14 | pub info_hash: String, 15 | } 16 | 17 | #[derive(Debug, PartialEq)] 18 | pub enum FromTorrentError { 19 | MissingAnnounce, 20 | MissingInfo, 21 | FromInfoError(FromInfoError), 22 | InfoHashError, 23 | NotADict, 24 | } 25 | 26 | impl Torrent { 27 | pub fn from(bencode: Bencode) -> Result { 28 | let mut announce_url = String::new(); 29 | let mut info: Option = None; 30 | 31 | let d = match bencode { 32 | Bencode::BDict(s) => s, 33 | _ => return Err(FromTorrentError::NotADict), 34 | }; 35 | 36 | for (k, v) in d.iter() { 37 | if k == b"announce" { 38 | announce_url = Torrent::create_announce(v)?; 39 | } else if k == b"info" { 40 | info = Some(Torrent::create_info(v)?); 41 | } 42 | } 43 | 44 | if announce_url.is_empty() { 45 | return Err(FromTorrentError::MissingAnnounce); 46 | } 47 | 48 | let info = match info { 49 | Some(x) => x, 50 | None => return Err(FromTorrentError::MissingInfo), 51 | }; 52 | 53 | let info_hash = Torrent::create_info_hash(&info)?; 54 | 55 | Ok(Torrent { 56 | announce_url, 57 | info, 58 | info_hash, 59 | }) 
60 | } 61 | 62 | fn create_announce(bencode: &Bencode) -> Result { 63 | let announce_url = match bencode { 64 | Bencode::BString(s) => s, 65 | _ => return Err(FromTorrentError::MissingAnnounce), 66 | }; 67 | 68 | let announce_url = match String::from_utf8(announce_url.to_vec()) { 69 | Ok(s) => s, 70 | Err(_) => return Err(FromTorrentError::MissingAnnounce), 71 | }; 72 | 73 | Ok(announce_url) 74 | } 75 | 76 | fn create_info(bencode: &Bencode) -> Result { 77 | let info = match Info::from(bencode) { 78 | Ok(x) => x, 79 | Err(err) => return Err(FromTorrentError::FromInfoError(err)), 80 | }; 81 | 82 | Ok(info) 83 | } 84 | 85 | pub fn create_info_hash(info: &Info) -> Result { 86 | let bencoded_info = Bencode::encode(info); 87 | let hash = Sha1::digest(bencoded_info); 88 | 89 | let mut hex_string = String::with_capacity(hash.len() * 2); 90 | 91 | for b in hash { 92 | match write!(&mut hex_string, "{:02x}", b) { 93 | Ok(_) => (), 94 | Err(_) => return Err(FromTorrentError::InfoHashError), 95 | } 96 | } 97 | 98 | Ok(hex_string) 99 | } 100 | 101 | /// Returns the info hash of the torrent as a byte array. 102 | pub fn get_info_hash_as_bytes(&self) -> Result, ParseIntError> { 103 | Self::decode_hex(self.info_hash.as_str()) 104 | } 105 | 106 | fn decode_hex(s: &str) -> Result, ParseIntError> { 107 | (0..s.len()) 108 | .step_by(2) 109 | .map(|i| u8::from_str_radix(&s[i..i + 2], 16)) 110 | .collect() 111 | } 112 | 113 | /// Returns the name of the torrent. 114 | pub fn name(&self) -> String { 115 | self.info.name.clone() 116 | } 117 | 118 | /// Returns the size of pieces of the torrent. 119 | pub fn piece_length(&self) -> u32 { 120 | self.info.piece_length as u32 121 | } 122 | 123 | /// Returns the length in bytes of the torrent. 124 | pub fn length(&self) -> u32 { 125 | self.info.length as u32 126 | } 127 | 128 | /// Returns the number of pieces of the torrent. 
129 | pub fn total_pieces(&self) -> u32 { 130 | (self.info.length as f64 / self.info.piece_length as f64).ceil() as u32 131 | } 132 | 133 | /// Returns the size of the last piece of the torrent. 134 | pub fn last_piece_size(&self) -> u32 { 135 | self.info.length as u32 % self.info.piece_length as u32 136 | } 137 | 138 | pub fn info_hash(&self) -> String { 139 | self.info_hash.clone() 140 | } 141 | } 142 | 143 | impl ToBencode for Torrent { 144 | fn to_bencode(&self) -> Bencode { 145 | let mut m = BTreeMap::new(); 146 | m.insert(b"announce_url".to_vec(), self.announce_url.to_bencode()); 147 | m.insert(b"info".to_vec(), self.info.to_bencode()); 148 | Bencode::BDict(m) 149 | } 150 | } 151 | 152 | #[cfg(test)] 153 | mod tests { 154 | use super::*; 155 | 156 | #[test] 157 | fn test_from_torrent_full() { 158 | let announce = String::from("http://example.com/announce"); 159 | let info_len = 10; 160 | let info_name = String::from("example"); 161 | let info_piece_len = 20; 162 | let info_pieces = String::from("test").into_bytes(); 163 | 164 | let info_bencode = build_info_bencode( 165 | info_len, 166 | info_name.clone().into_bytes(), 167 | info_piece_len, 168 | info_pieces.clone(), 169 | ); 170 | let torrent_bencode = 171 | build_torrent_bencode(announce.clone().into_bytes(), info_bencode.clone()); 172 | 173 | let info = Info::from(&Bencode::BDict(info_bencode)).unwrap(); 174 | let info_hash = Torrent::create_info_hash(&info).unwrap(); 175 | 176 | let torrent = Torrent::from(torrent_bencode).unwrap(); 177 | 178 | assert_eq!(torrent.announce_url, announce); 179 | assert_eq!(torrent.info.length, info_len); 180 | assert_eq!(torrent.info.name, info_name); 181 | assert_eq!(torrent.info.piece_length, info_piece_len); 182 | assert_eq!(torrent.info.pieces, info_pieces); 183 | assert_eq!(torrent.info_hash, info_hash); 184 | } 185 | 186 | #[test] 187 | fn test_from_torrent_empty() { 188 | let torrent_bencode = Bencode::BDict(BTreeMap::new()); 189 | 190 | let actual_err = 
Torrent::from(torrent_bencode).unwrap_err(); 191 | let expected_err = FromTorrentError::MissingAnnounce; 192 | 193 | assert_eq!(actual_err, expected_err); 194 | } 195 | 196 | #[test] 197 | fn test_from_torrent_missing_announce() { 198 | let mut m = BTreeMap::new(); 199 | m.insert(b"info".to_vec(), Bencode::BDict(BTreeMap::new())); 200 | let torrent_bencode = Bencode::BDict(m); 201 | 202 | let actual_err = Torrent::from(torrent_bencode).unwrap_err(); 203 | let expected_err = FromTorrentError::MissingAnnounce; 204 | 205 | assert_eq!(actual_err, expected_err); 206 | } 207 | 208 | #[test] 209 | fn test_from_torrent_missing_info() { 210 | let announce = String::from("http://example.com/announce").into_bytes(); 211 | let mut m = BTreeMap::new(); 212 | m.insert(b"announce".to_vec(), Bencode::BString(announce)); 213 | let torrent_bencode = Bencode::BDict(m); 214 | 215 | let actual_err = Torrent::from(torrent_bencode).unwrap_err(); 216 | let expected_err = FromTorrentError::MissingInfo; 217 | 218 | assert_eq!(actual_err, expected_err); 219 | } 220 | 221 | #[test] 222 | fn test_from_torrent_not_a_dict() { 223 | let torrent_bencode = Bencode::BString(String::from("test").into_bytes()); 224 | 225 | let actual_err = Torrent::from(torrent_bencode).unwrap_err(); 226 | let expected_err = FromTorrentError::NotADict; 227 | 228 | assert_eq!(actual_err, expected_err); 229 | } 230 | 231 | #[test] 232 | fn test_get_info_hash_as_bytes() { 233 | let info_hash = String::from("2c6b6858d61da9543d4231a71db4b1c9264b0685"); 234 | let info_hash_bytes = [ 235 | 44, 107, 104, 88, 214, 29, 169, 84, 61, 66, 49, 167, 29, 180, 177, 201, 38, 75, 6, 133, 236 | ]; 237 | 238 | let torrent = Torrent { 239 | announce_url: String::from("http://example.com/announce"), 240 | info: Info { 241 | length: 10, 242 | name: String::from("example"), 243 | piece_length: 20, 244 | pieces: String::from("test").into_bytes(), 245 | }, 246 | info_hash, 247 | }; 248 | 249 | 
assert_eq!(torrent.get_info_hash_as_bytes().unwrap(), info_hash_bytes); 250 | } 251 | 252 | #[test] 253 | fn test_name() { 254 | let torrent = build_test_torrent(); 255 | assert_eq!(torrent.name(), "example"); 256 | } 257 | 258 | #[test] 259 | fn test_piece_length() { 260 | let torrent = build_test_torrent(); 261 | assert_eq!(torrent.piece_length(), 10); 262 | } 263 | 264 | #[test] 265 | fn test_length() { 266 | let torrent = build_test_torrent(); 267 | assert_eq!(torrent.length(), 105); 268 | } 269 | 270 | #[test] 271 | fn test_total_pieces() { 272 | let torrent = build_test_torrent(); 273 | assert_eq!(torrent.total_pieces(), 11); 274 | } 275 | 276 | #[test] 277 | fn test_last_piece_size() { 278 | let torrent = build_test_torrent(); 279 | assert_eq!(torrent.last_piece_size(), 5); 280 | } 281 | 282 | fn build_info_bencode( 283 | length: i64, 284 | name: Vec, 285 | pieces_len: i64, 286 | pieces: Vec, 287 | ) -> BTreeMap, Bencode> { 288 | let mut info = BTreeMap::new(); 289 | info.insert(b"length".to_vec(), Bencode::BNumber(length)); 290 | info.insert(b"name".to_vec(), Bencode::BString(name)); 291 | info.insert(b"piece length".to_vec(), Bencode::BNumber(pieces_len)); 292 | info.insert(b"pieces".to_vec(), Bencode::BString(pieces)); 293 | 294 | info 295 | } 296 | 297 | fn build_torrent_bencode(announce: Vec, info: BTreeMap, Bencode>) -> Bencode { 298 | let mut dict = BTreeMap::new(); 299 | 300 | dict.insert(b"announce".to_vec(), Bencode::BString(announce)); 301 | dict.insert(b"info".to_vec(), Bencode::BDict(info)); 302 | 303 | Bencode::BDict(dict) 304 | } 305 | 306 | fn build_test_torrent() -> Torrent { 307 | Torrent { 308 | announce_url: String::from("http://example.com/announce"), 309 | info: Info { 310 | length: 105, 311 | name: String::from("example"), 312 | piece_length: 10, 313 | pieces: String::from("test").into_bytes(), 314 | }, 315 | info_hash: "info_hash".to_string(), 316 | } 317 | } 318 | } 319 | 
-------------------------------------------------------------------------------- /dtorrent/src/tracker/http/http_handler.rs: -------------------------------------------------------------------------------- 1 | use native_tls::Error; 2 | use native_tls::HandshakeError; 3 | use native_tls::TlsConnector; 4 | use std::io::Error as IOError; 5 | use std::io::{Read, Write}; 6 | use std::net::TcpStream; 7 | 8 | use super::query_params::QueryParams; 9 | use super::url_parser::TrackerUrl; 10 | 11 | /// `HttpHandler` struct to make **HTTP** requests. 12 | /// 13 | /// To create a new `HttpHandler` use the method builder `new()`. 14 | /// 15 | /// To make a **HTTPS** request use the method `https_request()`. 16 | /// 17 | /// To make a **HTTP** request use the method `http_request()`. 18 | #[derive(Debug)] 19 | pub struct HttpHandler { 20 | tracker_url: TrackerUrl, 21 | query_params: QueryParams, 22 | } 23 | 24 | /// Posible `HttpHandler` errors 25 | #[derive(Debug)] 26 | pub enum HttpHandlerError { 27 | CreateTlsConnectorError(Error), 28 | TcpStreamConnectError(IOError), 29 | TlsStreamConnectError(TlsStreamConnectError), 30 | ErrorWritingStream(IOError), 31 | ErrorReadingStream(IOError), 32 | } 33 | 34 | /// Posible `TlsStreamConnect` errors. 35 | /// 36 | /// `FatalError` is an error that should not continue the program. 37 | /// 38 | /// `BlockError` is an error that can be caused because the stream is performing I/O, 39 | /// it should be safe to call `handshake` at a later time. 40 | #[derive(Debug)] 41 | pub enum TlsStreamConnectError { 42 | FatalError, 43 | BlockError, 44 | } 45 | 46 | impl HttpHandler { 47 | /// Builds a new `HttpHandler` from a **TrackerUrl** and a **QueryParams** passed by paramaters. 48 | pub fn new(tracker_url: TrackerUrl, query_params: QueryParams) -> Self { 49 | Self { 50 | tracker_url, 51 | query_params, 52 | } 53 | } 54 | 55 | /// Makes a **HTTPS** request to the tracker url. 
56 | /// 57 | /// On success it returns a `Vec` cointaining the tracker's response. 58 | /// 59 | /// It returns an `HttpHandlerError` if: 60 | /// - There was a problem creating a TlsConnector. 61 | /// - There was a problem connecting to the tracker_url. 62 | /// - There was a problem writing to the tracker stream. 63 | /// - There was a problem reading the tracker stream. 64 | pub fn https_request(&self) -> Result, HttpHandlerError> { 65 | let connector = match TlsConnector::new() { 66 | Ok(connector) => connector, 67 | Err(err) => return Err(HttpHandlerError::CreateTlsConnectorError(err)), 68 | }; 69 | let stream = self.connect_tcp_stream()?; 70 | let mut stream = match connector.connect(self.tracker_url.host.as_str(), stream) { 71 | Ok(stream) => stream, 72 | Err(err) => match err { 73 | HandshakeError::Failure(_) => { 74 | return Err(HttpHandlerError::TlsStreamConnectError( 75 | TlsStreamConnectError::FatalError, 76 | )) 77 | } 78 | HandshakeError::WouldBlock(_) => { 79 | return Err(HttpHandlerError::TlsStreamConnectError( 80 | TlsStreamConnectError::BlockError, 81 | )) 82 | } 83 | }, 84 | }; 85 | self.request_and_decode(&mut stream) 86 | } 87 | 88 | /// Makes a **HTTP** request to the tracker url. 89 | /// 90 | /// On success it returns a `Vec` cointaining the tracker's response. 91 | /// 92 | /// It returns an `HttpHandlerError` if: 93 | /// - There was a problem connecting to the tracker_url. 94 | /// - There was a problem writing to the tracker stream. 95 | /// - There was a problem reading the tracker stream. 96 | pub fn http_request(&self) -> Result, HttpHandlerError> { 97 | self.request_and_decode(&self.connect_tcp_stream()?) 
98 | } 99 | 100 | fn connect_tcp_stream(&self) -> Result { 101 | let connect_url = format!("{}:{}", self.tracker_url.host, self.tracker_url.port); 102 | match TcpStream::connect(connect_url) { 103 | Ok(stream) => Ok(stream), 104 | Err(err) => Err(HttpHandlerError::TcpStreamConnectError(err)), 105 | } 106 | } 107 | 108 | fn request_and_decode(&self, mut stream: A) -> Result, HttpHandlerError> 109 | where 110 | A: Write + Read, 111 | { 112 | let query_params = self.query_params.build(); 113 | let mut request = format!( 114 | "GET /{}{} HTTP/1.1", 115 | self.tracker_url.endpoint, query_params 116 | ); 117 | request.push_str("\r\n"); 118 | request.push_str("Host: "); 119 | request.push_str(self.tracker_url.host.as_str()); 120 | request.push_str("\r\n"); 121 | request.push_str("User-Agent: LDTorrent/0.1"); 122 | request.push_str("\r\n"); 123 | request.push_str("\r\n"); 124 | 125 | match stream.write_all(request.as_bytes()) { 126 | Ok(_) => (), 127 | Err(err) => return Err(HttpHandlerError::ErrorWritingStream(err)), 128 | } 129 | let mut res = vec![]; 130 | match stream.read_to_end(&mut res) { 131 | Ok(_) => (), 132 | Err(err) => return Err(HttpHandlerError::ErrorReadingStream(err)), 133 | }; 134 | 135 | Ok(Self::parse_http_response(&res).to_vec()) 136 | } 137 | 138 | fn parse_http_response(res: &[u8]) -> &[u8] { 139 | for (i, b) in res.iter().enumerate() { 140 | if i + 3 > res.len() { 141 | break; 142 | } 143 | 144 | if *b == b"\r"[0] 145 | && res[i + 1] == b"\n"[0] 146 | && res[i + 2] == b"\r"[0] 147 | && res[i + 3] == b"\n"[0] 148 | { 149 | return &res[(i + 4)..]; 150 | } 151 | } 152 | res 153 | } 154 | } 155 | 156 | #[cfg(test)] 157 | mod tests { 158 | use crate::tracker::http::url_parser; 159 | 160 | use super::*; 161 | 162 | #[test] 163 | fn test_http_handler_https_request() { 164 | let http_handler = HttpHandler::new( 165 | url_parser::TrackerUrl::parse("https://torrent.ubuntu.com/announce").unwrap(), 166 | QueryParams::new( 167 | 
"e82753b6692c4f3f3646b055f70ee390309020e6".to_string(), 168 | 6969, 169 | 100, 170 | "-qB4500-k51bMCWVA(~!".to_string(), 171 | ), 172 | ); 173 | let response = http_handler.https_request().unwrap(); 174 | 175 | // d8:complete 176 | assert!(response.starts_with(&[100, 56, 58, 99, 111, 109, 112, 108, 101, 116, 101])); 177 | } 178 | 179 | #[test] 180 | fn test_bad_http_handler_https_request() { 181 | let http_handler = HttpHandler::new( 182 | url_parser::TrackerUrl::parse("https://torrent.ubuntu.com:443/announce").unwrap(), 183 | QueryParams::new( 184 | "info_hash_test_info_hash_test_info_hash_test".to_string(), 185 | 6969, 186 | 100, 187 | "test_peer_id".to_string(), 188 | ), 189 | ); 190 | let response = http_handler.https_request().unwrap(); 191 | 192 | // d14:failure 193 | assert!(response.starts_with(&[100, 49, 52, 58, 102, 97, 105, 108, 117, 114, 101])); 194 | } 195 | 196 | #[test] 197 | fn test_http_handler_http_request() { 198 | let http_handler = HttpHandler::new( 199 | url_parser::TrackerUrl::parse("http://vps02.net.orel.ru/announce").unwrap(), 200 | QueryParams::new( 201 | "f834824904be1854c89ba007c01678ff797f8dc7".to_string(), 202 | 6969, 203 | 100, 204 | "-qB4500-k51bMCWVA(~!".to_string(), 205 | ), 206 | ); 207 | let response = http_handler.http_request().unwrap(); 208 | 209 | // d8:complete 210 | assert!(response.starts_with(&[100, 56, 58, 99, 111, 109, 112, 108, 101, 116, 101])); 211 | } 212 | 213 | #[test] 214 | fn test_bad_http_handler_http_request() { 215 | let http_handler = HttpHandler::new( 216 | url_parser::TrackerUrl::parse("http://vps02.net.orel.ru/announce").unwrap(), 217 | QueryParams::new( 218 | "info_hash_test_info_hash_test_info_hash_test".to_string(), 219 | 6969, 220 | 100, 221 | "test_peer_id".to_string(), 222 | ), 223 | ); 224 | let response = http_handler.http_request().unwrap(); 225 | 226 | // Invalid Request 227 | assert!(response.starts_with(&[ 228 | 60, 116, 105, 116, 108, 101, 62, 73, 110, 118, 97, 108, 105, 100, 32, 82, 101, 113, 
229 | 117, 101, 115, 116, 60, 47, 116, 105, 116, 108, 101, 62, 10 230 | ])); 231 | } 232 | } 233 | -------------------------------------------------------------------------------- /dtorrent/src/tracker/http/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod http_handler; 2 | pub mod query_params; 3 | pub mod url_parser; 4 | -------------------------------------------------------------------------------- /dtorrent/src/tracker/http/query_params.rs: -------------------------------------------------------------------------------- 1 | use url_encoder::url_encoder::encode; 2 | 3 | /// `QueryParams` struct containing the query parameters information. 4 | /// 5 | /// To create a new `TrackerResponse` use the method builder `new()`. 6 | /// 7 | /// To build the Query params string use the method 'build()'. 8 | #[derive(Debug)] 9 | pub struct QueryParams { 10 | info_hash: String, 11 | client_port: u32, 12 | info_length: i64, 13 | client_peer_id: String, 14 | } 15 | 16 | impl QueryParams { 17 | /// Creates a new `QueryParams` from an **info_hash**, **client_port** and **info_lenght** passed by parameters. 18 | pub fn new( 19 | info_hash: String, 20 | client_port: u32, 21 | info_length: i64, 22 | client_peer_id: String, 23 | ) -> QueryParams { 24 | QueryParams { 25 | info_hash, 26 | client_port, 27 | info_length, 28 | client_peer_id, 29 | } 30 | } 31 | 32 | /// Builds the QueryParams string and returns it. 
33 | pub fn build(&self) -> String { 34 | format!( 35 | "?info_hash={}&peer_id={}&port={}&uploaded=0&downloaded=0&left={}&event=started", 36 | encode(self.info_hash.as_str()), 37 | self.client_peer_id, 38 | self.client_port, 39 | self.info_length 40 | ) 41 | } 42 | } 43 | 44 | #[cfg(test)] 45 | mod tests { 46 | use super::*; 47 | 48 | #[test] 49 | fn test_query_params_build() { 50 | let info_hash = "2c6b6858d61da9543d4231a71db4b1c9264b0685".to_string(); 51 | let client_port = 6969; 52 | let length = 100; 53 | let peer_id = "test_peer_id".to_string(); 54 | let query_params = 55 | QueryParams::new(info_hash.clone(), client_port, length, peer_id.clone()); 56 | 57 | assert_eq!( 58 | query_params.build(), 59 | format!( 60 | "?info_hash={}&peer_id={}&port={}&uploaded=0&downloaded=0&left={}&event=started", 61 | encode(info_hash.as_str()), 62 | peer_id, 63 | client_port, 64 | length 65 | ) 66 | ); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /dtorrent/src/tracker/http/url_parser.rs: -------------------------------------------------------------------------------- 1 | /// `TrackerUrl` struct containing a tracker url information. 2 | /// 3 | /// To create a new `TrackerUrl` use the method builder `parse()`. 4 | #[derive(Debug, PartialEq, Clone)] 5 | pub struct TrackerUrl { 6 | pub protocol: ConnectionProtocol, 7 | pub host: String, 8 | pub port: u32, 9 | pub endpoint: String, 10 | } 11 | 12 | /// Posible `TrackerUrl` Connection Protocol values. 13 | #[derive(Debug, PartialEq, Clone)] 14 | pub enum ConnectionProtocol { 15 | Http, 16 | Https, 17 | } 18 | 19 | /// Posible `TrackerUrl` Errors. 20 | #[derive(Debug, PartialEq)] 21 | pub enum TrackerUrlError { 22 | InvalidTrackerURL, 23 | UnsupportedConnectionProtocol, 24 | InvalidPortNumber, 25 | } 26 | 27 | impl TrackerUrl { 28 | /// Builds a new `TrackerUrl` from a &str tracker url. 
29 | /// 30 | /// On success it returns a `TrackerUrl` cointaining the tracker's url information. 31 | /// 32 | /// It returns an `TrackerUrlError` if: 33 | /// - the url format is invalid. 34 | /// - The url connection protocol is unsupported. 35 | /// - the url port number is not a number. 36 | pub fn parse(url: &str) -> Result { 37 | let (url_without_protocol, protocol) = Self::identify_and_remove_protocol(url)?; 38 | 39 | let (url_without_endpoint, endpoint) = 40 | Self::identify_and_remove_endpoint(&url_without_protocol)?; 41 | 42 | let host = Self::identify_host(&url_without_endpoint)?; 43 | 44 | let port: u32 = if url_without_endpoint.contains(':') { 45 | Self::identify_port(&url_without_endpoint)? 46 | } else { 47 | match protocol { 48 | ConnectionProtocol::Https => 443, 49 | ConnectionProtocol::Http => 80, 50 | } 51 | }; 52 | 53 | Ok(Self { 54 | protocol, 55 | host, 56 | port, 57 | endpoint, 58 | }) 59 | } 60 | 61 | fn identify_and_remove_protocol( 62 | url: &str, 63 | ) -> Result<(String, ConnectionProtocol), TrackerUrlError> { 64 | let mut splitted_url = url.split("://"); 65 | 66 | let protocol = match splitted_url.next() { 67 | Some(protocol_name) => { 68 | if protocol_name == "http" { 69 | ConnectionProtocol::Http 70 | } else if protocol_name == "https" { 71 | ConnectionProtocol::Https 72 | } else { 73 | return Err(TrackerUrlError::UnsupportedConnectionProtocol); 74 | } 75 | } 76 | None => return Err(TrackerUrlError::InvalidTrackerURL), 77 | }; 78 | 79 | match splitted_url.next() { 80 | Some(url_without_protocol) => Ok((url_without_protocol.to_string(), protocol)), 81 | None => Err(TrackerUrlError::InvalidTrackerURL), 82 | } 83 | } 84 | 85 | fn identify_and_remove_endpoint(url: &str) -> Result<(String, String), TrackerUrlError> { 86 | let mut splitted_url = url.split('/'); 87 | 88 | let url_without_endpoint = match splitted_url.next() { 89 | Some(url_without_endpoint) => url_without_endpoint, 90 | None => return Err(TrackerUrlError::InvalidTrackerURL), 
91 | }; 92 | 93 | match splitted_url.next() { 94 | Some(endpoint) => Ok((url_without_endpoint.to_string(), endpoint.to_string())), 95 | None => Err(TrackerUrlError::InvalidTrackerURL), 96 | } 97 | } 98 | 99 | fn identify_host(url: &str) -> Result { 100 | match url.split(':').next() { 101 | Some(host) => Ok(host.to_string()), 102 | None => Err(TrackerUrlError::InvalidTrackerURL), 103 | } 104 | } 105 | 106 | fn identify_port(url: &str) -> Result { 107 | match url.split(':').last() { 108 | Some(port) => match port.parse() { 109 | Ok(port_number) => Ok(port_number), 110 | Err(_) => Err(TrackerUrlError::InvalidPortNumber), 111 | }, 112 | None => Err(TrackerUrlError::InvalidTrackerURL), 113 | } 114 | } 115 | } 116 | 117 | #[cfg(test)] 118 | mod tests { 119 | use super::*; 120 | 121 | #[test] 122 | fn test_https_no_port() { 123 | let url = String::from("https://www.example.org/ann"); 124 | let parsed_tracker_url = TrackerUrl::parse(&url).unwrap(); 125 | 126 | assert_eq!(ConnectionProtocol::Https, parsed_tracker_url.protocol); 127 | assert_eq!("www.example.org", parsed_tracker_url.host); 128 | assert_eq!(443, parsed_tracker_url.port); 129 | assert_eq!("ann", parsed_tracker_url.endpoint); 130 | } 131 | 132 | #[test] 133 | fn test_http_no_port() { 134 | let url = String::from("http://www.example.org/ann"); 135 | let parsed_tracker_url = TrackerUrl::parse(&url).unwrap(); 136 | 137 | assert_eq!(ConnectionProtocol::Http, parsed_tracker_url.protocol); 138 | assert_eq!("www.example.org", parsed_tracker_url.host); 139 | assert_eq!(80, parsed_tracker_url.port); 140 | assert_eq!("ann", parsed_tracker_url.endpoint); 141 | } 142 | 143 | #[test] 144 | fn test_http_with_port() { 145 | let url = String::from("http://www.example.org:1337/ann"); 146 | let parsed_tracker_url = TrackerUrl::parse(&url).unwrap(); 147 | 148 | assert_eq!(ConnectionProtocol::Http, parsed_tracker_url.protocol); 149 | assert_eq!("www.example.org", parsed_tracker_url.host); 150 | assert_eq!(1337, 
parsed_tracker_url.port); 151 | assert_eq!("ann", parsed_tracker_url.endpoint); 152 | } 153 | 154 | #[test] 155 | fn test_https_with_port() { 156 | let url = String::from("https://www.example.org:1337/ann"); 157 | let parsed_tracker_url = TrackerUrl::parse(&url).unwrap(); 158 | 159 | assert_eq!(ConnectionProtocol::Https, parsed_tracker_url.protocol); 160 | assert_eq!("www.example.org", parsed_tracker_url.host); 161 | assert_eq!(1337, parsed_tracker_url.port); 162 | assert_eq!("ann", parsed_tracker_url.endpoint); 163 | } 164 | 165 | #[test] 166 | fn test_invalid_protocol() { 167 | let url = String::from("udp://www.example.org:1337/ann"); 168 | 169 | assert_eq!( 170 | TrackerUrl::parse(&url), 171 | Err(TrackerUrlError::UnsupportedConnectionProtocol) 172 | ); 173 | } 174 | 175 | #[test] 176 | fn test_invalid_port() { 177 | let url = String::from("https://www.example.org:12a/ann"); 178 | assert_eq!( 179 | TrackerUrl::parse(&url), 180 | Err(TrackerUrlError::InvalidPortNumber) 181 | ); 182 | } 183 | 184 | #[test] 185 | fn test_missing_path() { 186 | let url = String::from("https://www.example.org:123"); 187 | assert_eq!( 188 | TrackerUrl::parse(&url), 189 | Err(TrackerUrlError::InvalidTrackerURL) 190 | ); 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /dtorrent/src/tracker/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod http; 2 | pub mod tracker_handler; 3 | pub mod tracker_response; 4 | -------------------------------------------------------------------------------- /dtorrent/src/tracker/tracker_handler.rs: -------------------------------------------------------------------------------- 1 | use super::http::http_handler::{HttpHandler, HttpHandlerError}; 2 | use super::http::query_params::QueryParams; 3 | use super::http::url_parser::{ConnectionProtocol, TrackerUrl, TrackerUrlError}; 4 | use super::tracker_response::FromTrackerResponseError; 5 | use 
crate::torrent_parser::torrent::Torrent; 6 | use crate::tracker::tracker_response::TrackerResponse; 7 | 8 | /// `TrackerHandler` struct for communicating to a bt tracker. 9 | /// 10 | /// To create a new `TrackerHandler` use the method builder `new()`. 11 | /// 12 | /// To get the tracker's peer list use the method `get_peer_list()`. 13 | #[derive(Debug)] 14 | pub struct TrackerHandler { 15 | pub torrent: Torrent, 16 | pub tracker_url: TrackerUrl, 17 | pub client_port: u32, 18 | client_peer_id: String, 19 | } 20 | /// Posible `TrackerHandler` errors. 21 | #[derive(Debug)] 22 | pub enum TrackerHandlerError { 23 | HttpHandlerError(HttpHandlerError), 24 | FromTrackerResponseError(FromTrackerResponseError), 25 | UrlParseError(TrackerUrlError), 26 | } 27 | 28 | impl TrackerHandler { 29 | /// Builds a new `TrackerHandler` from a **Torrent** and a **client_port** passed by paramaters. 30 | /// 31 | /// It returns an `TrackerHandlerError` if: 32 | /// - There was an error parsing the torrent's announce_url. 33 | pub fn new( 34 | torrent: Torrent, 35 | client_port: u32, 36 | client_peer_id: String, 37 | ) -> Result { 38 | let tracker_url = match TrackerUrl::parse(torrent.announce_url.as_str()) { 39 | Ok(url) => url, 40 | Err(err) => return Err(TrackerHandlerError::UrlParseError(err)), 41 | }; 42 | 43 | Ok(Self { 44 | torrent, 45 | tracker_url, 46 | client_port, 47 | client_peer_id, 48 | }) 49 | } 50 | 51 | /// Gets the tracker's peers list. 52 | /// 53 | /// On success it returns a `TrackerResponse` struct cointaining the tracker's response. 54 | /// 55 | /// It returns an `TrackerHandlerError` if: 56 | /// - There was a problem writing to the tracker. 57 | /// - There was a problem reading the tracker's response. 58 | /// - There was a problem decoding the parser response. 
59 | pub fn get_peers_list(&self) -> Result { 60 | let query_params = QueryParams::new( 61 | self.torrent.info_hash.clone(), 62 | self.client_port, 63 | self.torrent.info.length, 64 | self.client_peer_id.clone(), 65 | ); 66 | 67 | let http_handler = HttpHandler::new(self.tracker_url.clone(), query_params); 68 | 69 | let response = if self.tracker_url.protocol == ConnectionProtocol::Https { 70 | match http_handler.https_request() { 71 | Ok(response) => response, 72 | Err(err) => return Err(TrackerHandlerError::HttpHandlerError(err)), 73 | } 74 | } else { 75 | match http_handler.http_request() { 76 | Ok(response) => response, 77 | Err(err) => return Err(TrackerHandlerError::HttpHandlerError(err)), 78 | } 79 | }; 80 | match TrackerResponse::from(response) { 81 | Ok(tracker_response) => Ok(tracker_response), 82 | Err(err) => Err(TrackerHandlerError::FromTrackerResponseError(err)), 83 | } 84 | } 85 | } 86 | 87 | #[cfg(test)] 88 | mod tests { 89 | use crate::torrent_parser::info::Info; 90 | 91 | use super::*; 92 | 93 | #[test] 94 | fn test_get_peers_list() { 95 | let torrent = create_test_torrent( 96 | "https://torrent.ubuntu.com:443/announce", 97 | "e82753b6692c4f3f3646b055f70ee390309020e6", 98 | ); 99 | let test_port = 6969; 100 | let test_peer_id = "-qB4500-k51bMCWVA(~!".to_string(); 101 | 102 | let tracker_handler = TrackerHandler::new(torrent, test_port, test_peer_id).unwrap(); 103 | 104 | assert!(!tracker_handler.get_peers_list().unwrap().peers.is_empty()); 105 | } 106 | 107 | #[test] 108 | fn test_http_request() { 109 | let torrent = create_test_torrent( 110 | "http://vps02.net.orel.ru/announce", 111 | "f834824904be1854c89ba007c01678ff797f8dc7", 112 | ); 113 | let test_port = 6969; 114 | let test_peer_id = "-qB4500-k51bMCWVA(~!".to_string(); 115 | 116 | let tracker_handler = TrackerHandler::new(torrent, test_port, test_peer_id).unwrap(); 117 | 118 | assert!(!tracker_handler.get_peers_list().unwrap().peers.is_empty()); 119 | } 120 | 121 | // Auxiliar 122 | 123 | fn 
create_test_torrent(announce: &str, info_hash: &str) -> Torrent { 124 | let info = Info { 125 | length: 100, 126 | name: "test".to_string(), 127 | piece_length: 100, 128 | pieces: vec![], 129 | }; 130 | 131 | Torrent { 132 | announce_url: announce.to_string(), 133 | info, 134 | info_hash: info_hash.to_string(), 135 | } 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /dtorrent/src/tracker/tracker_response.rs: -------------------------------------------------------------------------------- 1 | use bencoder::bencode::{Bencode, BencodeError}; 2 | 3 | use crate::peer::bt_peer::{BtPeer, BtPeerError}; 4 | 5 | /// `TrackerResponse` struct containing a tracker response. 6 | /// 7 | /// To create a new `TrackerResponse` use the method builder `from()`. 8 | #[derive(Debug)] 9 | pub struct TrackerResponse { 10 | pub interval: i64, 11 | pub complete: i64, 12 | pub incomplete: i64, 13 | pub peers: Vec, 14 | } 15 | 16 | /// Posible `TrackerResponse` errors. 17 | #[derive(Debug)] 18 | pub enum FromTrackerResponseError { 19 | DecodeResponseError(BencodeError), 20 | InvalidInterval, 21 | InvalidComplete, 22 | InvalidIncomplete, 23 | InvalidPeers(BtPeerError), 24 | NotADict, 25 | NotAList, 26 | } 27 | 28 | impl TrackerResponse { 29 | /// Builds a new `TrackerResponse` decoding a bencoded Vec cointaining the tracker's response. 30 | /// 31 | /// It returns an `FromTrackerResponseError` if: 32 | /// - There was a problem decoding the parser response. 33 | /// - The bencoded response is not a dict. 34 | /// - The bencoded peers are not a list. 35 | /// - The tracker response interval is invalid. 36 | /// - The tracker response complete is invalid. 37 | /// - The tracker response incomplete is invalid. 38 | /// - The tracker response peers are invalid. 
39 | pub fn from(response: Vec) -> Result { 40 | let mut interval = 0; 41 | let mut complete = 0; 42 | let mut incomplete = 0; 43 | let mut peers = Vec::new(); 44 | 45 | let decoded_res = match Bencode::decode(&response) { 46 | Ok(decoded_res) => decoded_res, 47 | Err(err) => return Err(FromTrackerResponseError::DecodeResponseError(err)), 48 | }; 49 | 50 | let d = match decoded_res { 51 | Bencode::BDict(d) => d, 52 | _ => return Err(FromTrackerResponseError::NotADict), 53 | }; 54 | 55 | for (k, v) in d.iter() { 56 | if k == b"interval" { 57 | interval = Self::create_interval(v)?; 58 | } else if k == b"complete" { 59 | complete = Self::create_complete(v)?; 60 | } else if k == b"incomplete" { 61 | incomplete = Self::create_incomplete(v)?; 62 | } else if k == b"peers" { 63 | peers = Self::create_peers(v)?; 64 | } 65 | } 66 | 67 | Ok(TrackerResponse { 68 | interval, 69 | complete, 70 | incomplete, 71 | peers, 72 | }) 73 | } 74 | 75 | fn create_interval(bencode: &Bencode) -> Result { 76 | let interval = match bencode { 77 | Bencode::BNumber(n) => *n, 78 | _ => return Err(FromTrackerResponseError::InvalidInterval), 79 | }; 80 | 81 | Ok(interval) 82 | } 83 | 84 | fn create_complete(bencode: &Bencode) -> Result { 85 | let complete = match bencode { 86 | Bencode::BNumber(n) => *n, 87 | _ => return Err(FromTrackerResponseError::InvalidComplete), 88 | }; 89 | 90 | Ok(complete) 91 | } 92 | 93 | fn create_incomplete(bencode: &Bencode) -> Result { 94 | let incomplete = match bencode { 95 | Bencode::BNumber(n) => *n, 96 | _ => return Err(FromTrackerResponseError::InvalidIncomplete), 97 | }; 98 | 99 | Ok(incomplete) 100 | } 101 | 102 | fn create_peers(bencode: &Bencode) -> Result, FromTrackerResponseError> { 103 | match bencode { 104 | Bencode::BList(list) => Self::create_peers_from_dict(list), 105 | Bencode::BString(str) => Self::create_peers_from_bstring(str), 106 | _ => Err(FromTrackerResponseError::NotAList), 107 | } 108 | } 109 | 110 | fn create_peers_from_dict(list: 
&[Bencode]) -> Result, FromTrackerResponseError> { 111 | let mut peers = Vec::new(); 112 | 113 | for p in list { 114 | let peer = BtPeer::from(p.clone()).map_err(FromTrackerResponseError::InvalidPeers)?; 115 | peers.push(peer); 116 | } 117 | 118 | Ok(peers) 119 | } 120 | 121 | fn create_peers_from_bstring(bstring: &[u8]) -> Result, FromTrackerResponseError> { 122 | Ok(bstring 123 | .chunks(6) 124 | .map(|chunk| { 125 | let ip = format!("{}.{}.{}.{}", chunk[0], chunk[1], chunk[2], chunk[3]); 126 | let port = u16::from_be_bytes([chunk[4], chunk[5]]) as i64; 127 | 128 | BtPeer::new(ip, port) 129 | }) 130 | .collect()) 131 | } 132 | } 133 | 134 | #[cfg(test)] 135 | mod tests { 136 | use super::*; 137 | use std::collections::BTreeMap; 138 | 139 | #[test] 140 | fn test_from_tracker_response() { 141 | let peer_dict = build_peer_dict(b"id1".to_vec(), b"127.0.0.1".to_vec(), 6868); 142 | let peer_dict2 = build_peer_dict(b"id2".to_vec(), b"127.0.0.2".to_vec(), 4242); 143 | 144 | let peers_list = vec![Bencode::BDict(peer_dict), Bencode::BDict(peer_dict2)]; 145 | 146 | let mut dict = BTreeMap::new(); 147 | dict.insert(b"interval".to_vec(), Bencode::BNumber(10)); 148 | dict.insert(b"complete".to_vec(), Bencode::BNumber(10)); 149 | dict.insert(b"incomplete".to_vec(), Bencode::BNumber(10)); 150 | dict.insert(b"peers".to_vec(), Bencode::BList(peers_list)); 151 | 152 | let response = Bencode::encode(&dict); 153 | let response_decoded = TrackerResponse::from(response).unwrap(); 154 | 155 | assert_eq!(response_decoded.interval, 10); 156 | assert_eq!(response_decoded.complete, 10); 157 | assert_eq!(response_decoded.incomplete, 10); 158 | assert_eq!(response_decoded.peers.len(), 2); 159 | } 160 | 161 | fn build_peer_dict(peer_id: Vec, ip: Vec, port: i64) -> BTreeMap, Bencode> { 162 | let mut peer_dict = BTreeMap::new(); 163 | peer_dict.insert(b"peer id".to_vec(), Bencode::BString(peer_id)); 164 | peer_dict.insert(b"ip".to_vec(), Bencode::BString(ip)); 165 | 
peer_dict.insert(b"port".to_vec(), Bencode::BNumber(port)); 166 | peer_dict 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /dtracker/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dtracker" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | chrono = "0.4" 10 | rand = "0.8.5" 11 | bencoder = {path = "../bencoder"} 12 | url_encoder = { path = "../url_encoder" } 13 | serde = { version = "1.0", features = ["derive"] } 14 | serde_json = "1.0" 15 | tracing = "0.1" 16 | tracing-subscriber = "0.3" 17 | -------------------------------------------------------------------------------- /dtracker/README.md: -------------------------------------------------------------------------------- 1 | ## Requirements 2 | 3 | To build the program it needs: 4 | 5 | - [Rust](https://www.rust-lang.org/) (and cargo) 6 | 7 | ## Running 8 | 9 | Run the program with `cargo` followed by the port on which to run the tracker: 10 | 11 | ```bash 12 | $ cargo run --bin dtracker 8080 13 | ``` 14 | 15 | ## Tests 16 | 17 | Run tests with `cargo`: 18 | 19 | ```bash 20 | $ cargo test --package dtracker 21 | ``` 22 | -------------------------------------------------------------------------------- /dtracker/config.cfg: -------------------------------------------------------------------------------- 1 | TCP_PORT=7878 2 | LOG_DIRECTORY=./dtracker_logs 3 | -------------------------------------------------------------------------------- /dtracker/src/announce/announce_request.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, num::ParseIntError, str::FromStr}; 2 | 3 | use super::announce_request_error::AnnounceRequestError; 4 | use crate::tracker_peer::event::PeerEvent; 5 | use 
url_encoder::url_encoder::decode; 6 | 7 | /// Struct representing the announce request to a tracker. 8 | /// 9 | /// # Fields 10 | /// * `info_hash`: urlencoded 20-byte SHA1 hash of the value of the info key from the Metainfo file. 11 | /// * `peer_id`: urlencoded 20-byte string used as a unique ID for the client, generated by the client at startup. 12 | /// * `port`: The port number the client is listening on. 13 | /// * `uploaded`: The total amount of data uploaded by the client, in bytes. 14 | /// * `downloaded`: The total amount of data downloaded by the client, in bytes. 15 | /// * `left`: The total size of the file, in bytes, that the client still has to download. 16 | /// * `compact`: If true, the response will be in compact format. 17 | /// * `no_peer_id`: If true, the response will not include the peer id. 18 | /// * `event`: *(Optional)* The event that the client is reporting. Possible values are: 19 | /// * `started`: The client has just started. 20 | /// * `stopped`: The client has just stopped. 21 | /// * `completed`: The client has just successfully downloaded the file. 22 | /// * `ip`: *(Optional)* The IP address of the client. If not present, the IP address of the client will be determined automatically. 23 | /// * `numwant`: The number of peers that the client would like to receive in the response. If absent, the client requests a default number of peers. 24 | /// * `key`: *(Optional)* The key used to identify the client. If absent, the client will be identified by its peer id. 25 | /// * `trackerid`: *(Optional)* The id of the tracker. If absent, the tracker will be identified by its IP address. 
26 | #[derive(Debug, Clone)] 27 | pub struct AnnounceRequest { 28 | pub info_hash: [u8; 20], 29 | pub peer_id: [u8; 20], 30 | pub port: u16, 31 | pub uploaded: u64, 32 | pub downloaded: u64, 33 | pub left: u64, 34 | pub compact: bool, 35 | pub no_peer_id: bool, 36 | pub event: Option, 37 | pub ip: Option, 38 | pub numwant: u32, 39 | pub key: Option, 40 | pub tracker_id: Option, 41 | } 42 | 43 | const DEFAULT_NUMWANT: &str = "50"; 44 | 45 | impl AnnounceRequest { 46 | /// Creates a new AnnounceRequest from a HashMap containing the query parameters of the announce request. 47 | /// 48 | /// If the request is invalid, an error is returned. 49 | pub fn new_from(query_params: HashMap) -> Result { 50 | // Obligatory params: 51 | let info_hash = Self::get_info_hash(&query_params)?; 52 | let peer_id = Self::get_peer_id(&query_params)?; 53 | let port = Self::get_port(&query_params)?; 54 | let uploaded = Self::get_uploaded(&query_params)?; 55 | let downloaded = Self::get_downloaded(&query_params)?; 56 | let left = Self::get_left(&query_params)?; 57 | 58 | // Optional and default params: 59 | let compact = Self::get_compact(&query_params); 60 | let no_peer_id = Self::get_no_peer_id(&query_params); 61 | let event = Self::get_event(&query_params)?; 62 | let ip = Self::get_ip(&query_params); 63 | let numwant = Self::get_numwant(&query_params)?; 64 | let key = Self::get_key(&query_params); 65 | let tracker_id = Self::get_tracker_id(&query_params); 66 | 67 | Ok(Self { 68 | info_hash, 69 | peer_id, 70 | port, 71 | uploaded, 72 | downloaded, 73 | left, 74 | compact, 75 | no_peer_id, 76 | event, 77 | ip, 78 | numwant, 79 | key, 80 | tracker_id, 81 | }) 82 | } 83 | 84 | fn get_info_hash( 85 | query_params_map: &HashMap, 86 | ) -> Result<[u8; 20], AnnounceRequestError> { 87 | let info_hash = query_params_map.get("info_hash").map_or_else( 88 | || Err(AnnounceRequestError::InvalidInfoHash), 89 | |i| { 90 | Self::decode_hex(&decode(i)) 91 | .map_err(|_| 
AnnounceRequestError::InvalidInfoHash)? 92 | .try_into() 93 | .map_err(|_| AnnounceRequestError::InvalidInfoHash) 94 | }, 95 | )?; 96 | Ok(info_hash) 97 | } 98 | 99 | fn decode_hex(s: &str) -> Result, ParseIntError> { 100 | (0..s.len()) 101 | .step_by(2) 102 | .map(|i| u8::from_str_radix(&s[i..i + 2], 16)) 103 | .collect() 104 | } 105 | 106 | fn get_peer_id( 107 | query_params_map: &HashMap, 108 | ) -> Result<[u8; 20], AnnounceRequestError> { 109 | let peer_id = query_params_map.get("peer_id").map_or_else( 110 | || Err(AnnounceRequestError::InvalidPeerId), 111 | |i| { 112 | Self::decode_hex(&decode(i)) 113 | .map_err(|_| AnnounceRequestError::InvalidPeerId)? 114 | .try_into() 115 | .map_err(|_| AnnounceRequestError::InvalidPeerId) 116 | }, 117 | )?; 118 | Ok(peer_id) 119 | } 120 | 121 | fn get_port(query_params_map: &HashMap) -> Result { 122 | let port = query_params_map.get("port").map_or_else( 123 | || Err(AnnounceRequestError::InvalidPort), 124 | |p| { 125 | p.parse::() 126 | .map_err(|_| AnnounceRequestError::InvalidPort) 127 | }, 128 | )?; 129 | Ok(port) 130 | } 131 | 132 | fn get_uploaded( 133 | query_params_map: &HashMap, 134 | ) -> Result { 135 | let uploaded = query_params_map.get("uploaded").map_or_else( 136 | || Err(AnnounceRequestError::InvalidUploaded), 137 | |u| { 138 | u.parse::() 139 | .map_err(|_| AnnounceRequestError::InvalidUploaded) 140 | }, 141 | )?; 142 | Ok(uploaded) 143 | } 144 | 145 | fn get_downloaded( 146 | query_params_map: &HashMap, 147 | ) -> Result { 148 | let downloaded = query_params_map.get("downloaded").map_or_else( 149 | || Err(AnnounceRequestError::InvalidDownloaded), 150 | |d| { 151 | d.parse::() 152 | .map_err(|_| AnnounceRequestError::InvalidDownloaded) 153 | }, 154 | )?; 155 | Ok(downloaded) 156 | } 157 | 158 | fn get_left(query_params_map: &HashMap) -> Result { 159 | let left = query_params_map.get("left").map_or_else( 160 | || Err(AnnounceRequestError::InvalidLeft), 161 | |l| { 162 | l.parse::() 163 | .map_err(|_| 
AnnounceRequestError::InvalidLeft) 164 | }, 165 | )?; 166 | Ok(left) 167 | } 168 | 169 | fn get_compact(query_params_map: &HashMap) -> bool { 170 | query_params_map.get("compact").unwrap_or(&"0".to_string()) == "1" 171 | } 172 | 173 | fn get_no_peer_id(query_params_map: &HashMap) -> bool { 174 | query_params_map 175 | .get("no_peer_id") 176 | .unwrap_or(&"0".to_string()) 177 | == "1" 178 | } 179 | 180 | fn get_event( 181 | query_params_map: &HashMap, 182 | ) -> Result, AnnounceRequestError> { 183 | match query_params_map 184 | .get("event") 185 | .map(|e| PeerEvent::from_str(e).ok()) 186 | { 187 | Some(ev) => match ev { 188 | Some(ev) => Ok(Some(ev)), 189 | None => Err(AnnounceRequestError::InvalidEvent), 190 | }, 191 | None => Ok(None), 192 | } 193 | } 194 | 195 | fn get_ip(query_params_map: &HashMap) -> Option { 196 | query_params_map.get("ip").map(|ip| ip.to_string()) 197 | } 198 | 199 | fn get_numwant( 200 | query_params_map: &HashMap, 201 | ) -> Result { 202 | query_params_map 203 | .get("numwant") 204 | .unwrap_or(&DEFAULT_NUMWANT.to_string()) 205 | .parse::() 206 | .map_err(|_| AnnounceRequestError::InvalidNumwant) 207 | } 208 | 209 | fn get_key(query_params_map: &HashMap) -> Option { 210 | query_params_map.get("key").map(|s| s.to_string()) 211 | } 212 | 213 | fn get_tracker_id(query_params_map: &HashMap) -> Option { 214 | query_params_map.get("tracker_id").map(|s| s.to_string()) 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /dtracker/src/announce/announce_request_error.rs: -------------------------------------------------------------------------------- 1 | /// Possible errors that can occur when creating an AnnounceRequest. 
#[derive(Debug)]
pub enum AnnounceRequestError {
    InvalidInfoHash,
    InvalidPeerId,
    InvalidPort,
    InvalidUploaded,
    InvalidDownloaded,
    InvalidLeft,
    InvalidIp,
    InvalidNumwant,
    InvalidKey,
    InvalidTrackerId,
    InvalidEvent,
}

/// Implementing `Display` instead of `ToString` directly: the standard
/// blanket impl then provides `to_string()` for free, existing callers keep
/// working, and the error can also be used with `{}` formatting / `write!`.
/// (Implementing `ToString` by hand is flagged by clippy, and this repo's CI
/// runs clippy with `-D warnings`.)
impl std::fmt::Display for AnnounceRequestError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let description = match self {
            Self::InvalidInfoHash => "Invalid info_hash",
            Self::InvalidPeerId => "Invalid peer_id",
            Self::InvalidPort => "Invalid port",
            Self::InvalidUploaded => "Invalid uploaded",
            Self::InvalidDownloaded => "Invalid downloaded",
            Self::InvalidLeft => "Invalid left",
            Self::InvalidIp => "Invalid ip",
            Self::InvalidNumwant => "Invalid numwant",
            Self::InvalidKey => "Invalid key",
            Self::InvalidTrackerId => "Invalid tracker_id",
            Self::InvalidEvent => "Invalid event",
        };
        write!(f, "{}", description)
    }
}
16 | /// * `warning_message`: Similar to failure reason, but the response still gets processed normally. The warning message is shown just like an error. 17 | /// * `interval`: Interval in seconds that the client should wait between sending regular requests to the tracker. 18 | /// * `min_interval`: Minimum announce interval. If present clients must not reannounce more frequently than this. 19 | /// * `tracker_id`: A string that the client should send back on its next announcements. If absent and a previous announce sent a tracker id, do not discard the old value; keep using it. 20 | /// * `complete`: number of peers with the entire file, i.e. seeders. 21 | /// * `incomplete`: number of non-seeder peers, aka "leechers". 22 | /// * `peers`: (dictionary model) The value is a list of dictionaries, each with the following keys: 23 | /// - **peer_id**: peer's self-selected ID, as described above for the tracker request (string) 24 | /// - **ip**: peer's IP address either IPv6 (hexed) or IPv4 (dotted quad) or DNS name (string) 25 | /// - **port**: peer's port number (integer) 26 | /// * `peers_binary`: peers: (binary model) Instead of using the dictionary model described above, the peers value may be a string consisting of multiples of 6 bytes. First 4 bytes are the IP address and last 2 bytes are the port number. All in network (big endian) notation. 27 | #[derive(Debug)] 28 | pub struct AnnounceResponse { 29 | pub failure_reason: Option, 30 | pub warning_message: Option, 31 | pub interval: u32, 32 | pub min_interval: Option, 33 | pub tracker_id: Option, 34 | pub complete: u32, 35 | pub incomplete: u32, 36 | pub peers: Vec, 37 | } 38 | 39 | impl AnnounceResponse { 40 | /// Creates a new AnnounceResponse from a HashMap containing the query parameters of the announce request. 
41 | pub fn from( 42 | query_params: HashMap, 43 | tracker_status: Arc, 44 | peer_ip: String, 45 | ) -> Self { 46 | let announce_request = match AnnounceRequest::new_from(query_params) { 47 | Ok(announce_request) => announce_request, 48 | Err(announce_request_error) => { 49 | return Self::create_error_response(announce_request_error.to_string()) 50 | } 51 | }; 52 | 53 | let peer = Peer::from_request(announce_request.clone(), peer_ip); 54 | 55 | let active_peers = tracker_status.incoming_peer( 56 | announce_request.info_hash, 57 | peer, 58 | announce_request.numwant, 59 | ); 60 | 61 | // TODO: Handle announce_request.compact == true case. 62 | 63 | Self::create_success_response( 64 | active_peers.peers, 65 | active_peers.seeders, 66 | active_peers.leechers, 67 | ) 68 | } 69 | 70 | fn create_error_response(failure_reason: String) -> Self { 71 | Self { 72 | failure_reason: Some(failure_reason), 73 | warning_message: None, 74 | interval: 0, 75 | min_interval: None, 76 | tracker_id: None, 77 | complete: 0, 78 | incomplete: 0, 79 | peers: Vec::new(), 80 | } 81 | } 82 | 83 | fn create_success_response(peers_list: Vec, complete: u32, incomplete: u32) -> Self { 84 | Self { 85 | failure_reason: None, 86 | warning_message: None, 87 | interval: 0, 88 | min_interval: None, 89 | tracker_id: None, 90 | complete, 91 | incomplete, 92 | peers: peers_list, 93 | } 94 | } 95 | } 96 | 97 | impl ToBencode for AnnounceResponse { 98 | fn to_bencode(&self) -> bencoder::bencode::Bencode { 99 | let mut announce_response = BTreeMap::new(); 100 | if let Some(failure_reason) = &self.failure_reason { 101 | announce_response.insert(b"failure reason".to_vec(), failure_reason.to_bencode()); 102 | } 103 | if let Some(warning_message) = &self.warning_message { 104 | announce_response.insert(b"warning message".to_vec(), warning_message.to_bencode()); 105 | } 106 | announce_response.insert(b"interval".to_vec(), self.interval.to_bencode()); 107 | if let Some(min_interval) = &self.min_interval { 108 | 
announce_response.insert(b"min interval".to_vec(), min_interval.to_bencode()); 109 | } 110 | if let Some(tracker_id) = &self.tracker_id { 111 | announce_response.insert(b"tracker id".to_vec(), tracker_id.to_bencode()); 112 | } 113 | announce_response.insert(b"complete".to_vec(), self.complete.to_bencode()); 114 | announce_response.insert(b"incomplete".to_vec(), self.incomplete.to_bencode()); 115 | announce_response.insert(b"peers".to_vec(), self.peers.to_bencode()); 116 | announce_response.to_bencode() 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /dtracker/src/announce/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod announce_request; 2 | pub mod announce_request_error; 3 | pub mod announce_response; 4 | -------------------------------------------------------------------------------- /dtracker/src/bt_tracker/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod tracker; 2 | -------------------------------------------------------------------------------- /dtracker/src/bt_tracker/tracker.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::{io, thread::spawn}; 3 | 4 | use chrono::Duration; 5 | use tracing::info; 6 | 7 | use crate::{ 8 | http_server::server::Server, stats::stats_updater::StatsUpdater, 9 | tracker_status::atomic_tracker_status::AtomicTrackerStatus, 10 | }; 11 | 12 | /// Struct that represents the Tracker itself. 13 | /// 14 | /// Serves as a starting point for the application. 
15 | pub struct BtTracker { 16 | server: Server, 17 | } 18 | 19 | #[derive(Debug)] 20 | pub enum BtTrackerError { 21 | CreatingServerError(io::Error), 22 | StartingServerError(io::Error), 23 | } 24 | 25 | const STATS_UPDATER_MINUTES_TIMEOUT: i64 = 1; 26 | 27 | impl BtTracker { 28 | /// Creates a new BtTracker 29 | pub fn init(port: u16) -> Result { 30 | let tracker_status = Arc::new(AtomicTrackerStatus::default()); 31 | 32 | let stats_updater = Self::spawn_stats_updater(tracker_status.clone()); 33 | 34 | let server = Server::init(tracker_status, stats_updater, port) 35 | .map_err(BtTrackerError::CreatingServerError)?; 36 | 37 | info!("Tracker started"); 38 | 39 | Ok(Self { server }) 40 | } 41 | 42 | /// Starts the server for handling requests. 43 | pub fn run(&self) -> Result<(), BtTrackerError> { 44 | self.server 45 | .serve() 46 | .map_err(BtTrackerError::StartingServerError) 47 | } 48 | 49 | fn spawn_stats_updater(tracker_status: Arc) -> Arc { 50 | let stats_updater = Arc::new(StatsUpdater::new( 51 | tracker_status, 52 | Duration::minutes(STATS_UPDATER_MINUTES_TIMEOUT), 53 | )); 54 | let updater = stats_updater.clone(); 55 | spawn(move || { 56 | updater.run(); 57 | }); 58 | stats_updater 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /dtracker/src/http/http_method.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | #[derive(Debug, PartialEq)] 4 | pub enum HttpMethod { 5 | Get, 6 | } 7 | 8 | impl FromStr for HttpMethod { 9 | type Err = (); 10 | 11 | fn from_str(s: &str) -> Result { 12 | match s { 13 | "GET" => Ok(HttpMethod::Get), 14 | _ => Err(()), 15 | } 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /dtracker/src/http/http_parser.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, str::FromStr}; 2 | 3 | use 
super::http_method::HttpMethod; 4 | 5 | /// A struct that represents a HTTP request. 6 | /// 7 | /// # Fields 8 | /// * `method`: The HTTP method of the request. 9 | /// * `endpoint`: The endpoint of the request. 10 | /// * `params`: The parameters of the request. 11 | pub struct Http { 12 | pub method: HttpMethod, 13 | pub endpoint: String, 14 | pub params: HashMap, 15 | } 16 | 17 | #[derive(Debug)] 18 | pub enum HttpError { 19 | ParseError, 20 | HttpMethodNotSupported, 21 | } 22 | 23 | impl Http { 24 | /// Parses a HTTP request. If the request is invalid, returns an error. 25 | pub fn parse(buffer: &[u8]) -> Result { 26 | let mut lines = buffer.split(|&b| b == b'\r'); 27 | let line = lines.next().ok_or(HttpError::ParseError)?; 28 | 29 | let mut line_split = line.split(|&b| b == b' '); 30 | let method = HttpMethod::from_str( 31 | &String::from_utf8_lossy(line_split.next().ok_or(HttpError::ParseError)?).to_string(), 32 | ) 33 | .map_err(|_| HttpError::HttpMethodNotSupported)?; 34 | 35 | let mut endpoint_split = line_split 36 | .next() 37 | .ok_or(HttpError::ParseError)? 38 | .split(|&b| b == b'?'); 39 | let endpoint = String::from_utf8_lossy(endpoint_split.next().ok_or(HttpError::ParseError)?) 
40 | .to_string(); 41 | 42 | let query_params = endpoint_split.next().ok_or(HttpError::ParseError)?; 43 | let params = parse_params(query_params).map_err(|_| HttpError::ParseError)?; 44 | 45 | Ok(Http { 46 | method, 47 | endpoint, 48 | params, 49 | }) 50 | } 51 | } 52 | 53 | fn parse_params(query_params: &[u8]) -> Result, HttpError> { 54 | let mut params = HashMap::new(); 55 | let query_params = query_params.split(|&b| b == b'&'); 56 | 57 | for param in query_params { 58 | let mut param_split = param.split(|&b| b == b'='); 59 | let key = 60 | String::from_utf8_lossy(param_split.next().ok_or(HttpError::ParseError)?).to_string(); 61 | let value = 62 | String::from_utf8_lossy(param_split.next().ok_or(HttpError::ParseError)?).to_string(); 63 | params.insert(key, value); 64 | } 65 | 66 | Ok(params) 67 | } 68 | 69 | #[cfg(test)] 70 | mod tests { 71 | use super::*; 72 | 73 | #[test] 74 | fn test_parse_valid_request() { 75 | let buffer = "GET /announce?info_hash=%b1%11%81%3c%e6%0f%42%91%97%34%82%3d%f5%ec%20%bd%1e%04%e7%f7&peer_id=DTorrent:02284204893&port=6969&uploaded=0&downloaded=0&left=396361728&event=started HTTP/1.1\r\nHost: bttracker.debian.org\r\nUser-Agent: LDTorrent/0.1\r\n\r\n".as_bytes(); 76 | let http = Http::parse(buffer).unwrap(); 77 | let mut params = HashMap::new(); 78 | params.insert( 79 | "info_hash".to_string(), 80 | "%b1%11%81%3c%e6%0f%42%91%97%34%82%3d%f5%ec%20%bd%1e%04%e7%f7".to_string(), 81 | ); 82 | params.insert("peer_id".to_string(), "DTorrent:02284204893".to_string()); 83 | params.insert("port".to_string(), "6969".to_string()); 84 | params.insert("uploaded".to_string(), "0".to_string()); 85 | params.insert("downloaded".to_string(), "0".to_string()); 86 | params.insert("left".to_string(), "396361728".to_string()); 87 | params.insert("event".to_string(), "started".to_string()); 88 | 89 | assert_eq!(http.method, HttpMethod::from_str("GET").unwrap()); 90 | assert_eq!(http.endpoint, "/announce"); 91 | assert_eq!(http.params, params); 92 | } 93 | 94 | 
#[test] 95 | fn test_parse_request_without_record_cannot_be_parsed() { 96 | let buffer = "/announce?info_hash=%b1%11%81%3c%e6%0f%42%91%97%34%82%3d%f5%ec%20%bd%1e%04%e7%f7&peer_id=DTorrent:02284204893&port=6969&uploaded=0&downloaded=0&left=396361728&event=started HTTP/1.1\r\nHost: bttracker.debian.org\r\nUser-Agent: LDTorrent/0.1\r\n\r\n".as_bytes(); 97 | assert!(Http::parse(buffer).is_err()); 98 | } 99 | 100 | #[test] 101 | fn test_parse_request_without_query_cannot_be_parsed() { 102 | let buffer = 103 | "GET\r\nHost: bttracker.debian.org\r\nUser-Agent: LDTorrent/0.1\r\n\r\n".as_bytes(); 104 | assert!(Http::parse(buffer).is_err()); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /dtracker/src/http/http_status.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | #[derive(Debug, PartialEq)] 4 | pub enum HttpStatus { 5 | Ok, 6 | NotFound, 7 | BadRequest, 8 | } 9 | 10 | impl FromStr for HttpStatus { 11 | type Err = (); 12 | 13 | fn from_str(s: &str) -> Result { 14 | match s { 15 | "200 OK" => Ok(HttpStatus::Ok), 16 | "404 NOT FOUND" => Ok(HttpStatus::NotFound), 17 | "400 BAD REQUEST" => Ok(HttpStatus::BadRequest), 18 | _ => Err(()), 19 | } 20 | } 21 | } 22 | 23 | impl ToString for HttpStatus { 24 | fn to_string(&self) -> String { 25 | match self { 26 | Self::Ok => "200 OK".to_string(), 27 | Self::NotFound => "404 NOT FOUND".to_string(), 28 | Self::BadRequest => "400 BAD REQUEST".to_string(), 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /dtracker/src/http/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod http_method; 2 | pub mod http_parser; 3 | pub mod http_status; 4 | -------------------------------------------------------------------------------- /dtracker/src/http_server/mod.rs: 
-------------------------------------------------------------------------------- 1 | pub mod request_handler; 2 | pub mod server; 3 | pub mod thread_pool; 4 | -------------------------------------------------------------------------------- /dtracker/src/http_server/request_handler.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | io::{Read, Write}, 3 | net::TcpStream, 4 | sync::Arc, 5 | }; 6 | 7 | use bencoder::bencode::Bencode; 8 | 9 | use crate::{ 10 | announce::announce_response::AnnounceResponse, 11 | http::{http_method::HttpMethod, http_parser::Http, http_status::HttpStatus}, 12 | stats::{stats_response::StatsResponse, stats_updater::StatsUpdater}, 13 | tracker_status::atomic_tracker_status::AtomicTrackerStatus, 14 | }; 15 | 16 | /// Struct that represents a connection capable of listening to requests and returning an answer. 17 | pub struct RequestHandler { 18 | pub stream: TcpStream, 19 | } 20 | 21 | #[derive(Debug)] 22 | pub enum RequestHandlerError { 23 | InvalidEndpointError, 24 | ParseHttpError, 25 | GettingPeerIpError, 26 | FromUtfError(std::string::FromUtf8Error), 27 | BadRequest, 28 | WritingResponseError, 29 | InvalidQueryParamError, 30 | InvalidStatsError, 31 | } 32 | 33 | impl RequestHandler { 34 | /// Returns a new RequestHandler. 35 | /// 36 | /// ## Arguments 37 | /// * `stream`: a TcpStream responsible of reading HTTP requests and sending a response. 38 | pub fn new(stream: TcpStream) -> RequestHandler { 39 | RequestHandler { stream } 40 | } 41 | 42 | /// Handles a HTTP request and sends back a response in a successful scenario. 43 | /// Returns a RequestHandleError in the event of a request to an invalid endpoint. 44 | /// 45 | /// ## Arguments 46 | /// * `tracker_status`: The status of the tracker at the moment of handling the request. 
47 | pub fn handle( 48 | &mut self, 49 | tracker_status: Arc, 50 | stats_updater: Arc, 51 | ) -> Result<(), RequestHandlerError> { 52 | // TODO: read HTTP message length correctly 53 | let mut buf = [0; 1024]; 54 | let bytes_read = match self.stream.read(&mut buf) { 55 | Ok(bytes_read) => bytes_read, 56 | Err(_) => { 57 | self.send_bad_request()?; 58 | return Err(RequestHandlerError::BadRequest); 59 | } 60 | }; 61 | if bytes_read == 0 { 62 | self.send_bad_request()?; 63 | return Err(RequestHandlerError::BadRequest); 64 | } 65 | 66 | let http_request = match Http::parse(&buf).map_err(|_| RequestHandlerError::ParseHttpError) 67 | { 68 | Ok(http_request) => http_request, 69 | Err(_) => { 70 | self.send_bad_request()?; 71 | return Err(RequestHandlerError::BadRequest); 72 | } 73 | }; 74 | 75 | let (status_line, response) = if http_request.method.eq(&HttpMethod::Get) { 76 | let response = match http_request.endpoint.as_str() { 77 | "/announce" => { 78 | self.handle_announce(http_request, tracker_status, self.get_peer_ip()?) 
79 | } 80 | "/stats" => match self.handle_stats(http_request, stats_updater) { 81 | Ok(response) => response, 82 | Err(_) => { 83 | self.send_bad_request()?; 84 | return Err(RequestHandlerError::BadRequest); 85 | } 86 | }, 87 | _ => { 88 | self.send_bad_request()?; 89 | return Err(RequestHandlerError::InvalidEndpointError); 90 | } 91 | }; 92 | (HttpStatus::Ok, response) 93 | } else { 94 | (HttpStatus::NotFound, "".as_bytes().to_vec()) 95 | }; 96 | 97 | self.send_response(response, status_line) 98 | .map_err(|_| RequestHandlerError::WritingResponseError)?; 99 | 100 | Ok(()) 101 | } 102 | 103 | fn send_bad_request(&mut self) -> Result<(), RequestHandlerError> { 104 | self.send_response("".as_bytes().to_vec(), HttpStatus::BadRequest) 105 | .map_err(|_| RequestHandlerError::WritingResponseError)?; 106 | Ok(()) 107 | } 108 | 109 | fn handle_announce( 110 | &self, 111 | http_request: Http, 112 | tracker_status: Arc, 113 | peer_ip: String, 114 | ) -> Vec { 115 | let response = AnnounceResponse::from(http_request.params, tracker_status, peer_ip); 116 | match response.failure_reason { 117 | Some(failure) => Bencode::encode(&failure), 118 | None => Bencode::encode(&response), 119 | } 120 | } 121 | 122 | fn handle_stats( 123 | &self, 124 | http_request: Http, 125 | stats_updater: Arc, 126 | ) -> Result, RequestHandlerError> { 127 | let response = StatsResponse::from(http_request.params, stats_updater) 128 | .map_err(|_| RequestHandlerError::InvalidStatsError)?; 129 | Ok(serde_json::to_string(&response) 130 | .map_err(|_| RequestHandlerError::InvalidStatsError)? 
131 | .as_bytes() 132 | .to_vec()) 133 | } 134 | 135 | fn create_response(mut contents: Vec, status_line: HttpStatus) -> Vec { 136 | let response = format!( 137 | "HTTP/1.1 {}\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: {}\r\n\r\n", 138 | status_line.to_string(), 139 | contents.len(), 140 | ); 141 | let mut response = response.as_bytes().to_vec(); 142 | response.append(&mut contents); 143 | response 144 | } 145 | 146 | fn send_response(&mut self, contents: Vec, status_line: HttpStatus) -> std::io::Result<()> { 147 | let response = Self::create_response(contents, status_line); 148 | 149 | self.stream.write_all(&response)?; 150 | self.stream.flush()?; 151 | 152 | Ok(()) 153 | } 154 | 155 | fn get_peer_ip(&self) -> Result { 156 | Ok(self 157 | .stream 158 | .peer_addr() 159 | .map_err(|_| RequestHandlerError::GettingPeerIpError)? 160 | .ip() 161 | .to_string()) 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /dtracker/src/http_server/server.rs: -------------------------------------------------------------------------------- 1 | use std::{net::TcpListener, sync::Arc}; 2 | 3 | use crate::http_server::request_handler::RequestHandler; 4 | use crate::stats::stats_updater::StatsUpdater; 5 | use crate::{ 6 | http_server::thread_pool::pool::ThreadPool, 7 | tracker_status::atomic_tracker_status::AtomicTrackerStatus, 8 | }; 9 | use tracing::{error, info}; 10 | 11 | /// Struct that represents the HTTP Server that will listen to connections to the Tracker. 12 | /// 13 | /// ## Fields 14 | /// * `listener`: The TCP server binded to the socket, responsible of listening for connections. 15 | /// * `pool`: A thread pool that provides worker threads, in order to favor parallel execution. 16 | /// * `status`: Current status of the tracker. 17 | /// * `logger_sender`: To log using the Logger. 
18 | pub struct Server { 19 | listener: TcpListener, 20 | pool: ThreadPool, 21 | status: Arc, 22 | stats_updater: Arc, 23 | port: u16, 24 | } 25 | 26 | impl Server { 27 | /// Creates a new `Server`. 28 | pub fn init( 29 | status: Arc, 30 | stats_updater: Arc, 31 | port: u16, 32 | ) -> std::io::Result { 33 | let listener = TcpListener::bind(format!("0.0.0.0:{}", port))?; 34 | Ok(Server { 35 | listener, 36 | pool: ThreadPool::new(1000), 37 | status, 38 | stats_updater, 39 | port, 40 | }) 41 | } 42 | 43 | /// Handles new connections to the server 44 | pub fn serve(&self) -> std::io::Result<()> { 45 | info!("Serving on http://0.0.0.0:{}", self.port); 46 | 47 | for stream in self.listener.incoming() { 48 | let stream = stream?; 49 | let mut request_handler = RequestHandler::new(stream); 50 | let status_clone = self.status.clone(); 51 | let stats_updater = self.stats_updater.clone(); 52 | let _ = self.pool.execute(move || { 53 | if let Err(error) = request_handler.handle(status_clone, stats_updater) { 54 | error!( 55 | "An error occurred while attempting to handle a request: {:?}", 56 | error 57 | ); 58 | } 59 | }); 60 | } 61 | Ok(()) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /dtracker/src/http_server/thread_pool/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod pool; 2 | pub mod worker; 3 | -------------------------------------------------------------------------------- /dtracker/src/http_server/thread_pool/pool.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{ 2 | mpsc::{self, channel, Sender}, 3 | Arc, Mutex, 4 | }; 5 | 6 | use crate::http_server::thread_pool::worker::{Message, Worker}; 7 | use tracing::{error, info}; 8 | 9 | pub enum ThreadPoolError { 10 | MessageSendError(mpsc::SendError), 11 | } 12 | 13 | /// Struct that represents a thread pool that spawns a specified number of worker threads and 
allows to process connections concurrently. 14 | /// Each idle thread in the pool is available to handle a task. 15 | /// When a thread is done processing its task, it is returned to the pool of idle threads, ready to handle a new task. 16 | pub struct ThreadPool { 17 | workers: Vec, 18 | sender: Sender, 19 | } 20 | 21 | impl ThreadPool { 22 | /// Creates a new ThreadPool with a given size. 23 | /// The size is the number of threads in the pool. 24 | /// If the size is zero or a negative number, the `new` function will panic. 25 | pub fn new(size: usize) -> ThreadPool { 26 | assert!(size > 0); 27 | 28 | let (sender, receiver) = channel(); 29 | 30 | let receiver = Arc::new(Mutex::new(receiver)); 31 | 32 | let mut workers = Vec::with_capacity(size); 33 | 34 | for id in 0..size { 35 | workers.push(Worker::new(id, Arc::clone(&receiver))); 36 | } 37 | 38 | ThreadPool { workers, sender } 39 | } 40 | 41 | /// Receives a closure and assigns it to a thread in the pool to run. 42 | pub fn execute(&self, closure: F) -> Result<(), ThreadPoolError> 43 | where 44 | F: FnOnce() + Send + 'static, 45 | { 46 | let job = Box::new(closure); 47 | 48 | self.sender 49 | .send(Message::NewJob(job)) 50 | .map_err(ThreadPoolError::MessageSendError)?; 51 | 52 | Ok(()) 53 | } 54 | } 55 | 56 | impl Drop for ThreadPool { 57 | fn drop(&mut self) { 58 | info!("Sending terminate message to all workers."); 59 | 60 | for _ in &self.workers { 61 | if self.sender.send(Message::Terminate).is_err() { 62 | error!("An error occurred while attempting to drop the thread pool."); 63 | }; 64 | } 65 | 66 | info!("Shutting down all workers."); 67 | 68 | for worker in &mut self.workers { 69 | info!("Shutting down worker {}", worker.id); 70 | if let Some(thread) = worker.thread.take() { 71 | if thread.join().is_err() { 72 | error!("An error occurred while attempting to join a thread pool thread."); 73 | }; 74 | } 75 | } 76 | } 77 | } 78 | 
-------------------------------------------------------------------------------- /dtracker/src/http_server/thread_pool/worker.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | sync::{mpsc::Receiver, Arc, Mutex}, 3 | thread, 4 | }; 5 | 6 | use tracing::info; 7 | 8 | type Job = Box; 9 | 10 | pub enum Message { 11 | NewJob(Job), 12 | Terminate, 13 | } 14 | /// Struct responsible for sending code from the ThreadPool to a Thread. 15 | pub struct Worker { 16 | // TODO: solve public attributes 17 | pub id: usize, 18 | pub thread: Option>, 19 | } 20 | 21 | impl Worker { 22 | /// Returns a new Worker instance that holds the `id` and a thread spawned with an empty closure. 23 | pub fn new(id: usize, receiver: Arc>>) -> Worker { 24 | let thread = thread::spawn(move || loop { 25 | while let Ok(message) = receiver.lock().unwrap().recv() { 26 | // unwrap is safe because we are the only one using the Receiver. 27 | match message { 28 | Message::NewJob(job) => { 29 | info!("Worker {} got a job; executing.", id); 30 | job(); 31 | } 32 | Message::Terminate => { 33 | info!("Worker {} was told to terminate.", id); 34 | break; 35 | } 36 | } 37 | } 38 | }); 39 | 40 | Worker { 41 | id, 42 | thread: Some(thread), 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /dtracker/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod announce; 2 | pub mod bt_tracker; 3 | pub mod http; 4 | pub mod http_server; 5 | pub mod stats; 6 | pub mod torrent_swarm; 7 | pub mod tracker_peer; 8 | pub mod tracker_status; 9 | -------------------------------------------------------------------------------- /dtracker/src/main.rs: -------------------------------------------------------------------------------- 1 | use dtracker::bt_tracker::tracker::BtTracker; 2 | use std::env; 3 | use tracing::error; 4 | 5 | fn main() { 6 | // install global collector 
configured based on RUST_LOG env var. 7 | tracing_subscriber::fmt::init(); 8 | 9 | if env::args().count() != 2 { 10 | return error!("Incorrect number of arguments. Only a port number should be passed"); 11 | }; 12 | let port = match env::args().last().unwrap() { 13 | s if s.parse::().is_ok() => s.parse::().unwrap(), 14 | _ => return error!("Invalid port number"), 15 | }; 16 | 17 | match BtTracker::init(port) { 18 | Ok(tracker) => match tracker.run() { 19 | Ok(_) => (), 20 | Err(e) => error!("Error: {:?}", e), 21 | }, 22 | Err(error) => { 23 | error!("Error: {:?}", error); 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /dtracker/src/stats/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod stats_response; 2 | pub mod stats_updater; 3 | -------------------------------------------------------------------------------- /dtracker/src/stats/stats_response.rs: -------------------------------------------------------------------------------- 1 | use super::stats_updater::StatsUpdater; 2 | use crate::tracker_status::current_tracker_stats::CurrentTrackerStats; 3 | use chrono::Duration; 4 | use serde::{Deserialize, Serialize}; 5 | use std::{collections::HashMap, sync::Arc}; 6 | 7 | /// Struct that represents the response of the stats request. 8 | /// 9 | /// ## Fields 10 | /// * `bucket_size_in_minutes`: The time interval in minutes of the bucket. 11 | /// * `content`: A `Vec` containing the history of the stats. 12 | #[derive(Debug, Serialize, Deserialize)] 13 | pub struct StatsResponse { 14 | pub bucket_size_in_minutes: i64, 15 | pub content: Vec, 16 | } 17 | 18 | /// Posible stats request errors. 19 | pub enum StatsResponseError { 20 | InvalidQueryParamError, 21 | } 22 | 23 | impl StatsResponse { 24 | /// Creates a new `StatsResponse` from the query parameters and a StatsUpdater. If the query parameters are invalid, an `InvalidQueryParamError` is returned. 
25 | /// 26 | /// ## Returns 27 | /// * `Result`: The response of the stats request. 28 | pub fn from( 29 | query_params: HashMap, 30 | stats_updater: Arc, 31 | ) -> Result { 32 | let since_in_hours = query_params 33 | .get("since") 34 | .ok_or(StatsResponseError::InvalidQueryParamError)? 35 | .parse::() 36 | .map_err(|_| StatsResponseError::InvalidQueryParamError)?; 37 | 38 | let history = stats_updater.get_history(Duration::hours(since_in_hours as i64)); 39 | 40 | Ok(Self { 41 | bucket_size_in_minutes: stats_updater.get_timeout().num_minutes(), 42 | content: history, 43 | }) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /dtracker/src/stats/stats_updater.rs: -------------------------------------------------------------------------------- 1 | use chrono::Duration; 2 | use std::sync::{Mutex, MutexGuard}; 3 | use std::{sync::Arc, thread::sleep}; 4 | 5 | use crate::tracker_status::atomic_tracker_status::AtomicTrackerStatus; 6 | use crate::tracker_status::current_tracker_stats::CurrentTrackerStats; 7 | use tracing::{info, warn}; 8 | 9 | // for 1 month it takes 0.5 miliseconds to update the stats. And 0.5 Megabytes to store the stats. 10 | const MAX_DAYS_TO_KEEP_STATS: u64 = 30; 11 | 12 | /// Struct that represents the current status of the stats. 13 | #[derive(Debug)] 14 | pub struct StatsUpdater { 15 | stats_history: Mutex>, 16 | duration: chrono::Duration, 17 | tracker_status: Arc, 18 | } 19 | 20 | impl StatsUpdater { 21 | /// Creates a new `StatsUpdater`. 22 | pub fn new(tracker_status: Arc, timeout: Duration) -> Self { 23 | Self { 24 | duration: timeout, 25 | tracker_status, 26 | stats_history: Mutex::new(Vec::new()), 27 | } 28 | } 29 | 30 | /// Starts the loop that updates the stats every `duration` seconds and saves them in the history. 
31 | pub fn run(&self) { 32 | loop { 33 | self.tracker_status.remove_inactive_peers(); 34 | let mut stats_history = self.lock_stats_history(); 35 | 36 | // If we reached the maximum number of days to keep stats, remove the oldest one. 37 | let max_secs_to_keep_stats = MAX_DAYS_TO_KEEP_STATS * 24 * 60 * 60; 38 | if self.duration.num_seconds() * stats_history.len() as i64 39 | > max_secs_to_keep_stats as i64 40 | { 41 | stats_history.rotate_left(1); 42 | stats_history.pop(); 43 | } 44 | 45 | stats_history.push(self.tracker_status.get_global_statistics()); 46 | info!("Stats updated"); 47 | let std_duration = match self.duration.to_std() { 48 | Ok(std_duration) => std_duration, 49 | Err(_) => { 50 | warn!("Error converting duration to std::time::Duration"); 51 | continue; 52 | } 53 | }; 54 | // Drop lock before sleeping. 55 | drop(stats_history); 56 | sleep(std_duration); 57 | } 58 | } 59 | 60 | /// Gets the history of the stats since a given time. If the is less than `since` histories, all the histories are returned. 61 | /// 62 | /// ## Returns 63 | /// * `Vec`: The history of the stats. The total number of torrents, seeders and leechers at a given time. 64 | pub fn get_history(&self, since: chrono::Duration) -> Vec { 65 | let stats_history = self.lock_stats_history(); 66 | let since_secs = since.num_seconds(); 67 | let timeout_secs = self.duration.num_seconds(); 68 | 69 | let number_of_histories_wanted = since_secs / timeout_secs; 70 | 71 | if number_of_histories_wanted > stats_history.len() as i64 { 72 | return stats_history.clone(); 73 | } 74 | stats_history[stats_history.len() - number_of_histories_wanted as usize..].to_vec() 75 | } 76 | 77 | /// Gets the duration timeout of the stats. 
78 | pub fn get_timeout(&self) -> chrono::Duration { 79 | self.duration 80 | } 81 | 82 | fn lock_stats_history(&self) -> MutexGuard> { 83 | self.stats_history.lock().unwrap() // unwrap is safe because we are the only one who can modify the stats_history 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /dtracker/src/torrent_swarm/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod swarm; 2 | -------------------------------------------------------------------------------- /dtracker/src/torrent_swarm/swarm.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use chrono::{Duration, Local}; 4 | use rand::{seq::IteratorRandom, thread_rng}; 5 | 6 | use crate::tracker_peer::peer::Peer; 7 | 8 | type PeerId = [u8; 20]; 9 | 10 | /// Struct that represents the status of a torrent. 11 | /// 12 | /// ## Fields 13 | /// * `peer_timeout`: The time after which a peer is considered as inactive. 14 | /// * `seeders`: The current amount of seeders of the torrent. 15 | /// * `leechers`: The current amount of leechers of the torrent. 16 | #[derive(Debug, Clone)] 17 | pub struct Swarm { 18 | peers: HashMap, 19 | peer_timeout: Duration, 20 | seeders: u32, 21 | leechers: u32, 22 | } 23 | 24 | /// Struct that represents the response to an active peers request. 25 | /// 26 | /// ## Fields 27 | /// * `peers`: The current peers of the swarm. 28 | /// * `seeders`: The current amount of seeders of the swarm. 29 | /// * `leechers`: The current amount of leechers of the swarm. 30 | #[derive(Debug, Clone)] 31 | pub struct ActivePeers { 32 | pub peers: Vec, 33 | pub seeders: u32, 34 | pub leechers: u32, 35 | } 36 | 37 | impl Swarm { 38 | /// Creates a new swarm. 39 | /// 40 | /// ## Arguments 41 | /// * `peer_timeout`: The timeout for a peer to be considered inactive. 
42 | pub fn new(peer_timeout: Duration) -> Self { 43 | Self { 44 | peers: HashMap::new(), 45 | peer_timeout, 46 | seeders: 0, 47 | leechers: 0, 48 | } 49 | } 50 | 51 | pub fn announce(&mut self, incoming_peer: Peer) { 52 | let old_peer = self.peers.insert(incoming_peer.id, incoming_peer.clone()); 53 | // If the peer was already in the swarm, we update it accordingly. 54 | 55 | if let Some(old_peer) = old_peer { 56 | if old_peer.is_leecher() { 57 | self.leechers -= 1; 58 | } else { 59 | self.seeders -= 1; 60 | } 61 | }; 62 | 63 | if incoming_peer.is_leecher() { 64 | self.leechers += 1; 65 | } else { 66 | self.seeders += 1; 67 | } 68 | } 69 | /// Returns an `ActivePeers` Struct containing a vector of active peers, the amount of seeders in the swarm and the amount of leechers in the swarm. 70 | /// 71 | /// ## Arguments 72 | /// * `wanted_peers`: The amount of active peers to include in the vector, unless the swarm does not contain as many active peers, in which case it equals the number of elements available. 73 | pub fn get_active_peers(&self, wanted_peers: u32) -> ActivePeers { 74 | let peers = self.peers.values().cloned(); 75 | 76 | let mut rng = thread_rng(); 77 | let active_peers = peers 78 | .into_iter() 79 | .choose_multiple(&mut rng, wanted_peers as usize); 80 | 81 | ActivePeers { 82 | peers: active_peers, 83 | seeders: self.seeders, 84 | leechers: self.leechers, 85 | } 86 | } 87 | 88 | /// Returns the current amount of seeders and leechers in the swarm. 89 | pub fn get_current_seeders_and_leechers(&self) -> (u32, u32) { 90 | (self.seeders, self.leechers) 91 | } 92 | 93 | /// Removes any inactive peers from the swarm. 
94 | pub fn remove_inactive_peers(&mut self) { 95 | self.peers.retain(|_, peer| { 96 | let last_seen = peer.get_last_seen(); 97 | if Local::now().signed_duration_since(last_seen) > self.peer_timeout { 98 | if peer.is_leecher() { 99 | self.leechers -= 1; 100 | } else { 101 | self.seeders -= 1; 102 | } 103 | false 104 | } else { 105 | true 106 | } 107 | }); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /dtracker/src/tracker_peer/event.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | /// Posible announce events that can be sent to the tracker. 4 | /// 5 | /// ## Fields 6 | /// * `started`: The peer has started downloading the torrent. 7 | /// * `stopped`: The peer has stopped downloading the torrent. 8 | /// * `completed`: The peer has completed downloading the torrent. 9 | #[derive(Debug, Clone, PartialEq, Eq)] 10 | pub enum PeerEvent { 11 | Started, 12 | Stopped, 13 | Completed, 14 | } 15 | 16 | impl FromStr for PeerEvent { 17 | type Err = (); 18 | fn from_str(s: &str) -> Result { 19 | match s { 20 | "started" => Ok(PeerEvent::Started), 21 | "stopped" => Ok(PeerEvent::Stopped), 22 | "completed" => Ok(PeerEvent::Completed), 23 | _ => Err(()), 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /dtracker/src/tracker_peer/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod event; 2 | pub mod peer; 3 | pub mod peer_status; 4 | -------------------------------------------------------------------------------- /dtracker/src/tracker_peer/peer.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | use bencoder::bencode::ToBencode; 4 | use chrono::{DateTime, Local}; 5 | 6 | use crate::announce::announce_request::AnnounceRequest; 7 | 8 | use super::{event::PeerEvent, 
peer_status::PeerStatus};

/// A peer as known by the tracker.
///
/// ## Fields
/// * `id`: The 20-byte id of the peer.
/// * `ip`: The ip the peer announced (or was observed from).
/// * `port`: The peer's listening port.
/// * `status`: The current status of the peer.
/// * `key`: The key used to differentiate between other peers *(Optional)*.
#[derive(Debug, Clone)]
pub struct Peer {
    pub id: [u8; 20],
    pub ip: String,
    pub port: u16,
    pub status: PeerStatus,
    pub key: Option<String>, // see wiki.theory.org: https://bit.ly/3aTXQ3u
}
impl Peer {
    /// Builds a peer from its parts.
    pub fn new(
        id: [u8; 20],
        ip: String,
        port: u16,
        key: Option<String>,
        status: PeerStatus,
    ) -> Peer {
        Peer {
            id,
            ip,
            port,
            status,
            key,
        }
    }

    /// Builds a peer from an `AnnounceRequest`, falling back to the observed
    /// socket address when the request carried no explicit `ip`.
    pub fn from_request(request: AnnounceRequest, peer_ip: String) -> Self {
        let status = PeerStatus::new(
            request.uploaded,
            request.downloaded,
            request.left,
            request.event,
        );
        let ip = request.ip.unwrap_or(peer_ip);

        Self::new(request.peer_id, ip, request.port, request.key, status)
    }

    /// Timestamp of the peer's most recent announce.
    pub fn get_last_seen(&self) -> DateTime<Local> {
        self.status.last_seen()
    }

    /// Returns `true` if the peer is acting as a leecher: it still has bytes
    /// left to download, or its last announced event was something other
    /// than `Completed`.
    pub fn is_leecher(&self) -> bool {
        self.status.left > 0
            || matches!(self.status.event, Some(ref event) if *event != PeerEvent::Completed)
    }

    /// Returns `true` if the peer is acting as a seeder: nothing left to
    /// download, or an explicit `Completed` event.
    pub fn is_seeder(&self) -> bool {
        self.status.left == 0 || self.status.event == Some(PeerEvent::Completed)
    }
}

impl ToBencode for Peer {
    /// Encodes the peer as the bencoded dictionary announce responses use:
    /// `peer_id`, `ip` and `port`.
    fn to_bencode(&self) -> bencoder::bencode::Bencode {
        let mut dict = BTreeMap::new();
        dict.insert(b"peer_id".to_vec(), self.id.to_vec().to_bencode());
        dict.insert(b"ip".to_vec(), self.ip.to_bencode());
        dict.insert(b"port".to_vec(), self.port.to_bencode());
        dict.to_bencode()
    }
}

use super::event::PeerEvent;
use chrono::{DateTime, Local};

/// Struct that represents a peer status.
///
/// ## Fields
/// * `uploaded`: The number of bytes uploaded by the peer.
/// * `downloaded`: The number of bytes downloaded by the peer.
/// * `left`: The number of bytes left to download.
/// * `event`: The last event that the peer has sent *(Optional)*.
/// * `last_seen`: The last time the peer status was updated.
#[derive(Debug, Clone)]
pub struct PeerStatus {
    pub uploaded: u64,
    pub downloaded: u64,
    pub left: u64,
    pub event: Option<PeerEvent>,
    pub last_seen: DateTime<Local>,
}

impl PeerStatus {
    /// Creates a new peer status.
23 | pub fn new(uploaded: u64, downloaded: u64, left: u64, event: Option) -> PeerStatus { 24 | PeerStatus { 25 | uploaded, 26 | downloaded, 27 | left, 28 | event, 29 | last_seen: Local::now(), 30 | } 31 | } 32 | 33 | pub fn last_seen(&self) -> DateTime { 34 | self.last_seen 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /dtracker/src/tracker_status/atomic_tracker_status.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | sync::{Mutex, MutexGuard}, 4 | }; 5 | 6 | use chrono::Duration; 7 | 8 | use crate::{ 9 | torrent_swarm::swarm::{ActivePeers, Swarm}, 10 | tracker_peer::peer::Peer, 11 | }; 12 | 13 | use super::current_tracker_stats::CurrentTrackerStats; 14 | 15 | const PEER_HOURS_TIMEOUT: i64 = 1; 16 | type InfoHash = [u8; 20]; 17 | 18 | /// Struct that represents the current status of the tracker. 19 | /// 20 | /// ## Fields 21 | /// * `torrents`: The current torrents supported by the tracker. The key is the torrent `Info Hash`. The value is the `Torrent Status`. 22 | #[derive(Debug)] 23 | pub struct AtomicTrackerStatus { 24 | torrent_swarms: Mutex>, 25 | } 26 | 27 | impl Default for AtomicTrackerStatus { 28 | /// Creates a new tracker status. 29 | fn default() -> Self { 30 | AtomicTrackerStatus { 31 | torrent_swarms: Mutex::new(HashMap::new()), 32 | } 33 | } 34 | } 35 | 36 | impl AtomicTrackerStatus { 37 | /// Adds or updates a peer for a torrent in the tracker status and returns an `ActivePeers` struct. 38 | /// 39 | /// ## Arguments 40 | /// * `info_hash`: The info hash of the torrent. 41 | /// * `peer`: The peer to add or update. 42 | /// * `numwant`: The number of peers wanted by the client. 43 | /// 44 | /// ## Returns 45 | /// * `ActivePeers`: Struct containing the peers of the torrent requested, the number of seeders and leechers. 
46 | pub fn incoming_peer(&self, info_hash: InfoHash, peer: Peer, wanted_peers: u32) -> ActivePeers { 47 | let mut swarms = self.lock_swarms(); 48 | let torrent_swarm = swarms 49 | .entry(info_hash) 50 | .or_insert_with(|| Swarm::new(Duration::hours(PEER_HOURS_TIMEOUT))); 51 | 52 | torrent_swarm.announce(peer); 53 | 54 | torrent_swarm.get_active_peers(wanted_peers) 55 | } 56 | 57 | /// Gets the current statistics of the tracker. 58 | /// 59 | /// ## Returns 60 | /// * `CurrentTrackerStats`: Struct containing the total number of torrents, seeders and leechers. 61 | pub fn get_global_statistics(&self) -> CurrentTrackerStats { 62 | let swarms = self.lock_swarms(); 63 | 64 | let total_torrents = swarms.len() as u32; 65 | let mut global_seeders = 0; 66 | let mut global_leechers = 0; 67 | 68 | for swarm in swarms.values() { 69 | let (seeders, leechers) = swarm.get_current_seeders_and_leechers(); 70 | global_seeders += seeders; 71 | global_leechers += leechers; 72 | } 73 | 74 | CurrentTrackerStats::new(total_torrents, global_seeders, global_leechers) 75 | } 76 | 77 | /// Removes any inactive peers from each swarm. 78 | pub fn remove_inactive_peers(&self) { 79 | for swarm in self.lock_swarms().values_mut() { 80 | swarm.remove_inactive_peers(); 81 | } 82 | } 83 | 84 | fn lock_swarms(&self) -> MutexGuard> { 85 | self.torrent_swarms.lock().unwrap() // Unwrap is safe here because we're the only ones who call this function. 
86 | } 87 | } 88 | 89 | #[cfg(test)] 90 | mod tests { 91 | use std::ops::Sub; 92 | 93 | use chrono::Local; 94 | 95 | use crate::tracker_peer::peer_status::PeerStatus; 96 | 97 | use super::*; 98 | 99 | #[test] 100 | fn test_incoming_seeder() { 101 | let tracker_status = AtomicTrackerStatus::default(); 102 | let a_seeder = create_test_seeder([0; 20]); 103 | let info_hash = [0; 20]; 104 | 105 | tracker_status.incoming_peer(info_hash, a_seeder, 50); 106 | 107 | assert_there_is_only_one_seeder(&tracker_status, info_hash); 108 | } 109 | 110 | #[test] 111 | fn test_incoming_leecher() { 112 | let tracker_status = AtomicTrackerStatus::default(); 113 | let a_leecher = create_test_leecher([0; 20]); 114 | let info_hash = [0; 20]; 115 | 116 | tracker_status.incoming_peer(info_hash, a_leecher, 50); 117 | 118 | assert_there_is_only_one_leecher(&tracker_status, info_hash); 119 | } 120 | 121 | #[test] 122 | fn test_multiple_incoming_peers_on_the_same_torrent() { 123 | let tracker_status = AtomicTrackerStatus::default(); 124 | let a_peer = create_test_seeder([0; 20]); 125 | let another_peer = create_test_leecher([1; 20]); 126 | let info_hash = [0; 20]; 127 | 128 | tracker_status.incoming_peer(info_hash, a_peer, 50); 129 | tracker_status.incoming_peer(info_hash, another_peer, 50); 130 | 131 | assert_there_are_only_these_peers(&tracker_status, info_hash, 1, 1); 132 | } 133 | 134 | #[test] 135 | fn test_returning_peer() { 136 | let tracker_status = AtomicTrackerStatus::default(); 137 | let peer_id = [0; 20]; 138 | let a_peer = create_test_leecher(peer_id); 139 | let info_hash = [0; 20]; 140 | 141 | tracker_status.incoming_peer(info_hash, a_peer, 50); 142 | tracker_status.incoming_peer(info_hash, create_test_seeder(peer_id), 50); 143 | 144 | assert_there_is_only_one_seeder(&tracker_status, info_hash); 145 | } 146 | 147 | #[test] 148 | fn test_peers_on_multiple_torrents() { 149 | let tracker_status = AtomicTrackerStatus::default(); 150 | let a_peer = create_test_leecher([0; 20]); 151 | 
let another_peer = create_test_leecher([1; 20]); 152 | let an_info_hash = [0; 20]; 153 | let another_info_hash = [1; 20]; 154 | 155 | tracker_status.incoming_peer(an_info_hash, a_peer, 50); 156 | tracker_status.incoming_peer(another_info_hash, another_peer, 50); 157 | 158 | assert_there_is_only_one_leecher(&tracker_status, an_info_hash); 159 | assert_there_is_only_one_leecher(&tracker_status, another_info_hash); 160 | } 161 | 162 | #[test] 163 | fn test_peer_can_get_inactive() { 164 | let tracker_status = AtomicTrackerStatus::default(); 165 | let peer_id = [0; 20]; 166 | let a_peer = create_test_seeder(peer_id); 167 | let an_info_hash = [0; 20]; 168 | tracker_status.incoming_peer(an_info_hash, a_peer, 50); 169 | 170 | let inactive_peer = create_inactive_peer(peer_id); 171 | tracker_status.incoming_peer(an_info_hash, inactive_peer, 50); 172 | tracker_status.remove_inactive_peers(); 173 | 174 | assert_there_are_only_these_peers(&tracker_status, an_info_hash, 0, 0); 175 | } 176 | 177 | fn assert_there_are_only_these_peers( 178 | status: &AtomicTrackerStatus, 179 | info_hash: [u8; 20], 180 | expected_seeders: u32, 181 | expected_leechers: u32, 182 | ) { 183 | let (active_peers, seeders, leechers) = 184 | get_active_peers_for(status, info_hash, 50).unwrap(); 185 | assert_eq!( 186 | active_peers.len(), 187 | (expected_seeders + expected_leechers) as usize 188 | ); 189 | assert_eq!(seeders, expected_seeders); 190 | assert_eq!(leechers, expected_leechers); 191 | } 192 | 193 | fn assert_there_is_only_one_seeder(status: &AtomicTrackerStatus, info_hash: [u8; 20]) { 194 | assert_there_are_only_these_peers(status, info_hash, 1, 0); 195 | let (active_peers, _, _) = get_active_peers_for(status, info_hash, 50).unwrap(); 196 | assert!(active_peers[0].is_seeder()); 197 | } 198 | 199 | fn assert_there_is_only_one_leecher(status: &AtomicTrackerStatus, info_hash: [u8; 20]) { 200 | assert_there_are_only_these_peers(status, info_hash, 0, 1); 201 | let (active_peers, _, _) = 
get_active_peers_for(status, info_hash, 50).unwrap(); 202 | assert!(active_peers[0].is_leecher()); 203 | } 204 | 205 | pub fn get_active_peers_for( 206 | status: &AtomicTrackerStatus, 207 | info_hash: [u8; 20], 208 | wanted_peers: u32, 209 | ) -> Option<(Vec, u32, u32)> { 210 | let all_swarms = status.lock_swarms(); 211 | let swarm = all_swarms.get(&info_hash)?; 212 | 213 | let active_peers = swarm.get_active_peers(wanted_peers); 214 | 215 | Some(( 216 | active_peers.peers, 217 | active_peers.seeders, 218 | active_peers.leechers, 219 | )) 220 | } 221 | 222 | fn create_test_seeder(peer_id: [u8; 20]) -> Peer { 223 | let peer_status = PeerStatus { 224 | uploaded: 0, 225 | downloaded: 0, 226 | left: 0, 227 | event: None, 228 | last_seen: Local::now(), 229 | }; 230 | 231 | Peer::new(peer_id, "0".to_string(), 0, None, peer_status) 232 | } 233 | 234 | fn create_test_leecher(peer_id: [u8; 20]) -> Peer { 235 | let peer_status = PeerStatus { 236 | uploaded: 0, 237 | downloaded: 0, 238 | left: 3000, 239 | event: None, 240 | last_seen: Local::now(), 241 | }; 242 | 243 | Peer::new(peer_id, "0".to_string(), 0, None, peer_status) 244 | } 245 | 246 | fn create_inactive_peer(peer_id: [u8; 20]) -> Peer { 247 | let old_date = Local::now().sub(Duration::hours(PEER_HOURS_TIMEOUT) * 2); 248 | let peer_status = PeerStatus { 249 | uploaded: 0, 250 | downloaded: 0, 251 | left: 0, 252 | event: None, 253 | last_seen: old_date, 254 | }; 255 | 256 | Peer::new(peer_id, "0".to_string(), 0, None, peer_status) 257 | } 258 | } 259 | -------------------------------------------------------------------------------- /dtracker/src/tracker_status/current_tracker_stats.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | 3 | /// Struct containing the current stats of the tracker. 4 | /// 5 | /// ## Fields 6 | /// * `torrents`: The total number of torrents in the tracker. 
7 | /// * `seeders`: The total number of seeders in the tracker. 8 | /// * `leechers`: The total number of leechers in the tracker. 9 | #[derive(Debug, Clone, Copy, Serialize, Deserialize)] 10 | pub struct CurrentTrackerStats { 11 | pub torrents: u32, 12 | pub seeders: u32, 13 | pub leechers: u32, 14 | } 15 | 16 | impl CurrentTrackerStats { 17 | /// Creates a new `CurrentTrackerStats`. 18 | pub fn new(torrents: u32, seeders: u32, leechers: u32) -> Self { 19 | Self { 20 | torrents, 21 | seeders, 22 | leechers, 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /dtracker/src/tracker_status/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod atomic_tracker_status; 2 | pub mod current_tracker_stats; 3 | -------------------------------------------------------------------------------- /dtracker/templates/graph.js: -------------------------------------------------------------------------------- 1 | let chart; 2 | const trackerBaseURL = "http://127.0.0.1:8080"; 3 | 4 | const getTimeRanges = (interval_in_minutes, size) => { 5 | const ranges = []; 6 | const date = new Date(); 7 | 8 | for (let i = 0; i < size; i++) { 9 | const millis = i * interval_in_minutes * 60 * 1000; 10 | const new_date = new Date(date.getTime() - millis); 11 | ranges.push(new_date); 12 | } 13 | 14 | return ranges; 15 | }; 16 | 17 | const padArrayStartWithZeroes = (arr, len) => { 18 | return arr.concat(Array(len - arr.length).fill(0)); 19 | }; 20 | 21 | const buildLabels = (bucket_size_in_minutes, since, groupBy) => { 22 | return getTimeRanges( 23 | bucket_size_in_minutes * groupBy, 24 | (since * 60) / groupBy 25 | ); 26 | }; 27 | 28 | const buildData = (type, contentSorted, since, groupBy) => { 29 | const element = type.toLowerCase(); 30 | 31 | return padArrayStartWithZeroes( 32 | contentSorted.map((d) => d[element]).filter((_, i) => i % groupBy === 0), 33 | Math.ceil((since * 60) / groupBy) 34 | ); 
35 | }; 36 | 37 | const buildDatasets = (content, since, groupBy) => { 38 | const contentSorted = content.reverse(); 39 | 40 | return [ 41 | { 42 | label: "Torrents", 43 | backgroundColor: "orange", 44 | borderColor: "orange", 45 | data: buildData("Torrents", contentSorted, since, groupBy), 46 | }, 47 | { 48 | label: "Seeders", 49 | backgroundColor: "blue", 50 | borderColor: "blue", 51 | data: buildData("Seeders", contentSorted, since, groupBy), 52 | }, 53 | { 54 | label: "Leechers", 55 | backgroundColor: "green", 56 | borderColor: "green", 57 | data: buildData("Leechers", contentSorted, since, groupBy), 58 | }, 59 | ]; 60 | }; 61 | 62 | const buildChart = (content, bucket_size_in_minutes, since, groupBy) => { 63 | const labels = buildLabels(bucket_size_in_minutes, since, groupBy); 64 | const datasets = buildDatasets( 65 | content, 66 | bucket_size_in_minutes, 67 | since, 68 | groupBy 69 | ); 70 | 71 | const data = { 72 | labels: labels, 73 | datasets: datasets, 74 | }; 75 | 76 | const config = { 77 | type: "line", 78 | data: data, 79 | options: { 80 | elements: { 81 | point: { 82 | radius: 0, 83 | }, 84 | }, 85 | interaction: { 86 | mode: "index", 87 | intersect: false, 88 | }, 89 | scales: { 90 | x: { 91 | type: "time", 92 | time: { 93 | unit: "minute", 94 | }, 95 | ticks: { 96 | major: { 97 | enabled: true, 98 | }, 99 | }, 100 | }, 101 | y: { 102 | beginAtZero: true, 103 | title: { 104 | display: true, 105 | text: "Count", 106 | }, 107 | }, 108 | }, 109 | }, 110 | }; 111 | 112 | const myChart = new Chart(document.getElementById("myChart"), config); 113 | 114 | return myChart; 115 | }; 116 | 117 | const fetchData = async (since) => { 118 | const res = await fetch(`${trackerBaseURL}/stats?since=${since}`, { 119 | method: "GET", 120 | }); 121 | return await res.json(); 122 | }; 123 | 124 | let since = 1; // default: Ultima hora 125 | let groupBy = 1; // default: Por minuto 126 | 127 | const updateChart = (res_data) => { 128 | chart.data.labels = buildLabels( 129 | 
res_data.bucket_size_in_minutes, 130 | since, 131 | groupBy 132 | ); 133 | 134 | const contentSorted = res_data.content.reverse(); 135 | chart.legend.legendItems.forEach((item, index) => { 136 | if (!item.hidden) { 137 | chart.data.datasets[index].data = buildData( 138 | item.text, 139 | contentSorted, 140 | since, 141 | groupBy 142 | ); 143 | } 144 | }); 145 | chart.update("none"); 146 | }; 147 | 148 | window.addEventListener("load", async (_e) => { 149 | let res_data = await fetchData(since); 150 | 151 | chart = buildChart( 152 | res_data.content, 153 | res_data.bucket_size_in_minutes, 154 | since, 155 | groupBy 156 | ); 157 | 158 | setInterval(() => { 159 | fetchData(since).then((res_data) => { 160 | updateChart(res_data); 161 | }); 162 | }, 500); 163 | }); 164 | 165 | const selectSince = document.querySelector("#since"); 166 | const selectGroupBy = document.querySelector("#groupBy"); 167 | 168 | selectSince.addEventListener("change", async (e) => { 169 | since = Number(e.target.value); 170 | const res_data = await fetchData(since); 171 | updateChart(res_data); 172 | }); 173 | 174 | selectGroupBy.addEventListener("change", async (e) => { 175 | groupBy = Number(e.target.value); 176 | const res_data = await fetchData(since); 177 | updateChart(res_data); 178 | }); 179 | -------------------------------------------------------------------------------- /dtracker/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | dTracker | Statistics 5 | 6 | 7 | 8 | 9 | 10 | 11 |
12 | 18 | 19 | 25 |
26 | 27 |
28 | 29 |
30 | 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /torrents/file1.torrent: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lambdaclass/libtorrent-rs/2620a925104c20183ea5ebe4018f5630c88980ab/torrents/file1.torrent -------------------------------------------------------------------------------- /torrents/file2.torrent: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lambdaclass/libtorrent-rs/2620a925104c20183ea5ebe4018f5630c88980ab/torrents/file2.torrent -------------------------------------------------------------------------------- /torrents/file3.torrent: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lambdaclass/libtorrent-rs/2620a925104c20183ea5ebe4018f5630c88980ab/torrents/file3.torrent -------------------------------------------------------------------------------- /url_encoder/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "url_encoder" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | -------------------------------------------------------------------------------- /url_encoder/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod url_encoder; 2 | -------------------------------------------------------------------------------- /url_encoder/src/url_encoder.rs: -------------------------------------------------------------------------------- 1 | /// Takes an hex string and applies Percent-Encoding, returning an encoded version. 
pub fn encode(hex_string: &str) -> String {
    if hex_string.is_empty() {
        return hex_string.to_string();
    }
    // Group the hex digits in pairs (one byte each) and join the pairs with
    // '%', then prepend the leading '%' so every byte is percent-escaped.
    // NOTE(review): the turbofish generic arguments below were stripped by the
    // extraction; reconstructed as the only element types this chain produces
    // (chars -> Vec<char>, each pair -> String, pairs -> Vec<String>).
    let mut encoded_hex_string = hex_string
        .chars()
        .collect::<Vec<char>>()
        .chunks(2)
        .map(|c| c.iter().collect::<String>())
        .collect::<Vec<String>>()
        .join("%");
    encoded_hex_string.insert(0, '%');
    encoded_hex_string
}

/// Takes an encoded string and decodes it back to a plain lowercase hex string.
///
/// Percent escapes (`%XY`) keep their two digits, lowercased; any other
/// character is replaced by the hex value of its first UTF-8 byte.
///
/// # Panics
/// Panics if the input ends with a truncated escape (a `%` followed by fewer
/// than two characters).
pub fn decode(hex_str: &str) -> String {
    let mut out = Vec::new();
    let mut iter = hex_str.chars();

    while let Some(c) = iter.next() {
        match c {
            '%' => {
                // Consume the escape's two hex digits, normalised to lowercase.
                let c1 = iter.next().unwrap();
                let c2 = iter.next().unwrap();
                out.push(c1.to_string().to_lowercase());
                out.push(c2.to_string().to_lowercase());
            }
            // Unescaped character: emit the hex of its first UTF-8 byte.
            _ => out.push(format!("{:x}", c.to_string().as_bytes()[0])),
        }
    }

    out.join("")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_encode_empty_string_returns_empty_string() {
        assert_eq!("", encode(""));
    }

    #[test]
    fn test_encode_info_hash() {
        let info_hash = "2c6b6858d61da9543d4231a71db4b1c9264b0685";
        let expected_info_hash = "%2c%6b%68%58%d6%1d%a9%54%3d%42%31%a7%1d%b4%b1%c9%26%4b%06%85";

        assert_eq!(expected_info_hash, encode(info_hash));
    }

    #[test]
    fn test_hex_decoder() {
        let infohash = "%124Vx%9A%BC%DE%F1%23Eg%89%AB%CD%EF%124Vx%9A";
        let infohash_bytes = super::decode(infohash);
        assert_eq!(infohash_bytes, "123456789abcdef123456789abcdef123456789a");
    }
}