├── Cargo.toml
├── README.md
└── src
    ├── crawler.rs
    ├── fetch.rs
    ├── main.rs
    └── parse.rs

/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "linkcrawler"
version = "0.1.0"
authors = ["tensor-programming"]

[dependencies]
hyper = "0.10.1"
html5ever = "0.21.0"
url = "0.2.37"
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# rust_web_crawler

# A simple multi-threaded web crawler built in Rust.

## Run `cargo run http://website.name/` to start the crawler, or `cargo build` to build an executable.

### Check out the YouTube tutorial for this project: [Rust Tutorial](https://youtu.be/LNABJvABhos). Subscribe to our [YouTube channel](https://www.youtube.com/channel/UCYqCZOwHbnPwyjawKfE21wg) for more content.

### Check out our blog at [tensor-programming.com](http://tensor-programming.com/).

### Follow us on [Twitter](https://twitter.com/TensorProgram), [Facebook](https://www.facebook.com/Tensor-Programming-1197847143611799/), and [Steemit](https://steemit.com/@tensor).
--------------------------------------------------------------------------------

/src/crawler.rs:
--------------------------------------------------------------------------------
use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;
use url::Url;

use fetch::{fetch_all_urls, url_status, UrlState};

const THREADS: i32 = 20;

pub struct Crawler {
    to_visit: Arc<Mutex<Vec<String>>>,
    active_count: Arc<Mutex<i32>>,
    url_states: Receiver<UrlState>,
}

impl Iterator for Crawler {
    type Item = UrlState;

    fn next(&mut self) -> Option<UrlState> {
        loop {
            match self.url_states.try_recv() {
                Ok(state) => return Some(state),
                Err(_) => {
                    let to_visit_val = self.to_visit.lock().unwrap();
                    let active_count_val = self.active_count.lock().unwrap();

                    // Finished only when the queue is empty and no worker is
                    // mid-request; otherwise keep polling.
                    if to_visit_val.is_empty() && *active_count_val == 0 {
                        return None;
                    } else {
                        continue;
                    }
                }
            }
        }
    }
}

fn crawl_worker_thread(
    domain: &str,
    to_visit: Arc<Mutex<Vec<String>>>,
    visited: Arc<Mutex<HashSet<String>>>,
    active_count: Arc<Mutex<i32>>,
    url_states: Sender<UrlState>,
) {
    loop {
        let current;
        {
            let mut to_visit_val = to_visit.lock().unwrap();
            let mut active_count_val = active_count.lock().unwrap();
            if to_visit_val.is_empty() {
                if *active_count_val > 0 {
                    // Another worker may still discover new URLs.
                    continue;
                } else {
                    break;
                }
            }
            current = to_visit_val.pop().unwrap();
            *active_count_val += 1;
            assert!(*active_count_val <= THREADS);
        }

        {
            let mut visited_val = visited.lock().unwrap();
            if visited_val.contains(&current) {
                let mut active_count_val = active_count.lock().unwrap();
                *active_count_val -= 1;
                continue;
            } else {
                visited_val.insert(current.to_owned());
            }
        }

        let state = url_status(domain, &current);
        if let UrlState::Accessible(ref url) = state {
            // Only follow links that stay on the starting domain.
            if url.domain() == Some(domain) {
                let new_urls = fetch_all_urls(url);

                let mut to_visit_val = to_visit.lock().unwrap();
                for new_url in new_urls {
                    to_visit_val.push(new_url);
                }
            }
        }

        {
            let mut active_count_val = active_count.lock().unwrap();
            *active_count_val -= 1;
            assert!(*active_count_val >= 0);
        }

        url_states.send(state).unwrap();
    }
}

pub fn crawl(domain: &str, start_url: &Url) -> Crawler {
    let to_visit = Arc::new(Mutex::new(vec![start_url.serialize()]));
    let active_count = Arc::new(Mutex::new(0));
    let visited = Arc::new(Mutex::new(HashSet::new()));

    let (tx, rx) = channel();

    let crawler = Crawler {
        to_visit: to_visit.clone(),
        active_count: active_count.clone(),
        url_states: rx,
    };

    for _ in 0..THREADS {
        let domain = domain.to_owned();
        let to_visit = to_visit.clone();
        let visited = visited.clone();
        let active_count = active_count.clone();
        let tx = tx.clone();

        thread::spawn(move || {
            crawl_worker_thread(&domain, to_visit, visited, active_count, tx);
        });
    }

    crawler
}
--------------------------------------------------------------------------------
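The termination logic in crawl_worker_thread is the subtle part: a worker may only exit when the queue is empty and no other worker is mid-request, because an in-flight request can still discover new URLs. Here is that rule in miniature as a self-contained sketch using plain integer tasks; all names here (run_workers, queue, active) are illustrative and not part of the crate:

use std::sync::{Arc, Mutex};
use std::thread;

// Sketch of the queue/active-count shutdown rule used by the crawler:
// stop only when the queue is empty AND no worker is mid-task, because
// a task still in flight may push more work onto the queue.
fn run_workers() {
    let queue = Arc::new(Mutex::new(vec![1, 2, 3]));
    let active = Arc::new(Mutex::new(0));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let (queue, active) = (queue.clone(), active.clone());
            thread::spawn(move || loop {
                let task = {
                    let mut q = queue.lock().unwrap();
                    let mut a = active.lock().unwrap();
                    match q.pop() {
                        Some(t) => {
                            *a += 1;
                            t
                        }
                        None if *a > 0 => continue, // a peer may still add work
                        None => break,              // genuinely done
                    }
                };
                println!("processed task {}", task); // real workers crawl here
                *active.lock().unwrap() -= 1;
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }
}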
/src/fetch.rs:
--------------------------------------------------------------------------------
extern crate hyper;
extern crate url;

use std::io::Read;
use std::thread;
use std::time::Duration;
use std::sync::mpsc::channel;
use std::fmt;

use self::hyper::Client;
use self::hyper::status::StatusCode;
use self::url::{ParseResult, Url, UrlParser};

use parse;

const TIMEOUT: u64 = 10;

#[derive(Debug, Clone)]
pub enum UrlState {
    Accessible(Url),
    BadStatus(Url, StatusCode),
    ConnectionFailed(Url),
    TimedOut(Url),
    Malformed(String),
}

impl fmt::Display for UrlState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            UrlState::Accessible(ref url) => format!("✔ {}", url).fmt(f),
            UrlState::BadStatus(ref url, ref status) => format!("x {} ({})", url, status).fmt(f),
            UrlState::ConnectionFailed(ref url) => format!("x {} (connection failed)", url).fmt(f),
            UrlState::TimedOut(ref url) => format!("x {} (timed out)", url).fmt(f),
            UrlState::Malformed(ref url) => format!("x {} (malformed)", url).fmt(f),
        }
    }
}

// Resolve `path` (which may be relative) against the domain's base URL.
fn build_url(domain: &str, path: &str) -> ParseResult<Url> {
    let base_url_string = format!("http://{}", domain);
    let base_url = Url::parse(&base_url_string).unwrap();

    let mut raw_url_parser = UrlParser::new();
    let url_parser = raw_url_parser.base_url(&base_url);

    url_parser.parse(path)
}

pub fn url_status(domain: &str, path: &str) -> UrlState {
    match build_url(domain, path) {
        Ok(url) => {
            let (tx, rx) = channel();
            let req_tx = tx.clone();
            let u = url.clone();

            // Race the HTTP request against a timer: whichever thread sends
            // on the channel first determines the reported state.
            thread::spawn(move || {
                let client = Client::new();
                let url_string = url.serialize();
                let resp = client.get(&url_string).send();

                let _ = req_tx.send(match resp {
                    Ok(r) => if let StatusCode::Ok = r.status {
                        UrlState::Accessible(url)
                    } else {
                        UrlState::BadStatus(url, r.status)
                    },
                    Err(_) => UrlState::ConnectionFailed(url),
                });
            });

            thread::spawn(move || {
                thread::sleep(Duration::from_secs(TIMEOUT));
                let _ = tx.send(UrlState::TimedOut(u));
            });

            rx.recv().unwrap()
        }
        Err(_) => UrlState::Malformed(path.to_owned()),
    }
}

pub fn fetch_url(url: &Url) -> String {
    let client = Client::new();

    let url_string = url.serialize();
    let mut res = client
        .get(&url_string)
        .send()
        .expect("could not fetch URL");

    let mut body = String::new();
    match res.read_to_string(&mut body) {
        Ok(_) => body,
        Err(_) => String::new(),
    }
}

pub fn fetch_all_urls(url: &Url) -> Vec<String> {
    let html_src = fetch_url(url);
    let dom = parse::parse_html(&html_src);

    parse::get_urls(dom.document)
}
--------------------------------------------------------------------------------
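url_status implements its timeout by racing two threads over one channel: the request thread and a timer thread both try to send, and recv() takes whichever message arrives first. The same pattern extracted into a standalone helper; with_timeout is a hypothetical name for illustration, not an API of this crate:

use std::sync::mpsc::channel;
use std::thread;
use std::time::Duration;

// Generic version of the race in `url_status`: run `work` on one thread,
// send `fallback` after `secs` seconds on another, and return whichever
// message lands on the shared channel first.
fn with_timeout<T, F>(secs: u64, fallback: T, work: F) -> T
where
    T: Send + 'static,
    F: FnOnce() -> T + Send + 'static,
{
    let (tx, rx) = channel();
    let work_tx = tx.clone();

    thread::spawn(move || {
        let _ = work_tx.send(work());
    });
    thread::spawn(move || {
        thread::sleep(Duration::from_secs(secs));
        let _ = tx.send(fallback); // too late if `work` already finished
    });

    rx.recv().unwrap()
}

The losing thread's send fails once the receiver has been dropped, which is why both sends discard their result with `let _` instead of unwrapping.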
/src/main.rs:
--------------------------------------------------------------------------------
extern crate html5ever;
extern crate url;

use std::env;
use std::io::stdout;
use std::io::Write;
use url::Url;

use fetch::UrlState;

mod parse;
mod fetch;
mod crawler;

fn main() {
    let args: Vec<_> = env::args().collect();
    if args.len() > 1 {
        let start_url_string = &args[1];

        let start_url = Url::parse(start_url_string).unwrap();
        let domain = start_url
            .domain()
            .expect("I can't find a domain in your URL");

        let mut success_count = 0;
        let mut fail_count = 0;

        for url_state in crawler::crawl(domain, &start_url) {
            match url_state {
                UrlState::Accessible(_) => {
                    success_count += 1;
                }
                status => {
                    fail_count += 1;
                    println!("{}", status);
                }
            }

            print!("Succeeded: {} Failed: {}\r", success_count, fail_count);
            stdout().flush().unwrap();
        }
    } else {
        println!("Please provide a URL.");
    }
}
--------------------------------------------------------------------------------
/src/parse.rs:
--------------------------------------------------------------------------------
use std::string::String;

use html5ever::tendril::TendrilSink;
use html5ever::parse_document;
use html5ever::rcdom::{Handle, NodeData, RcDom};
use html5ever::interface::Attribute;

pub fn parse_html(source: &str) -> RcDom {
    parse_document(RcDom::default(), Default::default())
        .from_utf8()
        .read_from(&mut source.as_bytes())
        .unwrap()
}

// Collect the href value of every anchor tag in the document.
pub fn get_urls(handle: Handle) -> Vec<String> {
    let mut urls = vec![];
    let mut anchor_tags = vec![];

    get_elements_by_name(handle, "a", &mut anchor_tags);

    for node in anchor_tags {
        if let NodeData::Element { ref attrs, .. } = node {
            for attr in attrs.borrow().iter() {
                let Attribute {
                    ref name,
                    ref value,
                } = *attr;
                if &*(name.local) == "href" {
                    urls.push(value.to_string());
                }
            }
        }
    }

    urls
}

// Depth-first walk that copies out every element node named `element_name`.
fn get_elements_by_name(handle: Handle, element_name: &str, out: &mut Vec<NodeData>) {
    let node = handle;

    if let NodeData::Element {
        ref name,
        ref attrs,
        ref template_contents,
        ..
    } = node.data
    {
        if &*(name.local) == element_name {
            out.push(NodeData::Element {
                name: name.clone(),
                attrs: attrs.clone(),
                template_contents: template_contents.clone(),
                mathml_annotation_xml_integration_point: false,
            });
        }
    }

    for n in node.children.borrow().iter() {
        get_elements_by_name(n.clone(), element_name, out);
    }
}
--------------------------------------------------------------------------------
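A quick way to sanity-check the two helpers above is to feed them an inline document. This snippet is illustrative only; the HTML string and the demo function are made up, not part of the crate:

// Extract the hrefs from a small in-memory page using the helpers above.
fn demo() {
    let html = r#"<html><body><a href="/about">About</a></body></html>"#;
    let dom = parse_html(html);
    let urls = get_urls(dom.document);
    assert_eq!(urls, vec!["/about".to_string()]);
}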