├── .gitignore ├── Cargo.toml ├── README.md └── src ├── execute └── mod.rs ├── lib.rs ├── limiter └── mod.rs └── scrape └── mod.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "quick_crawler" 3 | version = "0.1.2" 4 | authors = ["Michael Khirallah "] 5 | edition = "2018" 6 | 7 | license = "Apache-2.0/MIT" 8 | repository = "https://github.com/mkralla11/quick_crawler" 9 | homepage = "https://github.com/mkralla11/quick_crawler" 10 | description = "QuickCrawler is a Rust crate that provides a completely async, declarative web crawler with domain-specific request rate-limiting built-in." 11 | categories = ["network-programming", "asynchronous", "concurrency"] 12 | readme = "README.md" 13 | 14 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 15 | 16 | [dependencies] 17 | futures = { version = "0.3.1", features = ["compat"] } 18 | #futures01 = { package = "futures", version = "0.1", optional = true } 19 | #futures-preview = { version = "0.3.0-alpha.19", features = ["compat"] } 20 | futures-core-preview="0.3.0-alpha.19" 21 | async-std = {version = "1.4.0", features = ["unstable"]} 22 | scraper = {version = "0.11.0"} 23 | 24 | debug_stub_derive = "0.3.0" 25 | surf = {version = "1.0.3"} 26 | pin-utils = {version = "0.1.0-alpha.4"} 27 | 28 | ratelimit_futures = {version = "0.0.1"} 29 | ratelimit_meter = {version = "4.0.1"} 30 | dashmap = "3.4.4" 31 | url = "2.1.1" 32 | mockito = "0.23.1" -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # QuickCrawler 2 | 3 | 4 | QuickCrawler is a Rust crate that provides a completely async, declarative web crawler 5 | with domain-specific request rate-limiting built-in. 6 | 7 | # Examples 8 | 9 | Let's say you are trying to crawl a subset of pages for a given domain: 10 | 11 | `https://bike-site.com/search?q=red-bikes` 12 | 13 | and a regular GET request will return: 14 | 15 | ```html 16 | 17 | 18 |
19 | 20 | cool red bike 1 21 | 22 | cool red bike 2 23 | 24 | 25 | cool red bike 3 26 | 27 |
28 | 29 | other cool red bike 4 30 | 31 |
32 |
33 | </body>
34 | </html>
35 | ```
36 | 
37 | 
38 | and when navigating to *links 1 through 3* on that page, EACH PAGE returns: 
39 | 
40 | ```html
41 | <html>
42 | <body>
43 |     <div class="awesome-bike">
44 |         <div class="bike-info">
45 |             The best bike ever.
46 |         </div>
47 |         <ul class="bike-specs">
48 |             <li>
49 |                 Super fast.
50 |             </li>
51 |             <li>
52 |                 Jumps high.
53 |             </li>
54 |         </ul>
55 |     </div>
56 | </body>
57 | </html>
58 | ```
59 | 
60 | 
61 | and when navigating to *the last link* on that page, it returns: 
62 | 
63 | ```html
64 | <html>
65 | <body>
66 |     <div class="other-bike">
67 |         <div class="other-bike-info">
68 |             The best bike ever.
69 |         </div>
70 |         <ul class="other-bike-specs">
71 |             <li>
72 |                 Super slow.
73 |             </li>
74 |             <li>
75 |                 Doesn't jump.
76 |             </li>
77 |         </ul>
78 |     </div>
79 | 80 | 81 | ``` 82 | 83 | 84 | QuickCrawler declaratively helps you crawl, and scrape data from each of the given pages with ease: 85 | 86 | 87 | ```rust, no_run 88 | use quick_crawler::{ 89 | QuickCrawler, 90 | QuickCrawlerBuilder, 91 | limiter::Limiter, 92 | scrape::{ 93 | ResponseLogic::Parallel, 94 | StartUrl, 95 | Scrape, 96 | ElementUrlExtractor, 97 | ElementDataExtractor 98 | } 99 | }; 100 | 101 | 102 | fn main() { 103 | let mut builder = QuickCrawlerBuilder::new(); 104 | 105 | 106 | let start_urls = vec![ 107 | StartUrl::new() 108 | .url("https://bike-site.com/search?q=red-bikes") 109 | .method("GET") 110 | .response_logic(Parallel(vec![ 111 | // All Scrapers below will be provided the html page response body 112 | Scrape::new() 113 | .find_elements_with_urls(".bike-item") 114 | .extract_urls_from_elements(ElementUrlExtractor::Attr("href".to_string())) 115 | // now setup the logic to execute on each of the return html pages 116 | .response_logic(Parallel(vec![ 117 | Scrape::new() 118 | .find_elements_with_data(".awesome-bike .bike-info") 119 | .extract_data_from_elements(ElementDataExtractor::Text) 120 | .store(|vec: Vec| async move { 121 | println!("store bike info in DB: {:?}", vec); 122 | }), 123 | Scrape::new() 124 | .find_elements_with_data(".bike-specs li") 125 | .extract_data_from_elements(ElementDataExtractor::Text) 126 | .store(|vec: Vec| async move { 127 | println!("store bike specs in DB: {:?}", vec); 128 | }), 129 | ])), 130 | Scrape::new() 131 | .find_elements_with_urls(".bike-other-item") 132 | .extract_urls_from_elements(ElementUrlExtractor::Attr("href".to_string())) 133 | .response_logic(Parallel(vec![ 134 | Scrape::new() 135 | .find_elements_with_data(".other-bike .other-bike-info") 136 | .extract_data_from_elements(ElementDataExtractor::Text) 137 | .store(|vec: Vec| async move { 138 | println!("store other bike info in DB: {:?}", vec); 139 | }), 140 | Scrape::new() 141 | .find_elements_with_data(".other-bike-specs li") 142 | .extract_data_from_elements(ElementDataExtractor::Text) 143 | .store(|vec: Vec| async move { 144 | println!("store other bike specs in DB: {:?}", vec); 145 | }), 146 | ])) 147 | ]) 148 | ) 149 | // more StartUrl::new 's if you feel ambitious 150 | ] ; 151 | 152 | // It's smart to use a limiter - for now automatically set to 3 request per second per domain. 153 | // This will soon be configurable. 154 | 155 | let limiter = Limiter::new(); 156 | 157 | builder 158 | .with_start_urls( 159 | start_urls 160 | ) 161 | .with_limiter( 162 | limiter 163 | ) 164 | // Optionally configure how to make a request and return an html string 165 | .with_request_handler( 166 | |config: RequestHandlerConfig| async move { 167 | // ... use any request library, like reqwest 168 | surf::get(config.url.clone()).recv_string().await.map_err(|_| QuickCrawlerError::RequestErr) 169 | } 170 | ); 171 | let crawler = builder.finish().map_err(|_| "Builder could not finish").expect("no error"); 172 | 173 | // QuickCrawler is async, so choose your favorite executor. 174 | // (Tested and working for both async-std and tokio) 175 | let res = async_std::task::block_on(async { 176 | crawler.process().await 177 | }); 178 | 179 | } 180 | 181 | ``` 182 | 183 | 184 | 185 | # Contribute 186 | 187 | clone the repo. 188 | 189 | To run tests: 190 | 191 | cargo watch -x check -x 'test -- --nocapture' 192 | 193 | See the `src/lib.rs` test for example usage. 194 | 195 | # Thank You For Using! 196 | 197 | If you use this crate and you found it helpful to your project, please star it! 
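
# Custom request handlers

The handler passed to `with_request_handler` above is just a closure that receives a `RequestHandlerConfig` and resolves to a `Result<String, QuickCrawlerError>` containing the page body, so any HTTP client can be dropped in. As a rough sketch only (reqwest is not a dependency of this crate; this assumes you add it yourself and run on an executor it supports, such as tokio), the surf-based handler from the example above could be swapped for:

```rust
builder.with_request_handler(|config: RequestHandlerConfig| async move {
    // Hypothetical reqwest-based handler: fetch the page and return its body,
    // mapping any failure to the crate's RequestErr just like the surf default.
    let resp = reqwest::get(config.url.as_str())
        .await
        .map_err(|_| QuickCrawlerError::RequestErr)?;
    resp.text().await.map_err(|_| QuickCrawlerError::RequestErr)
});
```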
198 | 199 | 200 | # License 201 | 202 | MIT 203 | -------------------------------------------------------------------------------- /src/execute/mod.rs: -------------------------------------------------------------------------------- 1 | // use futures::stream::{self, StreamExt}; 2 | use std::mem::replace; 3 | use scraper::{Selector}; 4 | 5 | use std::sync::{Arc}; 6 | 7 | use crate::{DynRequestHandler, RequestHandlerConfig}; 8 | use crate::limiter::Limiter; 9 | use url::Url; 10 | use std::future::Future; 11 | 12 | use std::pin::Pin; 13 | 14 | 15 | use futures::future::{join}; 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | use async_std::{task}; 25 | extern crate scraper; 26 | use scraper::{Html}; 27 | use async_std::sync::{Sender}; 28 | 29 | 30 | use crate::scrape::{StartUrl, ResponseLogic::{self, Parallel}, ElementUrlExtractor, ElementDataExtractor, Ops::{self, *}}; 31 | use crate::{DataFromScraperValue, QuickCrawlerError::{self, *}}; 32 | 33 | 34 | async fn limit_url_via>(limiter: &Option>, url: S) -> Result<(), QuickCrawlerError> { 35 | 36 | if limiter.is_some() { 37 | let base = Url::parse(&url.into()).map_err(|_| QuickCrawlerError::ParseDomainErr)?; 38 | // println!("unwrapping {:?}", url.into().clone()); 39 | let domain = match base.host_str() { 40 | Some(d) =>d, 41 | None => return Err(QuickCrawlerError::ParseDomainErr) 42 | }; 43 | // println!("host_str {:?}", domain); 44 | // println!("unwrapped {:?}", domain.clone()); 45 | limiter.as_ref().unwrap().limit(domain).await; 46 | }; 47 | Ok(()) 48 | } 49 | 50 | 51 | 52 | pub async fn execute_deep_scrape<'a>(start_url: &StartUrl, data_sender: Sender, limiter: Option>, request_handler: Arc)-> Result<(), QuickCrawlerError> 53 | { 54 | let url = match &start_url.url { 55 | Some(url)=>url, 56 | None=>{ 57 | return Err(NoUrlInStartUrlErr) 58 | } 59 | }; 60 | 61 | let req = match &start_url.method { 62 | // Some(m) if m == "GET" =>"STUB URL REQ GET".to_string(), 63 | // Some(m) if m == "POST" =>"STUB URL REQ POST".to_string(), 64 | // // FOR LIVE RESULTS 65 | Some(m) if m == "GET" =>surf::get(url), 66 | Some(m) if m == "POST" =>surf::post(url), 67 | Some(m)=>{ 68 | return Err(InvalidStartUrlMethodErr(m.to_string())) 69 | } 70 | None=>{ 71 | return Err(NoStartUrlMethodErr) 72 | } 73 | }; 74 | 75 | 76 | limit_url_via(&limiter, url).await?; 77 | 78 | // // FOR LIVE RESULTS 79 | 80 | let html_str = request_handler.call(RequestHandlerConfig{url: url.to_string()}).await.map_err(|_| QuickCrawlerError::RequestErr)?; 81 | 82 | 83 | let response_logic = match &start_url.response_logic { 84 | Some(response_logic)=>response_logic.clone(), 85 | None=>{ 86 | return Err(NoResponseLogicErr) 87 | } 88 | }; 89 | 90 | handle_response_logic(&response_logic, url.clone(), html_str, data_sender, limiter, request_handler).await?; 91 | 92 | Ok(()) 93 | } 94 | 95 | 96 | 97 | 98 | async fn handle_response_logic<'a>(response_logic: &'a ResponseLogic, original_url: String, html_str: String, data_sender: Sender, limiter: Option>, request_handler: Arc) -> Result<(), QuickCrawlerError> 99 | 100 | { 101 | match response_logic { 102 | Parallel(par_items) => { 103 | use futures::stream::{self, StreamExt, TryStreamExt}; 104 | // loop over each Item in array 105 | let res: Result<(), QuickCrawlerError> = Box::pin(stream::iter(par_items).map(|item| (item, original_url.clone(), data_sender.clone(), html_str.clone(), limiter.clone(), request_handler.clone())).map(Ok).try_for_each_concurrent( 106 | /* limit */ 4, 107 | |(scrape, original_url, sender, html_str, limiter, 
request_handler)| async move { 108 | handle_scrape(&scrape.executables, original_url, html_str, sender, limiter, request_handler).await 109 | } 110 | )).await; 111 | res 112 | } 113 | _ => { 114 | return Err(UnknownResponseLogicErr) 115 | } 116 | } 117 | } 118 | 119 | // fn get_domain_from_url(url: &str)-> Result{ 120 | // let base = Url::parse(url.into()).map_err(|_| QuickCrawlerError::ParseDomainErr)?; 121 | // let domain = match base.host_str() { 122 | // Some(d) => format!("{}://{}", base.scheme(), d).to_string(), 123 | // None => return Err(QuickCrawlerError::ParseDomainErr) 124 | // }; 125 | 126 | // let domain = match base.port() { 127 | // Some(p) => format!("{}:{}", domain, p).to_string(), 128 | // None => domain 129 | // }; 130 | 131 | // Ok(domain.to_string()) 132 | // } 133 | 134 | fn construct_full_url(original: &str, href: &str)-> Result{ 135 | // println!("domain: {:?} - href: {:?}", domain, href); 136 | let res = Url::parse(href.into()); 137 | // println!("base {:?}", base); 138 | let full_url = match res { 139 | Ok(d) => href.to_string(), 140 | Err(url::ParseError::RelativeUrlWithoutBase) => { 141 | let base = Url::parse(original).map_err(|_| QuickCrawlerError::ParseDomainErr)?; 142 | base.join(href).map_err(|_| QuickCrawlerError::ParseDomainErr)?.to_string() 143 | }, 144 | _ => return Err(QuickCrawlerError::ParseDomainErr) 145 | }; 146 | // println!("full_url: {:?}", full_url); 147 | Ok(full_url.clone()) 148 | } 149 | 150 | 151 | #[derive(Debug, Clone)] 152 | struct HtmlContainer { 153 | original_url: String, 154 | html_str: String, 155 | url_node_strs: Vec, 156 | data_node_strs: Vec, 157 | node_urls: Vec, 158 | data_items: Vec, 159 | } 160 | 161 | 162 | impl HtmlContainer{ 163 | fn new(original_url: String, html_str: String)-> HtmlContainer { 164 | HtmlContainer{ 165 | original_url, 166 | html_str, 167 | url_node_strs: vec![], 168 | data_node_strs: vec![], 169 | node_urls: vec![], 170 | data_items: vec![], 171 | // next_: vec![], 172 | } 173 | } 174 | 175 | fn get_original_url(&self)-> String { 176 | self.original_url.to_string() 177 | } 178 | } 179 | 180 | use futures::future::{BoxFuture, FutureExt}; 181 | 182 | 183 | 184 | 185 | fn find_node_strs(pred: &Selector, html_str: &str) -> Vec { 186 | let mut node_strs = Vec::new(); 187 | // let node_strs = replace(&mut container.node_strs, vec![]); 188 | Html::parse_fragment(html_str).select(pred).for_each(|node| { 189 | node_strs.push(node.html().replace('\n', "").trim().to_owned()); 190 | }); 191 | return node_strs 192 | } 193 | 194 | fn find_urls(ex: &ElementUrlExtractor, node_strs: &Vec) -> Vec { 195 | let mut urls = Vec::new(); 196 | // let node_strs = replace(&mut container.node_strs, vec![]); 197 | node_strs.iter().for_each(|node| { 198 | let node_el = Html::parse_fragment(&node); 199 | 200 | match ex { 201 | ElementUrlExtractor::Attr(target_attr) => { 202 | node_el.root_element().children().for_each(|child| { 203 | child 204 | .value() 205 | .as_element() 206 | .and_then(|el| el.attr(target_attr)) 207 | .map(|url| { 208 | // println!("url {:?}", url); 209 | urls.push(url.to_owned()); 210 | }); 211 | }) 212 | } 213 | }; 214 | }); 215 | 216 | return urls 217 | } 218 | 219 | fn find_data(ex: &ElementDataExtractor, node_strs: &Vec) -> Vec { 220 | let mut urls = Vec::new(); 221 | // let node_strs = replace(&mut container.node_strs, vec![]); 222 | node_strs.iter().for_each(|node| { 223 | let node_el = Html::parse_fragment(&node); 224 | 225 | match ex { 226 | ElementDataExtractor::Text => { 227 | // let element_value = 
Html::parse_fragment(&node).root_element().value(); 228 | urls.extend(node_el.root_element().text().map(|item| item.trim().to_string()).collect::>()); 229 | } 230 | }; 231 | }); 232 | 233 | return urls 234 | } 235 | 236 | 237 | 238 | fn handle_scrape<'a>(executables: &'a Vec>, original_url: String, html_str: String, data_sender: Sender, limiter: Option>, request_handler: Arc)-> BoxFuture<'a, Result<(), QuickCrawlerError>> 239 | { 240 | Box::pin(async move { 241 | 242 | let mut container = HtmlContainer::new(original_url.clone(), html_str.clone()); 243 | 244 | for executable in executables.iter() { 245 | // println!("executable {:?}", i); 246 | match &**executable { 247 | UrlSelector(selector_str)=>{ 248 | let node_strs = find_node_strs(&selector_str, &container.html_str); 249 | replace(&mut container.url_node_strs, node_strs); 250 | } 251 | DataSelector(selector_str)=>{ 252 | // println!("Pred!"); 253 | let node_strs = find_node_strs(&selector_str, &container.html_str); 254 | replace(&mut container.data_node_strs, node_strs); 255 | } 256 | UrlExtractor(ex)=>{ 257 | let urls = find_urls(ex, &container.url_node_strs); 258 | replace(&mut container.node_urls, urls); 259 | } 260 | DataExtractor(ex)=>{ 261 | let data_items = find_data(ex, &container.data_node_strs); 262 | replace(&mut container.data_items, data_items); 263 | } 264 | Ops::ResponseLogic(response_logic)=>{ 265 | 266 | // println!("ResponseLogic!"); 267 | 268 | use futures::stream::{self, StreamExt, TryStreamExt}; 269 | 270 | // let hrefs = container.node_urls; 271 | 272 | 273 | // Can't figure out how to remove this block on because 274 | // of Scraper crate dependency that uses Cells :( 275 | // println!("{:?}", container.node_urls); 276 | // let (sender, receiver) = channel::(5); 277 | let original_url = container.get_original_url().clone(); 278 | let res: Result<(), QuickCrawlerError> = Box::pin(stream::iter(&container.node_urls).map(|href| (original_url.clone(), href.clone(), data_sender.clone(), response_logic.clone(), limiter.clone(), request_handler.clone())).map(Ok).try_for_each_concurrent( 279 | /* limit */ 5, 280 | |(original_url, href, data_sender, response_logic, limiter, request_handler)| async move { 281 | // println!("here {:?}", href); 282 | let full_url = construct_full_url(&original_url, &href)?; 283 | limit_url_via(&limiter, &full_url).await?; 284 | 285 | // // FOR LIVE RESULTS 286 | let html_str = request_handler.call(RequestHandlerConfig{url: full_url.to_string()}).await.map_err(|_| QuickCrawlerError::RequestErr)?; 287 | 288 | // let html_str = surf::get(&full_url).recv_string().await.map_err(|_| QuickCrawlerError::RequestErr)?; 289 | // let html_str = format!("
{} test ingredient
{} test ingredient
step: {}
  • ", i, i, i); 290 | handle_response_logic(&response_logic, full_url, html_str, data_sender, limiter, request_handler).await 291 | 292 | // async_std::task::yield_now().await; 293 | // Ok(()) 294 | } 295 | )).await; 296 | 297 | res?; 298 | } 299 | Store(f)=>{ 300 | let res = container.data_items.iter().map(|x| x.to_string()).collect::>(); 301 | f.call(res).await; 302 | } 303 | } 304 | 305 | } 306 | Ok(()) 307 | }) 308 | } 309 | 310 | 311 | 312 | 313 | 314 | 315 | 316 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # QuickCrawler 2 | //! QuickCrawler is a Rust crate that provides a completely async, declarative web crawler 3 | //! with domain-specific request rate-limiting built-in. 4 | //! 5 | //! # Examples 6 | //! 7 | //! Let's say you are trying to crawl a subset of pages for a given domain: 8 | //! 9 | //! `https://bike-site.com/search?q=red-bikes` 10 | //! 11 | //! and a regular GET request will return: 12 | //! 13 | //! ```html 14 | //! 15 | //! 16 | //! 31 | //! 32 | //! 33 | //! ``` 34 | //! 35 | //! 36 | //! and when navigating to *links 1 through 3* on that page, EACH PAGE returns: 37 | //! 38 | //! ```html 39 | //! 40 | //! 41 | //!
42 | //!         <div class="bike-info">
43 | //!             The best bike ever.
44 | //!         </div>
45 | //!         <ul class="bike-specs">
46 | //!             <li>
47 | //!                 Super fast.
48 | //!             </li>
49 | //!             <li>
50 | //!                 Jumps high.
51 | //!             </li>
52 | //!         </ul>
53 | //!     </div>
54 | //! </body>
55 | //! </html>
56 | //! ```
57 | //! 
58 | //! 
59 | //! and when navigating to *the last link* on that page, it returns: 
60 | //! 
61 | //! ```html
62 | //! <html>
63 | //! <body>
64 | //!     <div class="other-bike">
65 | //!         <div class="other-bike-info">
66 | //!             The best bike ever.
67 | //!         </div>
68 | //!         <ul class="other-bike-specs">
69 | //!             <li>
70 | //!                 Super slow.
71 | //!             </li>
72 | //!             <li>
73 | //!                 Doesn't jump.
74 | //!             </li>
75 | //!         </ul>
76 | //!     </div>
    77 | //! 78 | //! 79 | //! ``` 80 | //! 81 | //! 82 | //! QuickCrawler declaratively helps you crawl, and scrape data from each of the given pages with ease: 83 | //! 84 | //! 85 | //! ```rust, no_run 86 | //! use quick_crawler::{ 87 | //! QuickCrawler, 88 | //! QuickCrawlerBuilder, 89 | //! limiter::Limiter, 90 | //! RequestHandlerConfig, 91 | //! QuickCrawlerError, 92 | //! scrape::{ 93 | //! ResponseLogic::Parallel, 94 | //! StartUrl, 95 | //! Scrape, 96 | //! ElementUrlExtractor, 97 | //! ElementDataExtractor 98 | //! } 99 | //! }; 100 | //! 101 | //! 102 | //! fn main() { 103 | //! let mut builder = QuickCrawlerBuilder::new(); 104 | //! 105 | //! 106 | //! let start_urls = vec![ 107 | //! StartUrl::new() 108 | //! .url("https://bike-site.com/search?q=red-bikes") 109 | //! .method("GET") 110 | //! .response_logic(Parallel(vec![ 111 | //! // All Scrapers below will be provided the html page response body 112 | //! Scrape::new() 113 | //! .find_elements_with_urls(".bike-item") 114 | //! .extract_urls_from_elements(ElementUrlExtractor::Attr("href".to_string())) 115 | //! // now setup the logic to execute on each of the return html pages 116 | //! .response_logic(Parallel(vec![ 117 | //! Scrape::new() 118 | //! .find_elements_with_data(".awesome-bike .bike-info") 119 | //! .extract_data_from_elements(ElementDataExtractor::Text) 120 | //! .store(|vec: Vec| async move { 121 | //! println!("store bike info in DB: {:?}", vec); 122 | //! }), 123 | //! Scrape::new() 124 | //! .find_elements_with_data(".bike-specs li") 125 | //! .extract_data_from_elements(ElementDataExtractor::Text) 126 | //! .store(|vec: Vec| async move { 127 | //! println!("store bike specs in DB: {:?}", vec); 128 | //! }), 129 | //! ])), 130 | //! Scrape::new() 131 | //! .find_elements_with_urls(".bike-other-item") 132 | //! .extract_urls_from_elements(ElementUrlExtractor::Attr("href".to_string())) 133 | //! .response_logic(Parallel(vec![ 134 | //! Scrape::new() 135 | //! .find_elements_with_data(".other-bike .other-bike-info") 136 | //! .extract_data_from_elements(ElementDataExtractor::Text) 137 | //! .store(|vec: Vec| async move { 138 | //! println!("store other bike info in DB: {:?}", vec); 139 | //! }), 140 | //! Scrape::new() 141 | //! .find_elements_with_data(".other-bike-specs li") 142 | //! .extract_data_from_elements(ElementDataExtractor::Text) 143 | //! .store(|vec: Vec| async move { 144 | //! println!("store other bike specs in DB: {:?}", vec); 145 | //! }), 146 | //! ])) 147 | //! ]) 148 | //! ) 149 | //! // more StartUrl::new 's if you feel ambitious 150 | //! ] ; 151 | //! 152 | //! // It's smart to use a limiter - for now automatically set to 3 request per second per domain. 153 | //! // This will soon be configurable. 154 | //! 155 | //! let limiter = Limiter::new(); 156 | //! 157 | //! builder 158 | //! .with_start_urls( 159 | //! start_urls 160 | //! ) 161 | //! .with_limiter( 162 | //! limiter 163 | //! ) 164 | //! // Optionally configure how to make a request and return an html string 165 | //! .with_request_handler( 166 | //! |config: RequestHandlerConfig| async move { 167 | //! // ... use any request library, like reqwest 168 | //! surf::get(config.url.clone()).recv_string().await.map_err(|_| QuickCrawlerError::RequestErr) 169 | //! } 170 | //! ); 171 | //! 172 | //! let crawler = builder.finish().map_err(|_| "Builder could not finish").expect("no error"); 173 | //! 174 | //! // QuickCrawler is async, so choose your favorite executor. 175 | //! 
// (Tested and working for both async-std and tokio) 176 | //! let res = async_std::task::block_on(async { 177 | //! crawler.process().await 178 | //! }); 179 | //! } 180 | //! ``` 181 | 182 | 183 | 184 | use std::future::Future; 185 | use std::sync::{Arc, Mutex}; 186 | use std::pin::Pin; 187 | mod execute; 188 | use crate::execute::execute_deep_scrape; 189 | pub mod limiter; 190 | use limiter::Limiter; 191 | 192 | #[macro_use] 193 | extern crate debug_stub_derive; 194 | 195 | 196 | pub mod scrape; 197 | use crate::scrape::{ResponseLogic::Parallel, StartUrl, Scrape, ElementUrlExtractor, ElementDataExtractor}; 198 | 199 | use futures::stream::{self, StreamExt, TryStreamExt, Iter as StreamIter}; 200 | use futures::{ready, Stream}; 201 | // use futures::channel::mpsc::channel; 202 | use async_std::sync::{channel, Receiver, Sender}; 203 | use async_std::task::{Context, Poll, sleep}; 204 | 205 | use futures::future::{join, BoxFuture}; 206 | 207 | use std::time::Duration; 208 | 209 | #[derive(Debug, PartialEq)] 210 | pub enum QuickCrawlerError { 211 | NoStartUrls, 212 | NoUrlInStartUrlErr, 213 | ParseDomainErr, 214 | SurfRequestErr, 215 | RequestErr, 216 | NoStartUrlMethodErr, 217 | InvalidStartUrlMethodErr(String), 218 | NoResponseLogicErr, 219 | UnknownResponseLogicErr 220 | } 221 | 222 | // #[derive(Debug)] 223 | pub struct QuickCrawler<'a> 224 | { 225 | start_urls: StreamIter>, 226 | limiter: Option>, 227 | request_handler: Arc 228 | } 229 | 230 | // #[derive(Debug)] 231 | // struct StartUrls 232 | // { 233 | // data: Vec 234 | // } 235 | 236 | pub struct QuickCrawlerBuilder 237 | { 238 | start_urls: Option>, 239 | limiter: Option>, 240 | request_handler: Option> 241 | } 242 | 243 | 244 | 245 | impl QuickCrawlerBuilder 246 | { 247 | pub fn new() -> QuickCrawlerBuilder{ 248 | QuickCrawlerBuilder { 249 | start_urls: None, 250 | limiter: None, 251 | request_handler: None 252 | } 253 | } 254 | 255 | pub fn with_start_urls<'a>(&'a mut self, start_urls: Vec) -> &'a mut QuickCrawlerBuilder { 256 | self.start_urls = Some(start_urls); 257 | self 258 | } 259 | 260 | 261 | 262 | pub fn with_limiter<'a>(&'a mut self, limiter: Limiter) -> &'a mut QuickCrawlerBuilder { 263 | self.limiter = Some(Arc::new(limiter)); 264 | self 265 | } 266 | 267 | pub fn with_request_handler<'a>(&'a mut self, request_handler: impl RequestHandler) -> &'a mut QuickCrawlerBuilder { 268 | self.request_handler = Some(Arc::new(request_handler)); 269 | self 270 | } 271 | 272 | 273 | pub fn finish(&self) -> Result { 274 | let data = self.start_urls.as_ref().ok_or(QuickCrawlerError::NoStartUrls)?; 275 | 276 | let request_handler = match self.request_handler.clone() { 277 | Some(r) => r, 278 | None => { 279 | Arc::new(|config: RequestHandlerConfig| async move { 280 | // ... 
use any request library, like reqwest 281 | surf::get(config.url.clone()).recv_string().await.map_err(|_| QuickCrawlerError::RequestErr) 282 | }) 283 | } 284 | }; 285 | 286 | 287 | Ok( 288 | QuickCrawler { 289 | start_urls: stream::iter(data), 290 | limiter: self.limiter.clone(), 291 | request_handler: request_handler 292 | } 293 | ) 294 | } 295 | } 296 | 297 | pub struct RequestHandlerConfig { 298 | pub url: String 299 | } 300 | 301 | 302 | pub trait RequestHandler: Send + Sync + 'static { 303 | /// Invoke the endpoint within the given context 304 | fn call<'a>(&'a self, config: RequestHandlerConfig) -> BoxFuture<'a, Result>; 305 | } 306 | 307 | pub type DynRequestHandler = dyn RequestHandler; 308 | 309 | impl RequestHandler for F 310 | where 311 | F: Fn(RequestHandlerConfig) -> Fut, 312 | Fut: Future> + Send + 'static, 313 | { 314 | fn call<'a>(&'a self, config: RequestHandlerConfig) -> BoxFuture<'a, Result> { 315 | let fut = (self)(config); 316 | Box::pin(async move { fut.await }) 317 | 318 | } 319 | } 320 | 321 | 322 | // impl BuilderWithStartUrls for QuickCrawlerBuilder 323 | // { 324 | // fn with(&mut self, start_urls: StartUrls) -> &QuickCrawlerBuilder { 325 | // self.start_urls = Some(start_urls); 326 | // self 327 | // } 328 | // } 329 | 330 | // impl BuilderWithLimiter for QuickCrawlerBuilder 331 | // { 332 | // fn with(&mut self, limiter: Limiter) -> &QuickCrawlerBuilder { 333 | // self.limiter = Some(limiter); 334 | // self 335 | // } 336 | // } 337 | 338 | 339 | 340 | #[derive(Debug)] 341 | pub enum DataFromScraperValue{ 342 | Complete, 343 | DataFromScraper { 344 | text: String 345 | } 346 | } 347 | 348 | trait DataDistributorComplete{ 349 | fn is_complete_sentinal(&self) -> bool; 350 | } 351 | 352 | impl DataDistributorComplete for DataFromScraperValue { 353 | fn is_complete_sentinal(&self) -> bool { 354 | match self { 355 | Self::Complete => true, 356 | _ => false, 357 | } 358 | } 359 | } 360 | 361 | // #[derive(Debug, Clone, PartialEq)] 362 | // struct DataFromScraper { 363 | // url: String, 364 | // count: usize 365 | // } 366 | 367 | 368 | pub struct DataDistributor { 369 | receiver: Receiver 370 | } 371 | 372 | impl DataDistributor { 373 | fn new(receiver: Receiver) -> DataDistributor { 374 | DataDistributor { 375 | receiver 376 | } 377 | } 378 | } 379 | 380 | 381 | impl Stream for DataDistributor { 382 | /// The type of the value yielded by the stream. 383 | type Item = DataFromScraperValue; 384 | 385 | /// Attempt to resolve the next item in the stream. 386 | /// Retuns `Poll::Pending` if not ready, `Poll::Ready(Some(x))` if a value 387 | /// is ready, and `Poll::Ready(None)` if the stream has completed. 388 | fn poll_next<'a>(mut self: Pin<&mut Self>, cx: &mut Context<'_>) 389 | -> Poll> { 390 | let Self { 391 | receiver 392 | } = &mut *self; 393 | 394 | let empty = receiver.is_empty(); 395 | // println!("here! 
{:?}", empty); 396 | if empty { 397 | // cx.waker().clone().wake(); 398 | return Poll::Pending; 399 | } 400 | else { 401 | let mut unwrapped_fut = Box::pin(async move { 402 | receiver.recv().await.unwrap() 403 | }); 404 | 405 | 406 | let res = ready!(unwrapped_fut.as_mut().poll(cx)); 407 | 408 | match res.is_complete_sentinal() { 409 | true => { 410 | // println!("poll NONE match (done): {:?}", res); 411 | return Poll::Ready(None); 412 | } 413 | _ => { 414 | // println!("poll some match: {:?}", res); 415 | return Poll::Ready(Some(res)); 416 | } 417 | } 418 | 419 | // return Poll::Ready(Some( 420 | // DataFromScraper{ 421 | // url: "test".into(), 422 | // count: 1 423 | // } 424 | // )); 425 | } 426 | } 427 | } 428 | 429 | 430 | 431 | 432 | 433 | 434 | async fn dispatch<'a>( 435 | data_to_manager_sender: Sender, 436 | start_url: &'a StartUrl, 437 | limiter: Option>, 438 | request_handler: Arc 439 | ) -> Result<(), QuickCrawlerError> 440 | { 441 | // let mut count = count.lock().unwrap(); 442 | // *count += 1; 443 | // let val = *count; 444 | // std::mem::drop(count); 445 | // println!("about to send"); 446 | 447 | // if should_delay { 448 | // println!("delaying..."); 449 | // sleep(Duration::from_secs(1)).await; 450 | // } 451 | 452 | // println!("sending"); 453 | // let _res = data_to_manager_sender.send( 454 | // DataFromScraperValue::DataFromScraper{ 455 | // url: start_url.url.clone().unwrap(), 456 | // count: val 457 | // } 458 | // ).await; 459 | execute_deep_scrape(&start_url, data_to_manager_sender, limiter, request_handler).await?; 460 | // async_std::task::yield_now().await; 461 | // println!("in loop: {:?} {:?} {:?}", start_url.url, val, res); 462 | 463 | // let res: Result<(), ()> = Ok(()); 464 | // res 465 | Ok(()) 466 | } 467 | 468 | impl<'a> QuickCrawler<'a> 469 | { 470 | pub async fn process(self) -> Result, QuickCrawlerError> { 471 | 472 | // let stream = &self.start_urls; 473 | // let count = Arc::new(Mutex::new(0usize)); 474 | let (data_to_manager_sender, data_to_manager_receiver) = channel::(100); 475 | 476 | let limiter = self.limiter; 477 | let request_handler = self.request_handler; 478 | 479 | let stream_senders_fut: Pin>>> = Box::pin(self.start_urls.map(|url| (data_to_manager_sender.clone(), url, limiter.clone(), request_handler.clone())).map(Ok).try_for_each_concurrent( 480 | 3, 481 | |(data_to_manager_sender, start_url, limiter, request_handler)| async move { 482 | // let i = i + 1; 483 | let res = dispatch(data_to_manager_sender, start_url, limiter, request_handler).await; 484 | async_std::task::yield_now().await; 485 | res 486 | } 487 | )); 488 | 489 | 490 | 491 | 492 | 493 | // let collect_fut = collect_results_for_receiver(data_to_manager_receiver); 494 | let data_distributor_stream = DataDistributor::new(data_to_manager_receiver); 495 | let data_distributor_stream_fut: Pin>>>= Box::pin(data_distributor_stream.collect()); 496 | // let res = data_to_manager_receiver.recv().await.ok_or("Error 3")?; 497 | let data_to_manager_sender2 = data_to_manager_sender.clone(); 498 | let stream_complete_fut = async move { 499 | let res = stream_senders_fut.await; 500 | let _res = data_to_manager_sender2.send( 501 | DataFromScraperValue::Complete 502 | ).await; 503 | // println!("finished sender stream {:?}", res); 504 | res 505 | }; 506 | 507 | let (data, _) = join(data_distributor_stream_fut, stream_complete_fut).await; 508 | // println!("outside loop: {:?}", data); 509 | Ok(data) 510 | } 511 | // fn add_url(&mut self, url: String) -> &Self { 512 | // let new_stream = 
stream::iter(vec![url]); 513 | // self.start_urls = self.start_urls.chain(url); 514 | // self 515 | // } 516 | } 517 | 518 | 519 | 520 | 521 | 522 | #[cfg(test)] 523 | mod tests { 524 | use super::*; 525 | use async_std::{task}; 526 | 527 | use mockito::{mock, server_address, Matcher}; 528 | // #[test] 529 | // fn with_start_urls() { 530 | // let mut builder = QuickCrawlerBuilder::new(); 531 | 532 | 533 | 534 | // builder.with( 535 | // StartUrls{ 536 | // data: vec!["https://www.google.com".into()] 537 | // } 538 | // ); 539 | // // assert_eq!(builder.start_urls.as_ref().unwrap(), &start_urls_1); 540 | // } 541 | 542 | 543 | 544 | #[test] 545 | fn with_start_urls_finished() -> () { 546 | let base_url = &mockito::server_url(); 547 | let start_path = "/search?q=dinner"; 548 | let path1 = "/compilation/another-meal-1"; 549 | let path2 = "/compilation/another-meal-2"; 550 | let path3 = "/compilation/another-meal-3"; 551 | // relative path 552 | let path4 = "../compilation/other-meal-1"; 553 | 554 | let start_url = format!("{}{}", base_url, start_path); 555 | let url1 = format!("{}{}", base_url, path1); 556 | let url2 = format!("{}{}", base_url, path2); 557 | let url3 = format!("{}{}", base_url, path3); 558 | let url4 = format!("{}{}", base_url, path4); 559 | 560 | // Make sure to support RELATIVE PATH 561 | // (as shown below using path4 variable) 562 | let _m1 = mock("GET", start_path) 563 | .with_body( 564 | format!(r#" 565 | 566 | 581 | 582 | "#, url1, url2, url3, path4) 583 | ) 584 | .create(); 585 | 586 | 587 | let _m2 = mock("GET", Matcher::Regex(r"^/compilation/another-meal-1$".to_string())) 588 | .with_body( 589 | format!(r#" 590 |
591 |                     <div class="ingredient">
592 |                         set 1: test ingredient 1
593 |                     </div>
594 |                     <div class="ingredient">
595 |                         set 1: test ingredient 2
596 |                     </div>
597 |                     <ul class="prep-steps">
598 |                         <li>
599 |                             set 1: step 1
600 |                         </li>
601 |                     </ul>
602 |                 </div>
    603 | "#) 604 | ) 605 | .create(); 606 | 607 | let _m2 = mock("GET", Matcher::Regex(r"^/compilation/another-meal-(2|3)$".to_string())) 608 | .with_body( 609 | format!(r#" 610 |
611 |                     <div class="ingredient">
612 |                         set 2: test ingredient 1
613 |                     </div>
614 |                     <div class="ingredient">
615 |                         set 2: test ingredient 2
616 |                     </div>
617 |                     <ul class="prep-steps">
618 |                         <li>
619 |                             set 2: step 1
620 |                         </li>
621 |                         <li>
622 |                             set 2: step 2
623 |                         </li>
624 |                     </ul>
625 |                 </div>
    626 | "#) 627 | ) 628 | .create(); 629 | 630 | let _m3 = mock("GET", Matcher::Regex(r"^/compilation/other-meal-1$".to_string())) 631 | .with_body( 632 | format!(r#" 633 |
634 |                     <div class="ingredient">
635 |                         other ingredient 1
636 |                     </div>
637 |                     <div class="ingredient">
638 |                         other ingredient 2
639 |                     </div>
640 |                     <ul class="prep-steps">
641 |                         <li>
642 |                             other step 1
643 |                         </li>
644 |                         <li>
645 |                             other step 2
646 |                         </li>
647 |                         <li>
648 |                             other step 3
649 |                         </li>
650 |                     </ul>
651 |                 </div>
    652 | "#) 653 | ) 654 | .create(); 655 | // format!("
{} test ingredient
{} test ingredient
step: {}
  • ", i, i, i); 656 | 657 | 658 | 659 | 660 | let mut builder = QuickCrawlerBuilder::new(); 661 | 662 | // println!("the start_url {}", start_url); 663 | 664 | let start_urls = vec![ 665 | StartUrl::new() 666 | .url(start_url) 667 | .method("GET") 668 | .response_logic(Parallel(vec![ 669 | // will be provided an html page 670 | Scrape::new() 671 | .find_elements_with_urls(".feed-item") 672 | .extract_urls_from_elements(ElementUrlExtractor::Attr("href".to_string())) 673 | .response_logic(Parallel(vec![ 674 | Scrape::new() 675 | .find_elements_with_data(".ingredients-prep .ingredient") 676 | .extract_data_from_elements(ElementDataExtractor::Text) 677 | .store(|vec: Vec| async move { 678 | println!("store ingredients: {:?}", vec); 679 | }), 680 | Scrape::new() 681 | .find_elements_with_data(".ingredients-prep .prep-steps li") 682 | .extract_data_from_elements(ElementDataExtractor::Text) 683 | .store(|vec: Vec| async move { 684 | println!("store prep-steps: {:?}", vec); 685 | }), 686 | ])), 687 | Scrape::new() 688 | .find_elements_with_urls(".other-feed-item") 689 | .extract_urls_from_elements(ElementUrlExtractor::Attr("href".to_string())) 690 | .response_logic(Parallel(vec![ 691 | Scrape::new() 692 | .find_elements_with_data(".ingredients-prep .ingredient") 693 | .extract_data_from_elements(ElementDataExtractor::Text) 694 | .store(|vec: Vec| async move { 695 | println!("store ingredients: {:?}", vec); 696 | }), 697 | Scrape::new() 698 | .find_elements_with_data(".ingredients-prep .prep-steps li") 699 | .extract_data_from_elements(ElementDataExtractor::Text) 700 | .store(|vec: Vec| async move { 701 | println!("store prep-steps: {:?}", vec); 702 | }), 703 | ])) 704 | ]) 705 | ) 706 | // more StartUrl::new 's 707 | ] ; 708 | 709 | 710 | let limiter = Limiter::new(); 711 | 712 | builder 713 | .with_start_urls( 714 | start_urls 715 | ) 716 | .with_limiter( 717 | limiter 718 | ) 719 | .with_request_handler( 720 | |config: RequestHandlerConfig| async move { 721 | // ... 
use any request library, like reqwest 722 | surf::get(config.url.clone()).recv_string().await.map_err(|_| QuickCrawlerError::RequestErr) 723 | } 724 | ); 725 | 726 | 727 | let crawler = builder.finish().map_err(|_| "Builder could not finish").expect("no error"); 728 | let res = task::block_on(async { 729 | crawler.process().await 730 | }); 731 | 732 | println!("{:?}", res); 733 | assert_eq!(res.is_ok(), true); 734 | 735 | } 736 | 737 | // #[test] 738 | // // #[should_panic] 739 | // fn no_start_urls_finished() { 740 | // let builder = QuickCrawlerBuilder::new(); 741 | 742 | 743 | // let scraper_result = builder.finish(); 744 | // assert!(scraper_result.is_err()); 745 | // // assert_eq!(scraper_result, Err(QuickCrawlerError::NoStartUrls)); 746 | // } 747 | } 748 | -------------------------------------------------------------------------------- /src/limiter/mod.rs: -------------------------------------------------------------------------------- 1 | use dashmap::DashMap; 2 | use futures::prelude::*; 3 | use futures::future::{join_all}; 4 | use std::time::{Instant}; 5 | 6 | use ratelimit_meter::{DirectRateLimiter, LeakyBucket}; 7 | use ratelimit_futures::Ratelimit; 8 | use std::num::NonZeroU32; 9 | use futures::{ 10 | compat::{Future01CompatExt, Stream01CompatExt}, 11 | future::{TryFutureExt}, 12 | }; 13 | 14 | 15 | #[derive(Debug, Clone)] 16 | pub struct Limiter { 17 | hash: DashMap, 18 | } 19 | 20 | 21 | impl Limiter { 22 | pub fn new() -> Self { 23 | let hash = DashMap::new(); 24 | // let lim = DirectRateLimiter::::per_second(NonZeroU32::new(1).unwrap()); 25 | Self { 26 | hash 27 | } 28 | } 29 | 30 | 31 | pub async fn limit>(&self, id: S) -> () { 32 | let id = id.into(); 33 | let lim = match self.hash.get(&id) { 34 | Some(limiter)=>{ 35 | // println!("next limiter!"); 36 | limiter 37 | } 38 | None=>{ 39 | let lim = DirectRateLimiter::::per_second(NonZeroU32::new(3).unwrap()); 40 | // println!("first limiter!"); 41 | self.hash.insert(id.clone(), lim); 42 | self.hash.get(&id).unwrap() 43 | } 44 | }; 45 | { 46 | let mut lim = lim.clone(); 47 | let ratelimit_future = Ratelimit::new(&mut lim); 48 | ratelimit_future.compat().await.expect("ratelimit_future unknown error"); 49 | }; 50 | } 51 | } 52 | 53 | 54 | 55 | 56 | 57 | #[cfg(test)] 58 | mod tests { 59 | use super::*; 60 | use async_std::{task}; 61 | 62 | #[test] 63 | fn no_start_urls_finished() { 64 | 65 | 66 | let res = task::block_on(async { 67 | 68 | 69 | let limiter = Limiter::new(); 70 | 71 | 72 | 73 | let c = || async move { 74 | let mut futs = Vec::new(); 75 | futs.push(limiter.limit("domain.com")); 76 | futs.push(limiter.limit("domain.com")); 77 | futs.push(limiter.limit("domain.com")); 78 | futs.push(limiter.limit("domain.com")); 79 | 80 | 81 | println!("before limit"); 82 | let start = Instant::now(); 83 | join_all(futs).await; 84 | let duration = start.elapsed(); 85 | println!("after limit {:?}", duration); 86 | }; 87 | c().await; 88 | 89 | }); 90 | assert!(true); 91 | } 92 | } -------------------------------------------------------------------------------- /src/scrape/mod.rs: -------------------------------------------------------------------------------- 1 | extern crate scraper; 2 | use scraper::{Selector}; 3 | // use futures_core::future::Future; 4 | // use futures::future::FutureExt; 5 | // use futures::future::Shared; 6 | use std::future::Future; 7 | use std::pin::Pin; 8 | use futures::future::{BoxFuture}; 9 | 10 | // use pin_utils::{unsafe_pinned, unsafe_unpinned}; 11 | 12 | // #[derive(Clone)] 13 | pub enum ResponseLogic 14 
| { 15 | Parallel(Vec), 16 | Serial(Vec) 17 | } 18 | 19 | // #[derive(Debug)] 20 | pub struct StartUrl 21 | { 22 | pub url: Option, 23 | pub method: Option, 24 | pub response_logic: Option 25 | } 26 | 27 | 28 | 29 | pub trait Store: Send + Sync + 'static { 30 | /// Invoke the endpoint within the given context 31 | fn call<'a>(&'a self, data: Vec) -> BoxFuture<'a, ()>; 32 | } 33 | 34 | pub type DynStore = dyn Store; 35 | 36 | impl Store for F 37 | where 38 | F: Fn(Vec) -> Fut, 39 | Fut: Future + Send + 'static, 40 | { 41 | fn call<'a>(&'a self, data: Vec) -> BoxFuture<'a, ()> { 42 | let fut = (self)(data); 43 | Box::pin(async move { fut.await; }) 44 | } 45 | } 46 | 47 | 48 | impl StartUrl 49 | { 50 | pub fn new() -> StartUrl{ 51 | StartUrl { 52 | url: None, 53 | method: None, 54 | response_logic: None 55 | } 56 | } 57 | pub fn url>(mut self, url: S) -> Self { 58 | self.url = Some(url.into()); 59 | self 60 | } 61 | pub fn method>(mut self, method: S) -> Self { 62 | self.method = Some(method.into()); 63 | self 64 | } 65 | pub fn response_logic(mut self, response_logic: ResponseLogic) -> Self { 66 | self.response_logic = Some(response_logic); 67 | self 68 | } 69 | // pub fn to_owned(&self) -> Self { 70 | // *self 71 | // } 72 | } 73 | 74 | // #[derive(Clone)] 75 | pub struct Scrape 76 | { 77 | pub executables: Vec>, 78 | // text: Text 79 | } 80 | 81 | // use std::fmt; 82 | // impl fmt::Debug for dyn Predicate { 83 | // fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 84 | // write!(f, "Predicate") 85 | // } 86 | // } 87 | 88 | 89 | // #[derive(Clone)] 90 | pub enum Ops 91 | { 92 | // Pred(Selector), 93 | UrlSelector(Selector), 94 | UrlExtractor(ElementUrlExtractor), 95 | DataSelector(Selector), 96 | DataExtractor(ElementDataExtractor), 97 | ResponseLogic(ResponseLogic), 98 | Store(Box) 99 | } 100 | 101 | 102 | // struct S 103 | // where 104 | // F: std::future::Future, 105 | // { 106 | // foo: fn(u8) -> F, 107 | // } 108 | 109 | pub enum ElementUrlExtractor { 110 | Attr(String) 111 | } 112 | 113 | pub enum ElementDataExtractor { 114 | Text 115 | } 116 | 117 | impl Scrape 118 | { 119 | // unsafe_unpinned!(executables: C); 120 | 121 | pub fn new() -> Scrape { 122 | Scrape { 123 | executables: vec![] 124 | } 125 | } 126 | // pub fn find>(mut self, predicate: S) -> Self { 127 | // self.executables.push(Box::new(Ops::Pred(Selector::parse(&predicate.into()).unwrap()))); 128 | // self 129 | // } 130 | pub fn find_elements_with_data>(mut self, predicate: S) -> Self { 131 | self.executables.push(Box::new(Ops::DataSelector(Selector::parse(&predicate.into()).unwrap()))); 132 | self 133 | } 134 | pub fn extract_data_from_elements(mut self, extractor: ElementDataExtractor) -> Self { 135 | self.executables.push(Box::new(Ops::DataExtractor(extractor))); 136 | self 137 | } 138 | 139 | pub fn find_elements_with_urls>(mut self, predicate: S) -> Self { 140 | self.executables.push(Box::new(Ops::UrlSelector(Selector::parse(&predicate.into()).unwrap()))); 141 | self 142 | } 143 | pub fn extract_urls_from_elements(mut self, extractor: ElementUrlExtractor) -> Self { 144 | self.executables.push(Box::new(Ops::UrlExtractor(extractor))); 145 | self 146 | } 147 | pub fn response_logic(mut self, resp_logic: ResponseLogic) -> Self { 148 | self.executables.push(Box::new(Ops::ResponseLogic(resp_logic))); 149 | self 150 | } 151 | 152 | pub fn store( 153 | mut self, 154 | c: impl Store 155 | ) -> Self 156 | { 157 | self.executables.push(Box::new(Ops::Store(Box::new(c)))); 158 | self 159 | } 160 | } 161 | 162 | 163 | 164 | 
--------------------------------------------------------------------------------