├── benchmarks ├── src │ └── lib.rs ├── Cargo.toml ├── results │ └── parallel │ │ ├── 256.log │ │ ├── 128.log │ │ ├── 512.log │ │ ├── 1024.log │ │ ├── 2048.log │ │ └── 64.log ├── tools │ └── crunch.pl ├── README.md ├── benches │ └── compare_methods.rs └── Cargo.lock ├── .gitignore ├── clippy.toml ├── test ├── bugreports.rs ├── version.rs ├── tests.rs ├── reentrant_wait.rs ├── io_blocking.rs ├── prelude_api.rs └── early_cleanup.rs ├── .travis.yml ├── LICENSE-MIT ├── src ├── stack_cache.rs ├── errors.rs ├── io.rs ├── wrappers.rs ├── switch.rs ├── lib.rs ├── prelude.rs └── coroutine.rs ├── Cargo.toml ├── CHANGELOG.md ├── examples ├── echo_server.rs └── chat_server.rs ├── README.md ├── TODO ├── .appveyor.yml └── LICENSE-APACHE /benchmarks/src/lib.rs: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | */target/ 3 | **/*.rs.bk 4 | tags 5 | -------------------------------------------------------------------------------- /clippy.toml: -------------------------------------------------------------------------------- 1 | # The lint is kind of stupid, because hex literals don't convey the general size 2 | literal-representation-threshold = 10000000000 3 | -------------------------------------------------------------------------------- /test/bugreports.rs: -------------------------------------------------------------------------------- 1 | use corona::prelude::*; 2 | 3 | #[test] 4 | fn bug_4_weird_stack_size_assert() { 5 | Coroutine::new() 6 | .stack_size(10_000) 7 | .run(|| {}) 8 | .expect("failed") 9 | } 10 | -------------------------------------------------------------------------------- /test/version.rs: -------------------------------------------------------------------------------- 1 | #[test] 2 | fn test_readme_deps() { 3 | assert_markdown_deps_updated!("README.md"); 4 | } 5 | 6 | #[test] 7 | fn test_html_root_url() { 8 | assert_html_root_url_updated!("src/lib.rs"); 9 | } 10 | -------------------------------------------------------------------------------- /test/tests.rs: -------------------------------------------------------------------------------- 1 | extern crate corona; 2 | extern crate futures; 3 | extern crate tokio; 4 | #[macro_use] 5 | extern crate version_sync; 6 | 7 | mod bugreports; 8 | mod early_cleanup; 9 | #[cfg(feature = "blocking-wrappers")] 10 | mod io_blocking; 11 | mod prelude_api; 12 | mod reentrant_wait; 13 | mod version; 14 | -------------------------------------------------------------------------------- /benchmarks/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "benchmarks" 3 | version = "0.1.0" 4 | authors = ["Michal 'vorner' Vaner "] 5 | workspace = ".." 6 | publish = false 7 | 8 | [dependencies] 9 | 10 | [dev-dependencies] 11 | corona = { path = ".." 
} 12 | futures-await = "~0.1" 13 | futures-cpupool = "~0.1" 14 | lazy_static = "~1" 15 | may = "~0.2" 16 | net2 = "~0.2" 17 | num_cpus = "~1" 18 | tokio = "~0.1" 19 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | cache: cargo 3 | rust: 4 | - stable 5 | - beta 6 | - nightly 7 | os: 8 | - linux 9 | - osx 10 | 11 | before_script: 12 | - | 13 | (test "$TRAVIS_RUST_VERSION" != nightly || travis_wait cargo install --force clippy || true) 14 | 15 | script: 16 | - | 17 | export PATH="$PATH":~/.cargo/bin && 18 | export RUST_BACKTRACE=1 && 19 | export CARGO_INCREMENTAL=1 && 20 | cargo build && 21 | cargo test && 22 | cargo test --release && 23 | cargo doc --no-deps && 24 | (test "$TRAVIS_RUST_VERSION" != nightly || cargo clippy -- --deny clippy) 25 | 26 | matrix: 27 | allow_failures: 28 | - rust: nightly 29 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 tokio-jsonrpc developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /src/stack_cache.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::collections::HashMap; 3 | 4 | use context::stack::ProtectedFixedSizeStack; 5 | 6 | use errors::StackError; 7 | 8 | thread_local! { 9 | static CACHE: RefCell>> = 10 | RefCell::new(HashMap::new()); 11 | } 12 | 13 | /// Get a stack of the given size. 14 | /// 15 | /// Retrieve it from the cache or create a new one, if none is available. 16 | /// 17 | /// The cache is thread local. 18 | pub(crate) fn get(size: usize) -> Result { 19 | CACHE.with(|c| { 20 | let mut cell = c.borrow_mut(); 21 | cell.get_mut(&size) 22 | .and_then(|v| v.pop().map(Ok)) 23 | .unwrap_or_else(|| { 24 | ProtectedFixedSizeStack::new(size) 25 | }) 26 | }) 27 | } 28 | 29 | /// Put a stack into the cache, for future reuse. 30 | /// 31 | /// The cache is thread local and the stack will be returned in some future 32 | /// [`get`](function.get.html) call. 
33 | pub(crate) fn put(stack: ProtectedFixedSizeStack) { 34 | let len = stack.len(); 35 | CACHE.with(|c| c.borrow_mut().entry(len).or_insert_with(Vec::new).push(stack)); 36 | } 37 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "corona" 3 | # Also increase the version in src/lib html root attribute 4 | version = "0.4.3" 5 | authors = ["Michal 'vorner' Vaner "] 6 | description = "Coroutine and Async/Await support for tokio-based futures" 7 | documentation = "https://docs.rs/corona" 8 | repository = "https://github.com/vorner/corona" 9 | readme = "README.md" 10 | keywords = ["coroutine", "future", "tokio", "await"] 11 | categories = ["asynchronous"] 12 | license = "Apache-2.0/MIT" 13 | 14 | [badges] 15 | travis-ci = { repository = "vorner/corona" } 16 | appveyor = { repository = "vorner/corona" } 17 | maintenance = { status = "deprecated" } 18 | 19 | [workspace] 20 | members = ["benchmarks"] 21 | 22 | [features] 23 | default = ["blocking-wrappers", "convenient-run"] 24 | blocking-wrappers = ["tokio-io"] 25 | convenient-run = ["tokio"] 26 | 27 | [dependencies] 28 | context = ">=2.1" 29 | futures = "~0.1" 30 | tokio = { version = "~0.1", optional = true } 31 | tokio-current-thread = "~0.1" 32 | tokio-io = { version = "~0.1", optional = true } 33 | 34 | [[test]] 35 | name = "integration_tests" 36 | path = "test/tests.rs" 37 | 38 | [dev-dependencies] 39 | bytes = "~0.4" 40 | tokio = "~0.1" 41 | tokio-io = "~0.1" 42 | version-sync = "~0.5" 43 | -------------------------------------------------------------------------------- /benchmarks/results/parallel/256.log: -------------------------------------------------------------------------------- 1 | 50,474,698 ns/iter (+/- 3,041,003) async 2 | 28,262,775 ns/iter (+/- 4,597,678) async_cpupool 3 | 30,991,612 ns/iter (+/- 5,888,160) async_cpupool_cpus 4 | 27,406,894 ns/iter (+/- 2,956,637) async_cpupool_many 5 | 21,140,164 ns/iter (+/- 1,808,288) async_cpus 6 | 29,511,965 ns/iter (+/- 4,005,716) async_many 7 | 69,096,282 ns/iter (+/- 6,546,174) corona 8 | 68,831,588 ns/iter (+/- 4,324,940) corona_blocking_wrapper 9 | 24,731,765 ns/iter (+/- 2,405,473) corona_blocking_wrapper_cpus 10 | 38,610,118 ns/iter (+/- 3,149,792) corona_blocking_wrapper_many 11 | 24,553,860 ns/iter (+/- 2,361,122) corona_cpus 12 | 37,947,981 ns/iter (+/- 2,934,502) corona_many 13 | 53,621,637 ns/iter (+/- 6,914,495) futures 14 | 27,883,922 ns/iter (+/- 4,555,783) futures_cpupool 15 | 30,202,761 ns/iter (+/- 3,371,911) futures_cpupool_cpus 16 | 27,765,714 ns/iter (+/- 37,878,084) futures_cpupool_many 17 | 30,850,651 ns/iter (+/- 3,676,776) futures_cpus 18 | 30,812,344 ns/iter (+/- 4,771,315) futures_many 19 | 36,430,992 ns/iter (+/- 3,667,559) may 20 | 39,071,158 ns/iter (+/- 3,994,500) may_cpus 21 | 37,974,421 ns/iter (+/- 4,543,238) may_many 22 | 44,144,915 ns/iter (+/- 4,646,951) threads 23 | 53,861,493 ns/iter (+/- 3,585,033) threads_cpus 24 | 44,757,753 ns/iter (+/- 4,293,267) threads_many 25 | -------------------------------------------------------------------------------- /benchmarks/results/parallel/128.log: -------------------------------------------------------------------------------- 1 | 25,129,522 ns/iter (+/- 1,948,456) async 2 | 13,895,220 ns/iter (+/- 2,412,441) async_cpupool 3 | 16,449,300 ns/iter (+/- 7,062,024) async_cpupool_cpus 4 | 14,118,352 ns/iter (+/- 3,170,100) async_cpupool_many 5 | 11,057,663 ns/iter 
(+/- 1,546,785) async_cpus 6 | 14,304,595 ns/iter (+/- 1,966,185) async_many 7 | 34,898,721 ns/iter (+/- 204,006,257) corona 8 | 35,851,174 ns/iter (+/- 76,716,240) corona_blocking_wrapper 9 | 12,601,478 ns/iter (+/- 1,368,466) corona_blocking_wrapper_cpus 10 | 20,098,071 ns/iter (+/- 84,982,750) corona_blocking_wrapper_many 11 | 12,483,281 ns/iter (+/- 1,547,974) corona_cpus 12 | 19,673,364 ns/iter (+/- 2,028,918) corona_many 13 | 27,259,033 ns/iter (+/- 3,447,498) futures 14 | 14,161,521 ns/iter (+/- 3,190,636) futures_cpupool 15 | 16,162,328 ns/iter (+/- 204,181,954) futures_cpupool_cpus 16 | 14,398,131 ns/iter (+/- 2,722,364) futures_cpupool_many 17 | 17,100,394 ns/iter (+/- 207,223,624) futures_cpus 18 | 16,857,098 ns/iter (+/- 3,664,201) futures_many 19 | 17,407,826 ns/iter (+/- 204,029,107) may 20 | 16,940,574 ns/iter (+/- 3,652,612) may_cpus 21 | 17,976,177 ns/iter (+/- 2,738,411) may_many 22 | 22,011,601 ns/iter (+/- 1,645,368) threads 23 | 26,045,198 ns/iter (+/- 5,311,040) threads_cpus 24 | 22,439,402 ns/iter (+/- 2,497,508) threads_many 25 | -------------------------------------------------------------------------------- /benchmarks/results/parallel/512.log: -------------------------------------------------------------------------------- 1 | 101,686,994 ns/iter (+/- 5,123,786) async 2 | 56,108,200 ns/iter (+/- 7,216,202) async_cpupool 3 | 57,927,110 ns/iter (+/- 9,826,608) async_cpupool_cpus 4 | 56,999,093 ns/iter (+/- 6,922,906) async_cpupool_many 5 | 41,948,095 ns/iter (+/- 3,438,450) async_cpus 6 | 56,715,692 ns/iter (+/- 5,525,090) async_many 7 | 140,258,883 ns/iter (+/- 15,710,836) corona 8 | 140,791,160 ns/iter (+/- 8,103,792) corona_blocking_wrapper 9 | 50,665,896 ns/iter (+/- 11,235,829) corona_blocking_wrapper_cpus 10 | 76,198,800 ns/iter (+/- 4,139,361) corona_blocking_wrapper_many 11 | 49,710,450 ns/iter (+/- 9,349,531) corona_cpus 12 | 76,439,679 ns/iter (+/- 3,527,176) corona_many 13 | 107,429,864 ns/iter (+/- 7,347,724) futures 14 | 56,125,546 ns/iter (+/- 7,982,545) futures_cpupool 15 | 60,735,392 ns/iter (+/- 11,645,379) futures_cpupool_cpus 16 | 60,375,436 ns/iter (+/- 16,537,147) futures_cpupool_many 17 | 60,969,328 ns/iter (+/- 5,157,085) futures_cpus 18 | 60,351,847 ns/iter (+/- 6,153,672) futures_many 19 | 75,033,737 ns/iter (+/- 9,063,053) may 20 | 76,966,372 ns/iter (+/- 11,302,646) may_cpus 21 | 74,542,299 ns/iter (+/- 10,573,275) may_many 22 | 94,386,076 ns/iter (+/- 13,568,794) threads 23 | 110,276,548 ns/iter (+/- 9,329,872) threads_cpus 24 | 101,418,539 ns/iter (+/- 12,408,728) threads_many 25 | -------------------------------------------------------------------------------- /benchmarks/results/parallel/1024.log: -------------------------------------------------------------------------------- 1 | 201,748,759 ns/iter (+/- 11,132,982) async 2 | 113,050,366 ns/iter (+/- 23,130,818) async_cpupool 3 | 126,168,173 ns/iter (+/- 48,294,060) async_cpupool_cpus 4 | 118,648,923 ns/iter (+/- 27,967,392) async_cpupool_many 5 | 86,415,557 ns/iter (+/- 7,908,999) async_cpus 6 | 114,935,383 ns/iter (+/- 7,619,399) async_many 7 | 282,380,663 ns/iter (+/- 27,558,525) corona 8 | 283,125,800 ns/iter (+/- 33,840,360) corona_blocking_wrapper 9 | 120,137,767 ns/iter (+/- 38,051,147) corona_blocking_wrapper_cpus 10 | 156,784,673 ns/iter (+/- 13,232,510) corona_blocking_wrapper_many 11 | 111,638,153 ns/iter (+/- 14,931,491) corona_cpus 12 | 154,046,011 ns/iter (+/- 11,301,858) corona_many 13 | 215,545,183 ns/iter (+/- 14,733,012) futures 14 | 116,971,911 ns/iter (+/- 
18,748,185) futures_cpupool 15 | 130,752,065 ns/iter (+/- 44,598,536) futures_cpupool_cpus 16 | 121,116,237 ns/iter (+/- 18,981,789) futures_cpupool_many 17 | 121,592,928 ns/iter (+/- 9,686,767) futures_cpus 18 | 120,843,597 ns/iter (+/- 7,449,768) futures_many 19 | 148,588,569 ns/iter (+/- 20,035,373) may 20 | 152,304,456 ns/iter (+/- 13,476,902) may_cpus 21 | 153,573,235 ns/iter (+/- 26,021,327) may_many 22 | 227,695,889 ns/iter (+/- 29,313,139) threads 23 | 245,212,996 ns/iter (+/- 27,719,783) threads_cpus 24 | 230,794,100 ns/iter (+/- 32,547,282) threads_many 25 | -------------------------------------------------------------------------------- /benchmarks/results/parallel/2048.log: -------------------------------------------------------------------------------- 1 | 403,191,038 ns/iter (+/- 22,391,921) async 2 | 242,253,835 ns/iter (+/- 43,748,743) async_cpupool 3 | 266,967,847 ns/iter (+/- 61,309,748) async_cpupool_cpus 4 | 247,038,291 ns/iter (+/- 43,354,166) async_cpupool_many 5 | 192,441,573 ns/iter (+/- 27,573,891) async_cpus 6 | 234,302,146 ns/iter (+/- 16,341,637) async_many 7 | 594,194,088 ns/iter (+/- 83,839,619) corona 8 | 594,341,355 ns/iter (+/- 63,834,994) corona_blocking_wrapper 9 | 236,800,398 ns/iter (+/- 26,743,191) corona_blocking_wrapper_cpus 10 | 324,948,761 ns/iter (+/- 36,515,432) corona_blocking_wrapper_many 11 | 231,934,640 ns/iter (+/- 27,832,360) corona_cpus 12 | 329,494,267 ns/iter (+/- 37,255,836) corona_many 13 | 431,032,233 ns/iter (+/- 22,382,893) futures 14 | 243,286,739 ns/iter (+/- 57,014,609) futures_cpupool 15 | 266,311,288 ns/iter (+/- 54,712,152) futures_cpupool_cpus 16 | 253,001,981 ns/iter (+/- 62,289,552) futures_cpupool_many 17 | 247,619,492 ns/iter (+/- 18,567,623) futures_cpus 18 | 245,469,219 ns/iter (+/- 16,750,416) futures_many 19 | 318,654,188 ns/iter (+/- 104,996,757) may 20 | 290,569,062 ns/iter (+/- 32,685,311) may_cpus 21 | 306,880,941 ns/iter (+/- 93,345,441) may_many 22 | 620,455,998 ns/iter (+/- 379,442,253) threads 23 | 744,399,170 ns/iter (+/- 513,822,650) threads_cpus 24 | 598,618,500 ns/iter (+/- 357,603,853) threads_many 25 | -------------------------------------------------------------------------------- /benchmarks/results/parallel/64.log: -------------------------------------------------------------------------------- 1 | 12,558,172 ns/iter (+/- 202,530,724) async 2 | 7,474,929 ns/iter (+/- 202,263,640) async_cpupool 3 | 8,527,293 ns/iter (+/- 206,153,893) async_cpupool_cpus 4 | 8,221,107 ns/iter (+/- 200,934,734) async_cpupool_many 5 | 5,733,656 ns/iter (+/- 203,090,250) async_cpus 6 | 9,231,923 ns/iter (+/- 201,386,661) async_many 7 | 17,094,665 ns/iter (+/- 193,376,476) corona 8 | 17,021,656 ns/iter (+/- 194,926,269) corona_blocking_wrapper 9 | 6,481,907 ns/iter (+/- 203,707,427) corona_blocking_wrapper_cpus 10 | 10,593,713 ns/iter (+/- 101,387,214) corona_blocking_wrapper_many 11 | 6,469,426 ns/iter (+/- 123,133,418) corona_cpus 12 | 10,297,160 ns/iter (+/- 106,066,000) corona_many 13 | 14,137,458 ns/iter (+/- 204,619,309) futures 14 | 7,520,827 ns/iter (+/- 201,030,666) futures_cpupool 15 | 8,269,836 ns/iter (+/- 178,813,390) futures_cpupool_cpus 16 | 7,467,551 ns/iter (+/- 1,556,279) futures_cpupool_many 17 | 8,777,875 ns/iter (+/- 25,989,708) futures_cpus 18 | 8,925,556 ns/iter (+/- 203,089,431) futures_many 19 | 6,958,079 ns/iter (+/- 110,869,658) may 20 | 8,104,422 ns/iter (+/- 202,429,210) may_cpus 21 | 7,529,970 ns/iter (+/- 202,571,445) may_many 22 | 10,947,357 ns/iter (+/- 112,815,029) threads 23 | 11,563,243 
ns/iter (+/- 112,758,330) threads_cpus 24 | 11,287,597 ns/iter (+/- 202,778,413) threads_many 25 | -------------------------------------------------------------------------------- /benchmarks/tools/crunch.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl 2 | use common::sense; 3 | use Data::Dumper; 4 | 5 | # Generates gnuplot instructions to make a graph from the benchmarks. 6 | # * Run benchmarks, saving with --logfile $i.log (for each measured parameter $i) 7 | # * Run crunch.pl | gnuplot 8 | 9 | my %data; 10 | 11 | for my $f (<*.log>) { 12 | open my $in, '<', $f or die "Couldn't read $f: $!\n"; 13 | 14 | $f =~ s/\..*//; 15 | 16 | for (<$in>) { 17 | s/^\s*//; 18 | s/,//g; 19 | my ($time, $name) = (/(\d+).* (\w+)/); 20 | $data{$name}->{$f} = $time; 21 | } 22 | } 23 | 24 | while (my ($server, $data) = each %data) { 25 | open my $out, '>', "$server.dat" or die "Couldn't write $server.dat: $!\n"; 26 | for my $size (sort { $a <=> $b } keys %$data) { 27 | print $out "$size\t$data->{$size}\n"; 28 | } 29 | } 30 | 31 | $\ = ";\n"; 32 | print "set terminal svg size 1024, 768 background rgb 'white'"; 33 | print "set output 'graph.svg'"; 34 | print "set log xyz"; 35 | print "set key right bottom"; 36 | 37 | my @colors = qw(red blue black orchid green brown purple olivegreen orange #83ffd5 #007f00 #8a0000); 38 | my $cnum; 39 | 40 | sub conv($) { 41 | my ($name) = @_; 42 | $name =~ s/_/\\_/g; 43 | return $name; 44 | } 45 | 46 | print "plot " . join ', ', (map "'$_.dat' title '".conv($_)."' with linespoints lt 1 lc rgb \"".$colors[$cnum ++ % scalar @colors]."\"", qw(async_cpus async_cpupool corona_cpus corona_blocking_wrapper_cpus futures_cpus futures_cpupool may threads)); 47 | -------------------------------------------------------------------------------- /test/reentrant_wait.rs: -------------------------------------------------------------------------------- 1 | //! Experiments and tests around reentrant API abuses. 2 | //! 3 | //! As pointed out by matklad (for which he has big thanks), combination of `unsafe`, callback and 4 | //! reentrance can often lead to unwanted results, including UB. This tests how far the API allows 5 | //! one to go. 6 | 7 | use corona::prelude::*; 8 | use tokio::prelude::*; 9 | use tokio::runtime::current_thread; 10 | 11 | #[derive(Debug, Default)] 12 | struct ReentrantPoll { 13 | reentered: bool, 14 | } 15 | 16 | impl Future for ReentrantPoll { 17 | type Item = (); 18 | type Error = (); 19 | fn poll(&mut self) -> Poll<(), ()> { 20 | if self.reentered { 21 | return Ok(Async::Ready(())); 22 | } 23 | self.reentered = true; 24 | // Wait on self 25 | match self.coro_wait() { 26 | Ok(()) => Ok(Async::Ready(())), 27 | Err(()) => Err(()), 28 | } 29 | } 30 | } 31 | 32 | /// A future directly tries to wait on itself from its own poll. That should panic, since *usually* 33 | /// the poll is called from the core, that should live outside of the coroutines. 34 | /// 35 | /// The panic prevents any potential problems caused by this. 36 | #[test] 37 | fn directly_reentrant() { 38 | current_thread::block_on_all(future::lazy(|| { 39 | Coroutine::new() 40 | .spawn_catch_panic(|| { 41 | ReentrantPoll::default().coro_wait() 42 | }).unwrap() 43 | })).unwrap_err(); // It is expected the coroutine panics 44 | } 45 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # 0.4.3 2 | 3 | * Deprecating. 
4 | 5 | # 0.4.2 6 | 7 | * Allow the stack size to be arbitrary and let the libraries deal with it by 8 | rounding up to page sizes. 9 | * A few doc link fixes. 10 | 11 | # 0.4.1 12 | 13 | * Export the coroutine module (made some intended-public things private by 14 | accident before). 15 | 16 | # 0.4.0 17 | 18 | * Ported to use tokio instead of tokio-core. 19 | 20 | # 0.4.0-pre.1 21 | 22 | * Added configuration for the cleanup strategy (e.g. when the core is dropped and 23 | the coroutines didn't have a chance to finish yet). 24 | * Added some benchmarks to measure the overhead and compare with others. 25 | * Introduced the BlockingWrapper to wrap AsyncRead/AsyncWrite things and turn 26 | them into blocking (blocking only the coroutine). This allows them to be used 27 | in futures-unaware APIs expecting Read/Write. 28 | * A panic inside a future propagates to the owning coroutine, doesn't kill the 29 | whole core (unless the panic is also propagated out of the coroutine). 30 | * The `spawn` method no longer catches panics by default. The 31 | `spawn_catch_panic` method does. 32 | 33 | # 0.3.1 34 | 35 | * Made the `Coroutine::new()` builder more ergonomic to use. 36 | * Documentation hint about stack sizes. 37 | 38 | # 0.3.0 39 | 40 | Redesign of the API. The async context is implicit in thread local storage. The 41 | interface is easier to work with and looks cleaner, at the cost of checking some 42 | misuses at runtime. 43 | 44 | Old code can be adapted mostly by removing the parameter of the closure passed 45 | to `Coroutine::new()`. 46 | -------------------------------------------------------------------------------- /examples/echo_server.rs: -------------------------------------------------------------------------------- 1 | //! A show-case of an echo server using coroutines. 2 | //! 3 | //! It listens on port 1234 and sends each line back. It handles multiple clients concurrently. 4 | 5 | extern crate corona; 6 | extern crate tokio; 7 | 8 | use std::io::BufReader; 9 | 10 | use corona::prelude::*; 11 | use tokio::net::{TcpListener, TcpStream}; 12 | use tokio::io as aio; 13 | use tokio::io::AsyncRead; 14 | 15 | fn handle_connection(connection: TcpStream) { 16 | let (input, mut output) = connection.split(); 17 | let input = BufReader::new(input); 18 | corona::spawn(move || { 19 | for line in aio::lines(input).iter_result() { 20 | // If there's an error, kill the current coroutine. That one is not waited on and the 21 | // panic won't propagate. Logging it might be cleaner, but this demonstrates how the 22 | // coroutines act. 23 | let mut line = line.unwrap(); 24 | line += "\n"; 25 | // Send it back (the coroutine will yield until the data is written).
26 | let (o_tmp, _) = aio::write_all(output, line).coro_wait().unwrap(); 27 | output = o_tmp; 28 | } 29 | println!("A connection terminated"); 30 | }); 31 | } 32 | 33 | fn main() { 34 | Coroutine::new().run(|| { 35 | // Set up of the listening socket 36 | let listener = TcpListener::bind(&"[::]:1234".parse().unwrap()).unwrap(); 37 | for attempt in listener.incoming().iter_result() { 38 | match attempt { 39 | Ok(connection) => { 40 | println!("Received a connection"); 41 | handle_connection(connection); 42 | }, 43 | Err(e) => println!("An error accepting a connection: {}", e), 44 | } 45 | } 46 | }).unwrap(); 47 | } 48 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Corona 2 | 3 | ## Deprecated 4 | 5 | This library supports tokio 0.1, which is outdated. There's not much need for it 6 | any more since Rust supports native async/await syntax. Use some of the async 7 | libraries directly. 8 | 9 | ## About the library 10 | 11 | [![Travis Build Status](https://api.travis-ci.org/vorner/corona.svg?branch=master)](https://travis-ci.org/vorner/corona) 12 | [![AppVeyor Build status](https://ci.appveyor.com/api/projects/status/ygytb97bion810ru/branch/master?svg=true)](https://ci.appveyor.com/project/vorner/corona/branch/master) 13 | 14 | When you need to get the asynchronous out of the way. 15 | 16 | Corona is a library providing stackful coroutines for Rust. They integrate well 17 | with futures ‒ it is possible to switch between the abstractions as needed, each 18 | coroutine is also a future and a coroutine can wait for a future to complete. 19 | Furthermore, the futures don't have to be `'static`. 20 | 21 | On the other hand, there's a runtime cost to the library. The performance does 22 | not necessarily suffer (as seen in the 23 | [benchmarks](https://vorner.github.io/async-bench.html)). But each coroutine has 24 | its own stack, which takes memory. 25 | 26 | You want to read the [docs](https://docs.rs/corona) and examine the 27 | [examples](https://github.com/vorner/corona/tree/master/examples). 28 | 29 | ## License 30 | 31 | Licensed under either of 32 | 33 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 34 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 35 | 36 | at your option. 37 | 38 | ### Contribution 39 | 40 | Unless you explicitly state otherwise, any contribution intentionally 41 | submitted for inclusion in the work by you, as defined in the Apache-2.0 42 | license, shall be dual licensed as above, without any additional terms 43 | or conditions. 44 | -------------------------------------------------------------------------------- /test/io_blocking.rs: -------------------------------------------------------------------------------- 1 | //! This tests the pretention of blocking IO (Read/Write). 
2 | 3 | use std::io::{BufRead, BufReader, Write}; 4 | use std::net::SocketAddr; 5 | 6 | use corona::prelude::*; 7 | use corona::io::BlockingWrapper; 8 | use tokio::runtime::current_thread::Runtime; 9 | use tokio::net::{TcpListener, TcpStream}; 10 | use tokio::prelude::*; 11 | 12 | fn server() -> SocketAddr { 13 | let listener = TcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap(); 14 | let addr = listener.local_addr().unwrap(); 15 | Coroutine::with_defaults(move || { 16 | for connection in listener.incoming().iter_ok() { 17 | Coroutine::with_defaults(move || { 18 | let (read, write) = connection.split(); 19 | let read = BufReader::new(BlockingWrapper::new(read)); 20 | let mut write = BlockingWrapper::new(write); 21 | for line in read.lines() { 22 | let mut line = line.unwrap(); 23 | line.push('\n'); 24 | write.write_all(line.as_bytes()).unwrap(); 25 | write.flush().unwrap(); 26 | } 27 | }); 28 | } 29 | }); 30 | addr 31 | } 32 | 33 | fn client(addr: &SocketAddr) { 34 | let connection = TcpStream::connect(addr) 35 | .coro_wait() 36 | .unwrap(); 37 | let mut connection = BlockingWrapper::new(connection); 38 | connection.write_all(b"hello\n").unwrap(); 39 | let mut answer = String::new(); 40 | BufReader::new(connection).read_line(&mut answer).unwrap(); 41 | assert_eq!("hello\n", answer); 42 | } 43 | 44 | /// Runs both the client and server on the same thread, so we can be sure they can switch. 45 | /// 46 | /// Both use the „normal“ blocking API, but switch coroutines under the hood. 47 | /// 48 | /// The test runs a listener, makes a single connection and exchanges a line there and back. 49 | #[test] 50 | fn line_req_resp() { 51 | let mut rt = Runtime::new().unwrap(); 52 | rt.block_on(future::lazy(|| { 53 | let addr = server(); 54 | Coroutine::with_defaults(move || { 55 | client(&addr); 56 | }) 57 | })).unwrap(); 58 | } 59 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | //! Various errors. 2 | 3 | use std::any::Any; 4 | use std::error::Error; 5 | use std::fmt::{self, Display, Formatter}; 6 | 7 | pub use context::stack::StackError; 8 | 9 | /// An error marker when a future is dropped before having change to get resolved. 10 | /// 11 | /// If you wait on a future and the corresponding executor gets destroyed before the future has a 12 | /// chance to run, this error is returned as there's no chance the future will ever get resolved. 13 | /// It is up to the waiter to clean up the stack, or use methods that panic implicitly. 14 | #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] 15 | pub struct Dropped; 16 | 17 | impl Error for Dropped { 18 | fn description(&self) -> &str { 19 | "The future waited on has been dropped before resolving" 20 | } 21 | } 22 | 23 | impl Display for Dropped { 24 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 25 | write!(f, "{}", self.description()) 26 | } 27 | } 28 | 29 | /// The task (coroutine) has failed. 30 | /// 31 | /// This is used as an error type and represents an unsuccessful coroutine. 32 | #[derive(Debug)] 33 | pub enum TaskFailed { 34 | /// There was a panic inside the coroutine. 35 | /// 36 | /// The coroutine panicked and it was spawned with 37 | /// [`spawn_catch_panic`](../coroutine/struct.Coroutine.html#method.spawn_catch_panic). 38 | Panicked(Box), 39 | /// There was a panic in the coroutine. 40 | /// 41 | /// However, the panic got re-established inside the coroutine's caller. 
Observing this result 42 | /// is rare, since usually the propagated panic destroys the owner of the coroutine as well. 43 | PanicPropagated, 44 | /// The coroutine was lost. 45 | /// 46 | /// This can happen in case the executor the coroutine was spawned onto was dropped before the 47 | /// coroutine completed. 48 | /// 49 | /// Technically, the coroutine panicked, but this special panic is handled differently. 50 | Lost, 51 | } 52 | 53 | impl Error for TaskFailed { 54 | fn description(&self) -> &str { 55 | match *self { 56 | TaskFailed::Panicked(_) | TaskFailed::PanicPropagated => "The coroutine panicked", 57 | TaskFailed::Lost => "The coroutine was lost", 58 | } 59 | } 60 | } 61 | 62 | impl Display for TaskFailed { 63 | fn fmt(&self, f: &mut Formatter) -> fmt::Result { 64 | write!(f, "{}", self.description()) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /benchmarks/README.md: -------------------------------------------------------------------------------- 1 | # The Corona benchmarks 2 | 3 | These benchmarks try to check that the overhead of Corona isn't simply insane. They 4 | run a series of tests, using different libraries and paradigms. 5 | 6 | What each test does: 7 | 8 | * Starts a server. Each server is different (using the currently measured 9 | library). 10 | * Starts a bunch of client threads. Each thread creates several connections to the 11 | server. 12 | * Each client thread then repeatedly writes and reads over all its connections. 13 | * The server echoes all the messages back. 14 | 15 | The benchmark is highly IO-heavy ‒ neither the clients nor the server do any 16 | computations, they just read and write data. 17 | 18 | ## Naming 19 | 20 | Each library has three different tests. One without a suffix, one with `_many` 21 | and another with `_cpus`. This signifies the parallelism of the server. 22 | 23 | With no suffix, a single server is started. Depending on the library or technique 24 | used, this may mean that only one thread is used or that the load is spread by 25 | the master thread to some other, slave threads. The number of slave threads is 26 | auto-configured to the number of CPUs available. 27 | 28 | The `_many` version runs a configurable number of servers in parallel. In case the 29 | technique spreads the load between further work threads, the pool of work 30 | threads is the same size as with no suffix, but there are more master threads. 31 | In case of the single-threaded approaches, it is made multi-threaded by running 32 | parallel (independent) servers on the same port. 33 | 34 | The `_cpus` version is like `_many`, but the number of instances is configured 35 | automatically to the number of available CPUs. 36 | 37 | ## Disadvantages 38 | 39 | * It doesn't measure a „real“ workload, only the switching overhead. 40 | * It uses a huge number of connections to localhost. Sometimes, the benchmark 41 | fails due to that. It may be possible to work around by allowing more ports 42 | for the local endpoint of the connections, by 43 | `echo 1024 65535 > /proc/sys/net/ipv4/ip_local_port_range`. 44 | * Sometimes the measurements are a bit unstable or the harness over-estimates the 45 | number of iterations and runs for a very long time. 46 | * The client threads run on the same machine. Therefore, they also consume CPU 47 | power, fighting for it with the server instances. Still, slower servers will 48 | produce slower benchmarks. 49 | 50 | Therefore, this needs to be taken with a grain of salt.
51 | 52 | ## Configuration 53 | 54 | There are several environment variables (look into the source code) that 55 | configure aspects of the benchmarks ‒ how many threads are started, how many 56 | messages are exchanged on each connection, etc. 57 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | # Docs 2 | 3 | * More examples 4 | - Including comparison with other methods, like threads, plain futures, 5 | futures-await, etc. 6 | 7 | # Panicking/unwinding 8 | 9 | * Coroutine::scoped & Coroutine::main 10 | 11 | # Interface 12 | 13 | * Something like select? 14 | * Wait for future with timeout? 15 | * Iterator that leaves the rest there on break 16 | * Scoped spawns (so the data passed inside the closure don't have to be 17 | 'static). Maybe call it join? 18 | * Accept None as handle and spawn our own. Or, allow even others executors. 19 | * Access to the builder that created this coroutine, so we can spawn with the 20 | same config (and provide default if we're not inside a coroutine)? This could 21 | be handy for libraries. 22 | * Include some logging. 23 | * Make „eagerness“ of start of the coroutine configurable (eg. do we schedule 24 | it to run later on, or execute it right await and stop the parent while it's 25 | running). 26 | * Cleanup control: add more than just leak_on_panic (always leaking, directly 27 | aborting, etc). 28 | * corona::io::BlockingWrapper could use some more methods, to expose the inner 29 | stream. 30 | 31 | # Threads 32 | 33 | It's impossible to do proper N:M threading scheme in Rust (or it seems). But it 34 | *is* possible to pick a thread at coroutine creation and let it live its whole 35 | life there. 36 | 37 | There are questions, though: 38 | 39 | * We probably want to have this as feature gate. Do we want to provide „dummy“ 40 | implementation that doesn't go into background thread if not enabled? 41 | * What is the preferred way? Have our own thread pool for coroutines, with 42 | lazily (or configurable-started) threads, and using thread futures-cpupool 43 | for the futures we wait for? Or have a core (or another executor) in each of 44 | the threads to place them there? 45 | * Have a one, global pool or something like per-builder? 46 | * Automatically send waited-on coroutines to the futures-cpupool if they're 47 | send? 48 | 49 | # Misc 50 | 51 | * Make the waits without allocating 52 | - Have a (handle-local) `FuturesUnordered` 53 | - Each coroutine pre-allocates one future in there. 54 | - The content may be either empty (the coroutine isn't waiting on anything), 55 | a pointer to the actual waited-on future (on the coroutine's stack) or Done, 56 | at which point it'll resolve. 57 | - We need to switch the content and notify it when needed. 58 | - There are some challenges with ensuring we know pointers both way and that 59 | nothing gets used after freeing. If possible without too many levels of 60 | indirection. Maybe something could be allocated at the bottom of the 61 | coroutine's stack? 62 | 63 | # Tests 64 | 65 | * Does it make sense to do some kind of fuzzying? How would that work, spawning 66 | random coroutines that randomly do something and yield? 67 | 68 | # Tokio porting 69 | 70 | * Allow specifying handle to the runtime/executor (and use the default one by default). 
71 | -------------------------------------------------------------------------------- /test/prelude_api.rs: -------------------------------------------------------------------------------- 1 | use futures::{future, stream}; 2 | use futures::sync::mpsc; 3 | use tokio::runtime::current_thread::Runtime; 4 | 5 | use corona; 6 | use corona::prelude::*; 7 | 8 | /// A coroutine test fixture, for convenience and shared methods. 9 | struct Cor { 10 | coroutine: Coroutine, 11 | runtime: Runtime, 12 | } 13 | 14 | impl Cor { 15 | fn new() -> Cor { 16 | let runtime = Runtime::new().unwrap(); 17 | let coroutine = Coroutine::new(); 18 | Cor { 19 | coroutine, 20 | runtime, 21 | } 22 | } 23 | /// Starts a coroutine containing F and checks it returns 42 24 | fn cor_ft<F: FnOnce() -> u32 + 'static>(&mut self, f: F) { 25 | let coroutine = self.coroutine.clone(); 26 | let result = self.runtime.block_on(future::lazy(move || coroutine.spawn(f).unwrap())); 27 | assert_eq!(42, result.unwrap()); 28 | } 29 | } 30 | 31 | /// One future to wait on 32 | #[test] 33 | fn coro_wait() { 34 | Cor::new().cor_ft(|| future::ok::<_, ()>(42).coro_wait().unwrap()); 35 | } 36 | 37 | /// A stream with single Ok element 38 | #[test] 39 | fn iter_ok() { 40 | Cor::new().cor_ft(|| stream::once::<_, ()>(Ok(42)).iter_ok().sum()); 41 | } 42 | 43 | /// Stream with multiple elements, some errors. This one terminates at the first error. 44 | #[test] 45 | fn iter_ok_many() { 46 | Cor::new().cor_ft(|| stream::iter_result(vec![Ok(42), Err(()), Ok(100)]).iter_ok().sum()); 47 | } 48 | 49 | /// A stream with multiple elements, some errors. This one *skips* errors. 50 | #[test] 51 | fn iter_result() { 52 | Cor::new() 53 | .cor_ft(|| { 54 | stream::iter_result(vec![Ok(12), Err(()), Ok(30)]) 55 | .iter_result() 56 | .filter_map(Result::ok) 57 | .sum() 58 | }); 59 | } 60 | 61 | /// Makes sure we work with non-'static futures. This one touches stuff on the stack. 62 | #[test] 63 | fn reference() { 64 | Cor::new() 65 | .cor_ft(|| { 66 | struct Num(u32); 67 | let num = Num(42); 68 | let num_ref = &num; 69 | future::ok::<_, ()>(num_ref) 70 | .coro_wait() 71 | .map(|&Num(num)| num) 72 | .unwrap() 73 | }); 74 | } 75 | 76 | /// Pushing things into a sink, which must switch between the coroutines. 77 | #[test] 78 | fn push_sink() { 79 | let sum = Coroutine::new().run(|| { 80 | let (mut sender, receiver) = mpsc::channel(1); 81 | corona::spawn(move || { 82 | sender.coro_send(2).unwrap(); 83 | sender.coro_send_many(vec![20, 20]).unwrap().unwrap(); 84 | }); 85 | 86 | receiver.iter_ok().sum() 87 | }).unwrap(); 88 | assert_eq!(42, sum); 89 | } 90 | 91 | /// Taking one thing out of a stream 92 | #[test] 93 | fn extract() { 94 | let mut cor = Cor::new(); 95 | let mut s = stream::once::<_, ()>(Ok(42)); 96 | cor.cor_ft(move || s.coro_next().unwrap().unwrap()); 97 | } 98 | -------------------------------------------------------------------------------- /src/io.rs: -------------------------------------------------------------------------------- 1 | //! Primitives to turn `AsyncRead` and `AsyncWrite` into (coroutine) blocking `Read` and `Write`. 2 | 3 | use std::io::{Read, Write, Result as IoResult}; 4 | use tokio_io::{AsyncRead, AsyncWrite}; 5 | use tokio_io::io; 6 | 7 | use super::prelude::*; 8 | 9 | /// A wrapper to turn async IO streams into sync ones. 10 | /// 11 | /// This can be used to wrap an asynchronous stream ‒ anything that is `AsyncRead` or `AsyncWrite` 12 | /// (like tokio's `TcpStream`) into a sync one.
When performing IO, the current coroutine is 13 | /// suspended, but the thread isn't blocked. 14 | /// 15 | /// This makes it possible to use blocking API (for example `serde_json::from_reader`) on 16 | /// asynchronous primitives. 17 | /// 18 | /// Note that if `T` is `AsyncRead` (or `AsyncWrite`), `&mut T` is too. Therefore, it is possible 19 | /// both to turn the stream into a sync one permanently (or, until the wrapper is unwrapped with 20 | /// [`into_inner`](#method.into_inner)), or just temporarily. 21 | /// 22 | /// # Examples 23 | /// 24 | /// ``` 25 | /// # extern crate corona; 26 | /// # extern crate tokio; 27 | /// use std::io::{Read, Result as IoResult}; 28 | /// use corona::io::BlockingWrapper; 29 | /// use tokio::net::TcpStream; 30 | /// 31 | /// fn blocking_read(connection: &mut TcpStream) -> IoResult<()> { 32 | /// let mut connection = BlockingWrapper::new(connection); 33 | /// let mut buf = [0u8; 64]; 34 | /// // This will block the coroutine, but not the thread 35 | /// connection.read_exact(&mut buf) 36 | /// } 37 | /// 38 | /// # fn main() {} 39 | /// ``` 40 | /// 41 | /// # Panics 42 | /// 43 | /// Using the wrapped object may panic in these circumstances: 44 | /// 45 | /// * If it is used outside of a coroutine (as there's nothing to suspend at that time). 46 | /// * If the tokio core is dropped while waiting for data. 47 | #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] 48 | pub struct BlockingWrapper<T>(T); 49 | 50 | impl<T> BlockingWrapper<T> { 51 | /// Wraps the stream and turns it into a synchronous one. 52 | pub fn new(stream: T) -> Self { 53 | BlockingWrapper(stream) 54 | } 55 | /// Accesses the inner stream. 56 | pub fn inner(&self) -> &T { 57 | &self.0 58 | } 59 | /// Accesses the inner stream mutably. 60 | pub fn inner_mut(&mut self) -> &mut T { 61 | &mut self.0 62 | } 63 | /// Consumes the wrapper and produces the original stream. 64 | pub fn into_inner(self) -> T { 65 | self.0 66 | } 67 | } 68 | 69 | impl<T> From<T> for BlockingWrapper<T> { 70 | fn from(stream: T) -> Self { 71 | Self::new(stream) 72 | } 73 | } 74 | 75 | impl<T: AsyncRead> Read for BlockingWrapper<T> { 76 | fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> { 77 | io::read(&mut self.0, buf) 78 | .coro_wait() 79 | .map(|(_stream, _buf, size)| size) 80 | } 81 | } 82 | 83 | impl<T: AsyncWrite> Write for BlockingWrapper<T> { 84 | fn write(&mut self, buf: &[u8]) -> IoResult<usize> { 85 | // TODO: The crate contains only write_all, not write that may be short. Implement our own 86 | // when we have time? Writing everything is surely allowed, but returning earlier might be 87 | // better for performance in some cases. 88 | io::write_all(&mut self.0, buf) 89 | .coro_wait() 90 | .map(|_| buf.len()) 91 | } 92 | fn flush(&mut self) -> IoResult<()> { 93 | io::flush(&mut self.0) 94 | .coro_wait() 95 | .map(|_| ()) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /.appveyor.yml: -------------------------------------------------------------------------------- 1 | # Appveyor configuration template for Rust using rustup for Rust installation 2 | # https://github.com/starkat99/appveyor-rust 3 | 4 | ## Operating System (VM environment) ## 5 | 6 | # Rust needs at least Visual Studio 2013 Appveyor OS for MSVC targets. 7 | os: Visual Studio 2015 8 | 9 | ## Build Matrix ## 10 | 11 | # This configuration will set up a build for each channel & target combination (12 windows 12 | # combinations in all). 13 | # 14 | # There are 3 channels: stable, beta, and nightly.
15 | # 16 | # Alternatively, the full version may be specified for the channel to build using that specific 17 | # version (e.g. channel: 1.5.0) 18 | # 19 | # The values for target are the set of windows Rust build targets. Each value is of the form 20 | # 21 | # ARCH-pc-windows-TOOLCHAIN 22 | # 23 | # Where ARCH is the target architecture, either x86_64 or i686, and TOOLCHAIN is the linker 24 | # toolchain to use, either msvc or gnu. See https://www.rust-lang.org/downloads.html#win-foot for 25 | # a description of the toolchain differences. 26 | # See https://github.com/rust-lang-nursery/rustup.rs/#toolchain-specification for description of 27 | # toolchains and host triples. 28 | # 29 | # Comment out channel/target combos you do not wish to build in CI. 30 | # 31 | # You may use the `cargoflags` and `RUSTFLAGS` variables to set additional flags for cargo commands 32 | # and rustc, respectively. For instance, you can uncomment the cargoflags lines in the nightly 33 | # channels to enable unstable features when building for nightly. Or you could add additional 34 | # matrix entries to test different combinations of features. 35 | environment: 36 | RUST_BACKTRACE: full 37 | matrix: 38 | 39 | ### MSVC Toolchains ### 40 | 41 | # Stable 64-bit MSVC 42 | - channel: stable 43 | target: x86_64-pc-windows-msvc 44 | # Stable 32-bit MSVC 45 | - channel: stable 46 | target: i686-pc-windows-msvc 47 | # Beta 64-bit MSVC 48 | - channel: beta 49 | target: x86_64-pc-windows-msvc 50 | # Beta 32-bit MSVC 51 | - channel: beta 52 | target: i686-pc-windows-msvc 53 | # Nightly 64-bit MSVC 54 | - channel: nightly 55 | target: x86_64-pc-windows-msvc 56 | #cargoflags: --features "unstable" 57 | # Nightly 32-bit MSVC 58 | - channel: nightly 59 | target: i686-pc-windows-msvc 60 | #cargoflags: --features "unstable" 61 | 62 | ### GNU Toolchains ### 63 | 64 | # Stable 64-bit GNU 65 | - channel: stable 66 | target: x86_64-pc-windows-gnu 67 | MSYS2_BITS: 64 68 | # Stable 32-bit GNU 69 | - channel: stable 70 | target: i686-pc-windows-gnu 71 | MSYS2_BITS: 32 72 | # Beta 64-bit GNU 73 | - channel: beta 74 | target: x86_64-pc-windows-gnu 75 | MSYS2_BITS: 64 76 | # Beta 32-bit GNU 77 | - channel: beta 78 | target: i686-pc-windows-gnu 79 | MSYS2_BITS: 32 80 | # Nightly 64-bit GNU 81 | - channel: nightly 82 | target: x86_64-pc-windows-gnu 83 | MSYS2_BITS: 64 84 | #cargoflags: --features "unstable" 85 | # Nightly 32-bit GNU 86 | - channel: nightly 87 | target: i686-pc-windows-gnu 88 | MSYS2_BITS: 32 89 | #cargoflags: --features "unstable" 90 | 91 | ### Allowed failures ### 92 | 93 | # See Appveyor documentation for specific details. In short, place any channel or targets you wish 94 | # to allow build failures on (usually nightly at least is a wise choice). This will prevent a build 95 | # or test failure in the matching channels/targets from failing the entire build. 96 | #matrix: 97 | # allow_failures: 98 | # - channel: nightly 99 | 100 | # If you only care about stable channel build failures, uncomment the following line: 101 | #- channel: beta 102 | 103 | ## Install Script ## 104 | 105 | # This is the most important part of the Appveyor configuration. This installs the version of Rust 106 | # specified by the 'channel' and 'target' environment variables from the build matrix. This uses 107 | # rustup to install Rust. 108 | # 109 | # For simple configurations, instead of using the build matrix, you can simply set the 110 | # default-toolchain and default-host manually here. 
111 | install: 112 | - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe 113 | - rustup-init -yv --default-toolchain %channel% --default-host %target% 114 | - set PATH=%PATH%;%USERPROFILE%\.cargo\bin 115 | - if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin 116 | - rustc -vV 117 | - cargo -vV 118 | 119 | ## Build Script ## 120 | 121 | # 'cargo test' takes care of building for us, so disable Appveyor's build stage. This prevents 122 | # the "directory does not contain a project or solution file" error. 123 | build: false 124 | 125 | # Uses 'cargo test' to run tests and build. Alternatively, the project may call compiled programs 126 | #directly or perform other testing commands. Rust will automatically be placed in the PATH 127 | # environment variable. 128 | test_script: 129 | - cargo test --verbose %cargoflags% 130 | - cargo test --release --verbose %cargoflags% 131 | -------------------------------------------------------------------------------- /test/early_cleanup.rs: -------------------------------------------------------------------------------- 1 | use std::cell::Cell; 2 | use std::panic::{self, AssertUnwindSafe}; 3 | use std::rc::Rc; 4 | 5 | use corona::prelude::*; 6 | use tokio::prelude::*; 7 | use tokio::runtime::current_thread::Runtime; 8 | 9 | #[derive(Clone, Default)] 10 | struct Status(Rc>, Rc>); 11 | 12 | impl Status { 13 | fn before_drop(&self) { 14 | // Check it got to the place where it waits for the future 15 | assert!(self.0.get()); 16 | assert!(!self.1.get()); 17 | // And the RC is still held by it 18 | assert_eq!(2, Rc::strong_count(&self.0)); 19 | } 20 | fn after_drop(&self, panicked: bool) { 21 | // The coroutine got cleaned up 22 | assert!(self.0.get()); 23 | assert_eq!(!panicked, self.1.get()); 24 | assert_eq!(1, Rc::strong_count(&self.0)); 25 | } 26 | } 27 | 28 | fn fut_get() -> impl Future { 29 | future::empty() 30 | } 31 | 32 | fn coroutine_panic(status: &Status) { 33 | let fut = fut_get(); 34 | status.0.set(true); 35 | let _ = fut.coro_wait(); 36 | status.1.set(true); 37 | } 38 | 39 | /// Check cleaning up the coroutines if the core is dropped and the coroutines haven't resolved 40 | /// yet. 41 | #[test] 42 | fn cleanup_panic() { 43 | let mut rt = Runtime::new().unwrap(); 44 | let mut finished = None; 45 | let status = Status::default(); 46 | // This starts the coroutine, spawns waiting onto the executor, but doesn't finish. 47 | rt.block_on(future::lazy(|| { 48 | let status = status.clone(); 49 | finished = Some(Coroutine::with_defaults(move || coroutine_panic(&status))); 50 | future::ok::<(), ()>(()) 51 | })).unwrap(); 52 | status.before_drop(); 53 | drop(rt); 54 | status.after_drop(true); 55 | finished.wait().unwrap_err(); 56 | } 57 | 58 | fn coroutine_nopanic(status: &Status) { 59 | let fut = fut_get(); 60 | status.0.set(true); 61 | fut.coro_wait_cleanup().unwrap_err(); 62 | status.1.set(true); 63 | let fut2 = fut_get(); 64 | fut2.coro_wait_cleanup().unwrap_err(); 65 | } 66 | 67 | /// Check cleaning up with manual handling of being dropped. 68 | #[test] 69 | fn cleanup_nopanic() { 70 | let mut rt = Runtime::new().unwrap(); 71 | let status = Status::default(); 72 | let mut finished = None; 73 | rt.block_on(future::lazy(|| { 74 | let status = status.clone(); 75 | finished = Some(Coroutine::with_defaults(move || coroutine_nopanic(&status))); 76 | future::ok::<(), ()>(()) 77 | })).unwrap(); 78 | status.before_drop(); 79 | // The coroutine finishes once we drop the runtime. 
Note that it finishes successfully, not 80 | // panicking. 81 | drop(rt); 82 | finished.wait().unwrap(); 83 | status.after_drop(false); 84 | } 85 | 86 | /// The cleanup method handles panicking in the main thread correctly. 87 | #[test] 88 | fn cleanup_main_panic() { 89 | let mut rt = Runtime::new().unwrap(); 90 | let status = Status::default(); 91 | let mut finished = None; 92 | rt.block_on(future::lazy(|| { 93 | let status = status.clone(); 94 | finished = Some(Coroutine::with_defaults(move || coroutine_nopanic(&status))); 95 | future::ok::<(), ()>(()) 96 | })).unwrap(); 97 | status.before_drop(); 98 | panic_rt(rt); 99 | status.after_drop(false); 100 | finished.wait().unwrap(); 101 | } 102 | 103 | /// Make the runtime go away by panicking inside a closure that holds it. We check clean up during 104 | /// dropping things. 105 | fn panic_rt(rt: Runtime) { 106 | panic::catch_unwind(AssertUnwindSafe(|| { 107 | // Steal the rt into the closure 108 | let _rt = rt; 109 | // And panic here, so the rt gets destroyed during an unwind 110 | panic!(); 111 | })) 112 | .unwrap_err(); 113 | } 114 | 115 | /// Just a testing stream. 116 | fn no_stream() -> Box> { 117 | Box::new(stream::futures_unordered(vec![future::empty::<(), ()>()])) 118 | } 119 | 120 | #[test] 121 | fn stream_cleanup() { 122 | let mut rt = Runtime::new().unwrap(); 123 | let no_stream = no_stream(); 124 | let status = Status::default(); 125 | let mut finished = None; 126 | rt.block_on(future::lazy(|| { 127 | let status = status.clone(); 128 | finished = Some(Coroutine::with_defaults(move || { 129 | status.0.set(true); 130 | for item in no_stream.iter_cleanup() { 131 | if item.is_err() { 132 | status.1.set(true); 133 | return; 134 | } 135 | } 136 | unreachable!(); 137 | })); 138 | future::ok::<(), ()>(()) 139 | })).unwrap(); 140 | status.before_drop(); 141 | panic_rt(rt); 142 | status.after_drop(false); 143 | finished.wait().unwrap(); 144 | } 145 | 146 | /// Tests the implicit panic-based cleanup of a stream 147 | #[test] 148 | fn stream_panic() { 149 | let mut rt = Runtime::new().unwrap(); 150 | let no_stream = no_stream(); 151 | let status = Status::default(); 152 | let mut finished = None; 153 | rt.block_on(future::lazy(|| { 154 | let status = status.clone(); 155 | finished = Some(Coroutine::with_defaults(move || { 156 | status.0.set(true); 157 | for _ in no_stream.iter_result() { 158 | // It'll not get here 159 | status.1.set(true); 160 | } 161 | // And it'll not get here 162 | status.1.set(true); 163 | })); 164 | future::ok::<(), ()>(()) 165 | })).unwrap(); 166 | status.before_drop(); 167 | drop(rt); 168 | status.after_drop(true); 169 | finished.wait().unwrap_err(); 170 | } 171 | -------------------------------------------------------------------------------- /examples/chat_server.rs: -------------------------------------------------------------------------------- 1 | //! A show-case of a chat server. 2 | //! 3 | //! The server listens on the port 1234 and accepts connections. Whenever a line of text comes, it 4 | //! is broadcasted to all the clients (including back). 5 | //! 6 | //! There are two long-running coroutines. One accepts the new connections and spawns a receiving 7 | //! coroutine for each. The other is on a receiving end of a channel and broadcasts each message to 8 | //! all the currently available clients. If some of them errors during the send, it is removed. 9 | //! 10 | //! Each receiving coroutine simply reads the lines from the client and stuffs them into the 11 | //! channel. 12 | //! 13 | //! 
There's a shared `Vec` of new writing halves of the connections. Before every message, the 14 | //! broadcasting coroutine extracts the new ones and appends them to its local storage (so it 15 | //! doesn't have to keep the shared state borrowed). 16 | //! 17 | //! Currently, there's very little error handling ‒ the relevant connections are simply dropped. 18 | 19 | extern crate bytes; 20 | extern crate corona; 21 | extern crate futures; 22 | extern crate tokio; 23 | 24 | use std::cell::RefCell; 25 | use std::io::{BufRead, BufReader, Error as IoError}; 26 | use std::iter; 27 | use std::panic::AssertUnwindSafe; 28 | use std::rc::Rc; 29 | 30 | use bytes::BytesMut; 31 | use corona::Coroutine; 32 | use corona::io::BlockingWrapper; 33 | use corona::prelude::*; 34 | use corona::wrappers::SinkSender; 35 | use futures::{future, Future}; 36 | use futures::unsync::mpsc::{self, Sender, Receiver}; 37 | use tokio::net::{TcpListener, TcpStream}; 38 | use tokio::io::{AsyncRead, WriteHalf}; 39 | use tokio::codec::{Encoder, FramedWrite}; 40 | 41 | /// Encoder turning strings into lines. 42 | /// 43 | /// Doesn't do much, simply passes the strings as lines. For a convenient use of `SinkSender` (the 44 | /// thing that is usually behind `coro_send`, but doesn't wait on the send, is only the future) on 45 | /// the senders. 46 | struct LineEncoder; 47 | 48 | impl Encoder for LineEncoder { 49 | type Item = Rc; 50 | type Error = IoError; 51 | fn encode(&mut self, item: Rc, dst: &mut BytesMut) -> Result<(), IoError> { 52 | dst.extend_from_slice(item.as_bytes()); 53 | dst.extend_from_slice(b"\n"); 54 | Ok(()) 55 | } 56 | } 57 | 58 | type Client = FramedWrite, LineEncoder>; 59 | type Clients = Rc>>; 60 | 61 | fn handle_connection(connection: TcpStream, 62 | clients: &Clients, 63 | mut msgs: Sender) 64 | { 65 | let (input, output) = connection.split(); 66 | let writer = FramedWrite::new(output, LineEncoder); 67 | clients.borrow_mut().push(writer); 68 | let input = BufReader::new(BlockingWrapper::new(input)); 69 | Coroutine::from_thread_local().spawn_catch_panic(AssertUnwindSafe(move || { 70 | // If there's an error, kill the current coroutine. That one is not waited on and the 71 | // panic won't propagate. Logging it might be cleaner, but this demonstrates how the 72 | // coroutines act. 73 | for line in input.lines() { 74 | let line = line.expect("Broken line on input"); 75 | // Pass each line to the broadcaster so it sends it to everyone. 76 | // Send it back (the coroutine will yield until the data is written). May block on 77 | // being full for a while, then we don't accept more messages. 78 | msgs.coro_send(line).expect("The broadcaster suddenly disappeared"); 79 | } 80 | eprintln!("A connection terminated"); 81 | })).expect("Wrong stack size"); 82 | } 83 | 84 | fn broadcaster(msgs: Receiver, clients: &Clients) { 85 | // We have to steal the clients. We can't keep a mut borrow into the clients for the time of 86 | // the future, since someone else might try to add more at the same time, which would panic. 
87 | let mut extracted = Vec::new(); 88 | for msg in msgs.iter_ok() { 89 | { // Steal the clients and return the borrow 90 | let mut borrowed = clients.borrow_mut(); 91 | extracted.extend(borrowed.drain(..)); 92 | } 93 | let broken_idxs = { 94 | let msg = Rc::new(msg); 95 | // Schedule sending of the message to everyone in parallel 96 | let all_sent = extracted.iter_mut() 97 | .map(|client| SinkSender::new(client, iter::once(Rc::clone(&msg)))) 98 | // Turn failures into falses, so it plays nice with collect below. 99 | .map(|send_future| send_future.then(|res| Ok::<_, IoError>(res.is_ok()))); 100 | future::join_all(all_sent) // Create a mega-future of everything 101 | .coro_wait() // Wait for them 102 | .unwrap() // Impossible to fail 103 | // Take only the indices of things that failed to send. 104 | .into_iter() 105 | .enumerate() 106 | .filter_map(|(idx, success)| if success { 107 | None 108 | } else { 109 | Some(idx) 110 | }) 111 | .collect::>() 112 | }; 113 | // Remove the failing ones. We go from the back, since swap_remove reorders the tail. 114 | for idx in broken_idxs.into_iter().rev() { 115 | extracted.swap_remove(idx); 116 | } 117 | } 118 | } 119 | 120 | fn acceptor(clients: &Clients, sender: &Sender) { 121 | let listener = TcpListener::bind(&"[::]:1234".parse().unwrap()).unwrap(); 122 | // This will accept the connections, but will allow other coroutines to run when there are 123 | // none ready. 124 | for attempt in listener.incoming().iter_result() { 125 | match attempt { 126 | Ok(connection) => { 127 | eprintln!("Received a connection"); 128 | handle_connection(connection, clients, sender.clone()); 129 | }, 130 | // FIXME: Are all the errors recoverable? 131 | Err(e) => eprintln!("An error accepting a connection: {}", e), 132 | } 133 | } 134 | } 135 | 136 | fn main() { 137 | Coroutine::new().stack_size(32_768).run(|| { 138 | let (sender, receiver) = mpsc::channel(100); 139 | let clients = Clients::default(); 140 | let clients_rc = Rc::clone(&clients); 141 | corona::spawn(move || broadcaster(receiver, &clients_rc)); 142 | acceptor(&clients, &sender); 143 | }).expect("Wrong stack size"); 144 | } 145 | -------------------------------------------------------------------------------- /src/wrappers.rs: -------------------------------------------------------------------------------- 1 | //! Various wrappers and helper structs. 2 | //! 3 | //! The types here are not expected to be used directly. These wrap some things (futures, 4 | //! references) and implement other functionality on them, but are usually created through methods 5 | //! in [`prelude`](../prelude/index.html). 6 | //! 7 | //! Despite that, they still can be created and used directly if the need arises. 8 | 9 | use std::panic; 10 | 11 | use futures::{Async, AsyncSink, Future, Poll, Sink, Stream}; 12 | 13 | use prelude::CoroutineFuture; 14 | use errors::Dropped; 15 | 16 | /// An iterator returned from 17 | /// [`CoroutineStream::iter_cleanup`](../prelude/trait.CoroutineStream.html#method.iter_cleanup). 18 | /// 19 | /// It wraps a stream and allows iterating through it. 20 | pub struct CleanupIterator(Option); 21 | 22 | impl CleanupIterator { 23 | /// A constructor. 24 | pub fn new(stream: S) -> Self { 25 | CleanupIterator(Some(stream)) 26 | } 27 | 28 | /// Extracts the stream inside. 29 | /// 30 | /// # Returns 31 | /// 32 | /// * `Ok(stream)` under normal circumstances. 33 | /// * `Err(Dropped)` if the stream got lost when the reactor got dropped while iterating. 
34 | pub fn into_inner(self) -> Result { 35 | self.0.ok_or(Dropped) 36 | } 37 | } 38 | 39 | impl> Iterator for CleanupIterator { 40 | type Item = Result, Dropped>; 41 | fn next(&mut self) -> Option, Dropped>> { 42 | let resolved = match self.0.take() { 43 | Some(stream) => stream.into_future().coro_wait_cleanup(), 44 | None => return Some(Err(Dropped)), // Dropped in previous attempt to iterate. Still dead. 45 | }; 46 | let (result, stream) = match resolved { 47 | Ok(Ok((None, stream))) => (None, Some(stream)), 48 | Ok(Ok((Some(ok), stream))) => (Some(Ok(Ok(ok))), Some(stream)), 49 | Ok(Err((err, stream))) => (Some(Ok(Err(err))), Some(stream)), 50 | Err(Dropped) => (Some(Err(Dropped)), None), 51 | }; 52 | self.0 = stream; 53 | result 54 | } 55 | } 56 | 57 | fn drop_panic(r: Result) -> T { 58 | r.unwrap_or_else(|_| panic::resume_unwind(Box::new(Dropped))) 59 | } 60 | 61 | /// An iterator returned from 62 | /// [`CoroutineStream::iter_ok`](../prelude/trait.CoroutineStream.html#method.iter_ok). 63 | /// 64 | /// This wraps the [`CleanupIterator`](struct.CleanupIterator.html) and provides iteration through 65 | /// the successful items. 66 | pub struct OkIterator(I); 67 | 68 | impl OkIterator { 69 | /// A constructor. 70 | pub fn new(inner: I) -> Self { 71 | OkIterator(inner) 72 | } 73 | 74 | /// Extracts the `CleanupIterator` inside. 75 | pub fn into_inner(self) -> I { 76 | self.0 77 | } 78 | } 79 | 80 | impl> Iterator for OkIterator> { 81 | type Item = I; 82 | fn next(&mut self) -> Option { 83 | self.0 84 | .next() 85 | .map(drop_panic) 86 | .and_then(Result::ok) 87 | } 88 | } 89 | 90 | /// An iterator returned from 91 | /// [`CoroutineStream::iter_result`](../prelude/trait.CoroutineStream.html#method.iter_result). 92 | /// 93 | /// This wraps the [`CleanupIterator`](struct.CleanupIterator.html) and provides iteration through 94 | /// the direct results. 95 | pub struct ResultIterator(I); 96 | 97 | impl ResultIterator { 98 | /// A constructor. 99 | pub fn new(inner: I) -> Self { 100 | ResultIterator(inner) 101 | } 102 | 103 | /// Extracts the `CleanupIterator` inside. 104 | pub fn into_inner(self) -> I { 105 | self.0 106 | } 107 | } 108 | 109 | impl> Iterator for ResultIterator> { 110 | type Item = Result; 111 | fn next(&mut self) -> Option> { 112 | self.0 113 | .next() 114 | .map(drop_panic) 115 | } 116 | } 117 | 118 | /// A future that extracts one item from a stream. 119 | /// 120 | /// This is the future returned from 121 | /// [`CoroutineStream::extractor`](../prelude/trait.CoroutineStream.html#method.extractor). It 122 | /// borrows the stream mutably and allows taking one item out of it. 123 | /// 124 | /// Unlike `Stream::into_future`, this does not consume the stream. 125 | pub struct StreamExtractor<'a, S: 'a>(&'a mut S); 126 | 127 | impl<'a, S: 'a> StreamExtractor<'a, S> { 128 | /// A constructor. 129 | pub fn new(stream: &'a mut S) -> Self { 130 | StreamExtractor(stream) 131 | } 132 | } 133 | 134 | impl<'a, I, E, S: Stream + 'a> Future for StreamExtractor<'a, S> { 135 | type Item = Option; 136 | type Error = E; 137 | fn poll(&mut self) -> Poll, E> { 138 | self.0.poll() 139 | } 140 | } 141 | 142 | /// A future sending a sequence of items into a sink. 143 | /// 144 | /// This borrows a sink and sends the provided items (from an iterator) into it. It is returned by 145 | /// [`CoroutineSink::coro_sender`](../prelude/trait.CoroutineSink.html#method.coro_sender). 
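[Editorial note] A minimal sketch of driving the iterator wrappers defined above directly, instead of going through the prelude's `iter_ok()`/`iter_cleanup()` convenience methods; the tests at the bottom of this module only exercise `StreamExtractor` and `SinkSender`. The channel and values are made up purely for illustration.

```rust
// Editorial sketch: constructing CleanupIterator/OkIterator by hand.
extern crate corona;
extern crate futures;

use corona::prelude::*;
use corona::wrappers::{CleanupIterator, OkIterator};
use futures::unsync::mpsc;

fn main() {
    let sum = Coroutine::new().run(|| {
        let (sender, receiver) = mpsc::unbounded::<u32>();
        sender.unbounded_send(21).unwrap();
        sender.unbounded_send(21).unwrap();
        drop(sender); // terminate the stream so the iteration ends
        // OkIterator yields only the successful items from the wrapped
        // CleanupIterator and stops on the first error.
        OkIterator::new(CleanupIterator::new(receiver)).sum::<u32>()
    }).unwrap();
    assert_eq!(42, sum);
}
```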
146 | pub struct SinkSender<'a, V, S: 'a, I: Iterator> { 147 | sink: &'a mut S, 148 | iter: Option, 149 | value: Option, 150 | } 151 | 152 | impl<'a, V, S: 'a, I: Iterator> SinkSender<'a, V, S, I> { 153 | /// A constructor. 154 | pub fn new>(sink: &'a mut S, src: Src) -> Self { 155 | let iter = src.into_iter(); 156 | Self { 157 | sink, 158 | iter: Some(iter), 159 | value: None, 160 | } 161 | } 162 | 163 | // Pull the next value from somewhere. 164 | fn next(&mut self) -> Option { 165 | // A postponed value 166 | if self.value.is_some() { 167 | return self.value.take(); 168 | } 169 | // If we have nothing postponed, try pulling it from an iterator, if we have one. 170 | let result = self.iter.as_mut().and_then(Iterator::next); 171 | // If we got nothing, then make sure we don't call the iterator again. 172 | if result.is_none() { 173 | self.iter = None; 174 | } 175 | result 176 | } 177 | } 178 | 179 | impl<'a, V, E, S, I> Future for SinkSender<'a, V, S, I> 180 | where 181 | S: Sink + 'a, 182 | I: Iterator, 183 | { 184 | type Item = (); 185 | type Error = E; 186 | fn poll(&mut self) -> Poll<(), E> { 187 | // First, try to push as much inside as possible. 188 | while let Some(value) = self.next() { 189 | match self.sink.start_send(value) { 190 | Err(e) => return Err(e), // Early abort on errors. 191 | Ok(AsyncSink::NotReady(returned)) => { 192 | // This item doesn't fit. Hold onto it until we are called again. 193 | self.value = Some(returned); 194 | return Ok(Async::NotReady); 195 | }, 196 | Ok(AsyncSink::Ready) => (), // Accepted, try next one. 197 | } 198 | } 199 | // By now, we put everything into the sink. Try flushing it. 200 | self.sink.poll_complete() 201 | } 202 | } 203 | 204 | #[cfg(test)] 205 | mod tests { 206 | use super::*; 207 | 208 | use futures::stream; 209 | use futures::sync::mpsc; 210 | use tokio::prelude::*; 211 | use tokio::runtime::current_thread; 212 | 213 | use prelude::*; 214 | 215 | /// Test getting things out of a stream one by one. 216 | /// 217 | /// This is similar to the .into_future() stream modifier, but doesn't consume the stream. That 218 | /// is more convenient in the context of coroutines, which allow waiting for non-'static 219 | /// futures. 220 | #[test] 221 | fn stream_extract() { 222 | let mut s = stream::once::<_, ()>(Ok(42)); 223 | assert_eq!(StreamExtractor::new(&mut s).wait(), Ok(Some(42))); 224 | assert_eq!(StreamExtractor::new(&mut s).wait(), Ok(None)); 225 | } 226 | 227 | /// A test checking that sink_sender feeds everything to the sink. 228 | /// 229 | /// This one doesn't do much async things, though, as everything fits inside right away. 230 | #[test] 231 | fn sink_sender() { 232 | let (mut sender, receiver) = mpsc::unbounded(); 233 | let data = vec![1, 2, 3]; 234 | { 235 | let sender_fut = SinkSender::new(&mut sender, data.clone()); 236 | // Just plain old future's wait. No coroutines here. 237 | sender_fut.wait().unwrap(); 238 | } 239 | drop(sender); // EOF the channel 240 | // The data is there. 241 | let received = receiver.wait().collect::, _>>().unwrap(); 242 | assert_eq!(data, received); 243 | } 244 | 245 | /// An async version of the above. 246 | /// 247 | /// It needs to switch between the two futures to complete, because not everything fits. 
248 | #[test] 249 | fn async_sink_sender() { 250 | current_thread::block_on_all(future::lazy(|| { 251 | let (mut sender, receiver) = mpsc::channel(1); 252 | let sending_fut = Coroutine::with_defaults(move || { 253 | let data = vec![1, 2, 3]; 254 | Coroutine::wait(SinkSender::new(&mut sender, data)) 255 | .unwrap() 256 | .unwrap(); 257 | }); 258 | let receiving_fut = Coroutine::with_defaults(move || { 259 | let mut result = Vec::new(); 260 | Coroutine::wait(receiver.for_each(|val| { 261 | result.push(val); 262 | Ok(()) 263 | })) 264 | .unwrap() 265 | .unwrap(); 266 | assert_eq!(vec![1, 2, 3], result); 267 | }); 268 | receiving_fut.join(sending_fut) 269 | })).unwrap(); 270 | } 271 | } 272 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /src/switch.rs: -------------------------------------------------------------------------------- 1 | //! 
Module for the low-level switching of coroutines 2 | 3 | use std::any::Any; 4 | use std::panic::{self, AssertUnwindSafe}; 5 | use std::process; 6 | use std::thread; 7 | 8 | use context::{Context, Transfer}; 9 | use context::stack::ProtectedFixedSizeStack; 10 | use futures::{Async, Future, Poll}; 11 | use tokio_current_thread::TaskExecutor; 12 | 13 | use coroutine::CleanupStrategy; 14 | use errors::StackError; 15 | use stack_cache; 16 | 17 | /// A workaround because Box is currently very unusable in rust :-(. 18 | pub(crate) trait BoxableTask { 19 | fn perform(&mut self, Context, ProtectedFixedSizeStack) -> 20 | (Context, ProtectedFixedSizeStack, Option>); 21 | } 22 | 23 | impl BoxableTask for Option 24 | where 25 | F: FnOnce(Context, ProtectedFixedSizeStack) -> 26 | (Context, ProtectedFixedSizeStack, Option>), 27 | { 28 | fn perform(&mut self, context: Context, stack: ProtectedFixedSizeStack) -> 29 | (Context, ProtectedFixedSizeStack, Option>) 30 | { 31 | self.take().unwrap()(context, stack) 32 | } 33 | } 34 | 35 | /// A fake Box Context>. 36 | type BoxedTask = Box; 37 | 38 | pub(crate) struct WaitTask { 39 | pub(crate) poll: *mut FnMut() -> Poll<(), ()>, 40 | pub(crate) context: Option, 41 | pub(crate) stack: Option, 42 | pub(crate) cleanup_strategy: CleanupStrategy, 43 | } 44 | 45 | impl Future for WaitTask { 46 | type Item = (); 47 | type Error = (); 48 | fn poll(&mut self) -> Poll<(), ()> { 49 | assert!(self.context.is_some()); 50 | // The catch unwind is fine ‒ we don't swallow the panic, only move it to the correct place 51 | // ‒ so likely everything relevant will be dropped like with any other normal panic. 52 | match panic::catch_unwind(AssertUnwindSafe(unsafe { 53 | // The future is still not dangling pointer ‒ we never resumed the stack 54 | self.poll 55 | .as_mut() 56 | .unwrap() 57 | })) { 58 | Ok(Ok(Async::NotReady)) => Ok(Async::NotReady), 59 | Ok(result) => { 60 | Switch::Resume { 61 | stack: self.stack.take().unwrap(), 62 | } 63 | .run_child(self.context.take().unwrap()); 64 | result 65 | }, 66 | Err(panic) => { 67 | Switch::PropagateFuturePanic { 68 | stack: self.stack.take().unwrap(), 69 | panic 70 | } 71 | .run_child(self.context.take().unwrap()); 72 | Err(()) 73 | }, 74 | } 75 | } 76 | } 77 | 78 | impl Drop for WaitTask { 79 | fn drop(&mut self) { 80 | if let Some(context) = self.context.take() { 81 | // Not terminated yet? 82 | let perform_cleanup = match (self.cleanup_strategy, thread::panicking()) { 83 | (CleanupStrategy::CleanupAlways, _) 84 | | (CleanupStrategy::LeakOnPanic, false) 85 | | (CleanupStrategy::AbortOnPanic, false) => true, 86 | (CleanupStrategy::LeakAlways, _) 87 | | (CleanupStrategy::LeakOnPanic, true) => false, 88 | (CleanupStrategy::AbortAlways, _) 89 | | (CleanupStrategy::AbortOnPanic, true) => { 90 | process::abort(); 91 | } 92 | }; 93 | if perform_cleanup { 94 | Switch::Cleanup { 95 | stack: self.stack.take().expect("Taken stack, but not context?") 96 | } 97 | .run_child(context); 98 | 99 | } 100 | } 101 | } 102 | } 103 | 104 | 105 | /// Execution of a coroutine. 106 | /// 107 | /// This holds the extracted logic, so once we leave the coroutine, all locals that may possibly 108 | /// have any kind of destructor are gone. 
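[Editorial note] Stripped of the coroutine machinery, the `BoxableTask` workaround defined near the top of this module boils down to one trick: implement an object-safe trait for `Option<F>` and `take()` the `FnOnce` on the first call, so the closure can be consumed through a trait object. A minimal, self-contained sketch of that pattern (the trait and signature here are invented for illustration, not part of the crate):

```rust
// Editorial sketch of the Option<FnOnce> + take() trick behind BoxableTask.
trait OnceTask {
    fn perform(&mut self, input: u32) -> u32;
}

// The trait is object safe, so it can live behind a Box; the FnOnce stored
// in the Option is consumed on the first call.
impl<F: FnOnce(u32) -> u32> OnceTask for Option<F> {
    fn perform(&mut self, input: u32) -> u32 {
        // The real code calls this exactly once; a second call would panic.
        self.take().expect("task already performed")(input)
    }
}

fn main() {
    let mut task: Box<OnceTask> = Box::new(Some(|x: u32| x + 1));
    assert_eq!(43, task.perform(42));
}
```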
109 | fn coroutine_internal(transfer: Transfer) -> (Switch, Context) { 110 | let mut context = transfer.context; 111 | let switch = Switch::extract(transfer.data); 112 | let result = match switch { 113 | Switch::StartTask { stack, mut task } => { 114 | let (ctx, stack, panic) = task.perform(context, stack); 115 | context = ctx; 116 | Switch::Destroy { 117 | stack, 118 | panic, 119 | } 120 | }, 121 | _ => panic!("Invalid switch instruction on coroutine entry"), 122 | }; 123 | (result, context) 124 | } 125 | 126 | /// Wrapper for the execution of a coroutine. 127 | /// 128 | /// This is just a minimal wrapper that runs the `coroutine_internal` and then switches back to the 129 | /// parent context. This contains very minimal amount of local variables and only the ones from the 130 | /// `context` crate, so we don't have anything with destructor here. The problem is, this function 131 | /// never finishes and therefore such destructors wouldn't be run. 132 | extern "C" fn coroutine(transfer: Transfer) -> ! { 133 | let (result, context) = coroutine_internal(transfer); 134 | result.exchange(context); 135 | unreachable!("Woken up after termination!"); 136 | } 137 | 138 | /// An instruction carried across the coroutine boundary. 139 | /// 140 | /// This describes what the receiving coroutine should do next (and contains parameters for that). 141 | /// It also holds the methods to do the actual switching. 142 | /// 143 | /// Note that not all instructions are valid at all contexts, but as this is not an API visible 144 | /// outside of the crate, that's likely OK with checking not thing invalid gets received. 145 | pub(crate) enum Switch { 146 | /// Start a new task in the coroutine. 147 | StartTask { 148 | stack: ProtectedFixedSizeStack, 149 | task: BoxedTask, 150 | }, 151 | /// Wait on a future to finish 152 | WaitFuture { 153 | task: WaitTask, 154 | }, 155 | /// A future panicked, propagate it into the coroutine. 156 | PropagateFuturePanic { 157 | stack: ProtectedFixedSizeStack, 158 | panic: Box, 159 | }, 160 | /// Continue operation, the future is resolved. 161 | Resume { 162 | stack: ProtectedFixedSizeStack, 163 | }, 164 | /// Abort the coroutine and clean up the resources. 165 | Cleanup { 166 | stack: ProtectedFixedSizeStack, 167 | }, 168 | /// Get rid of the sending coroutine, it terminated. 169 | Destroy { 170 | stack: ProtectedFixedSizeStack, 171 | /// In case the coroutine panicked and the panic should continue. 172 | panic: Option>, 173 | }, 174 | } 175 | 176 | impl Switch { 177 | /// Extracts the instruction passed through the coroutine transfer data. 178 | fn extract(transfer_data: usize) -> Switch { 179 | let ptr = transfer_data as *mut Option; 180 | // The extract is called only in two cases. When switching into a newly born coroutine and 181 | // during the exchange of two coroutines. In both cases, the caller is in this module, it 182 | // places data onto its stack and passes the pointer as the usize parameter (which is 183 | // safe). The stack is still alive at the time we are called and it hasn't moved (since our 184 | // stack got the control), so the pointer is not dangling. We just extract the data from 185 | // there right away and leave None on the stack, which doesn't need any special handling 186 | // during destruction, etc. 187 | let optref = unsafe { ptr.as_mut() } 188 | .expect("NULL pointer passed through a coroutine switch"); 189 | optref.take().expect("Switch instruction already extracted") 190 | } 191 | /// Switches to a coroutine and back. 
192 | /// 193 | /// Switches to the given context (coroutine) and sends it the current instruction. Returns the 194 | /// context that resumed us (after we are resumed) and provides the instruction it send us. 195 | /// 196 | /// # Internals 197 | /// 198 | /// There are two stacks in the play, the current one and one we want to transition into 199 | /// (described by the passed `context`). We also pass a `Switch` *instruction* along the 200 | /// transition. 201 | /// 202 | /// To pass the instruction, we abuse the usize `data` field in the underlying library for 203 | /// switching stacks (also called `context`). To do that, we place the instruction into a 204 | /// `Option` on the current stack. We pass a pointer to that `Option` through that 205 | /// `usize`. The receiving coroutine takes the instruction out of the `Option`, stealing it 206 | /// from the originating stack. The originating stack doesn't change until we pass back here. 207 | /// 208 | /// Some future exchange from that (or possibly other) stack into this will do the reverse ‒ 209 | /// activate this stack and it'll extract the instruction from that stack. 210 | /// 211 | /// As the exchange leaves just an empty `Option` behind, destroying the stack (once it asks 212 | /// for so through the instruction) is safe, we don't need to run any destructor on that. 213 | pub(crate) fn exchange(self, context: Context) -> (Self, Context) { 214 | let mut sw = Some(self); 215 | let swp: *mut Option = &mut sw; 216 | // We store the switch instruction onto the current stack and pass a pointer for it to the 217 | // other coroutine. It will get extracted just as the first thing the other coroutine does, 218 | // therefore at the time this stack frame is still active. 219 | // 220 | // Also, switching to the other coroutine is OK, because each coroutine owns its own stack 221 | // (it has it passed to it and it keeps it on its own stack until it decides to terminate 222 | // and passes it back through the instruction on switching out). So the stack can't get 223 | // destroyed prematurely. 224 | let transfer = unsafe { context.resume(swp as usize) }; 225 | (Self::extract(transfer.data), transfer.context) 226 | } 227 | /// Runs a child coroutine (one that does the work, is not a control coroutine) and once it 228 | /// returns, handles its return instruction. 229 | pub(crate) fn run_child(self, context: Context) { 230 | let (reply, context) = self.exchange(context); 231 | use self::Switch::*; 232 | match reply { 233 | Destroy { stack, panic } => { 234 | drop(context); 235 | stack_cache::put(stack); 236 | if let Some(panic) = panic { 237 | panic::resume_unwind(panic); 238 | } 239 | }, 240 | WaitFuture { mut task } => { 241 | task.context = Some(context); 242 | // Ignore the result. In case an error happens, the task gets dropped and it can 243 | // already handle that. 244 | let _ = TaskExecutor::current().spawn_local(Box::new(task)); 245 | }, 246 | _ => unreachable!("Invalid switch instruction when switching out"), 247 | } 248 | } 249 | /// Creates a new coroutine and runs it. 250 | pub(crate) fn run_new_coroutine(stack_size: usize, task: BoxedTask) -> Result<(), StackError> { 251 | let stack = stack_cache::get(stack_size)?; 252 | assert!(stack.len() >= stack_size); 253 | // The `Context::new` is unsafe only because we have to promise not to delete the stack 254 | // prematurely, while the coroutine is still alive. 
We ensure that by giving the ownership 255 | // of the stack to the coroutine and it gives it up only once it is ready to terminate. 256 | let context = unsafe { Context::new(&stack, coroutine) }; 257 | Switch::StartTask { stack, task }.run_child(context); 258 | Ok(()) 259 | } 260 | } 261 | 262 | #[cfg(test)] 263 | mod tests { 264 | use std::cell::Cell; 265 | use std::rc::Rc; 266 | 267 | use super::*; 268 | 269 | #[test] 270 | fn switch_coroutine() { 271 | let called = Rc::new(Cell::new(false)); 272 | let called_cp = called.clone(); 273 | let task = move |context, stack| { 274 | called_cp.set(true); 275 | (context, stack, None) 276 | }; 277 | Switch::run_new_coroutine(40960, Box::new(Some(task))).unwrap(); 278 | assert!(called.get()); 279 | assert_eq!(1, Rc::strong_count(&called)); 280 | } 281 | } 282 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc(html_root_url = "https://docs.rs/corona/0.4.3/corona/")] 2 | #![warn(missing_docs)] 3 | 4 | //! A library combining futures and coroutines. 5 | //! 6 | //! This library brings stack-full coroutines. Each coroutine can asynchronously wait on futures 7 | //! and provides a future of its result. 8 | //! 9 | //! # Deprecation notice 10 | //! 11 | //! This supports the old version of tokio (0.1). There's not much need for it any more, since Rust 12 | //! now supports async/await directly, so use some of the async libraries. 13 | //! 14 | //! # Motivation 15 | //! 16 | //! The current aim of Rust in regards to asynchronous programming is on 17 | //! [`futures`](https://crates.io/crates/futures). They have some good properties, but some tasks 18 | //! are more conveniently done in a more imperative way. 19 | //! 20 | //! There's the work in progress of [async-await](https://github.com/alexcrichton/futures-await). 21 | //! But it requires nightly (for now), provides stack-less coroutines (which means the asynchronous 22 | //! waiting can be done in a top-level function only) and there are too many `'static` bounds. 23 | //! Something might improve over time. 24 | //! 25 | //! This library brings a more convenient interface. However, it comes with a run-time cost, so you 26 | //! might want to consider if you prefer ease of development or memory efficiency. Often, the 27 | //! asynchronous communication isn't the bottleneck and you won't be handling millions of 28 | //! concurrent connections, only tens of thousands, so this might be OK. 29 | //! 30 | //! # Pros 31 | //! 32 | //! * Easier to use than futures. 33 | //! * Can integrate with futures. 34 | //! * Allows working with borrowed futures. 35 | //! * Provides safe interface. 36 | //! 37 | //! # Cons 38 | //! 39 | //! * Each coroutine needs its own stack, which is at least few memory pages large. This makes the 40 | //! library unsuitable when there are many concurrent coroutines. 41 | //! * The coroutines can't move between threads once created safely. This library is tied into 42 | //! [`tokio-current-threaded`](https://crates.io/crates/tokio-current-thread) executor (it might 43 | //! be possible to remove this tie-in, but not the single-threadiness). 44 | //! 45 | //! # How to use 46 | //! 47 | //! By bringing the `corona::prelude::*` into scope, all `Future`s, `Stream`s and `Sink`s get new 48 | //! methods for asynchronous waiting on their completion or progress. 49 | //! 50 | //! 
The [`Coroutine`](coroutine/struct.Coroutine.html) is used to start a new coroutine. It acts a 51 | //! bit like `std::thread::spawn`. However, all the coroutines run on the current thread and switch 52 | //! to other coroutines whenever they wait on something. This must be done from the context of 53 | //! current-thread executor, which is easiest by wrapping the whole application into 54 | //! `tokio::runtime::current_thread::block_on_all` and `future::lazy`. 55 | //! 56 | //! There's also the [`Coroutine::run`](coroutine/struct.Coroutine.html#method.run) that does the 57 | //! same (but is available only in case the `convenient-run` feature is not turned off). 58 | //! 59 | //! Each new coroutine returns a future. It resolves whenever the coroutine terminates. However, 60 | //! the coroutine is *eager* ‒ it doesn't wait with the execution for the future to be polled. The 61 | //! future can be dropped and the coroutine will still execute. 62 | //! 63 | //! ```rust 64 | //! extern crate corona; 65 | //! extern crate tokio; 66 | //! 67 | //! use std::time::Duration; 68 | //! 69 | //! use corona::prelude::*; 70 | //! use tokio::clock; 71 | //! use tokio::prelude::*; 72 | //! use tokio::runtime::current_thread; 73 | //! use tokio::timer::Delay; 74 | //! 75 | //! fn main() { 76 | //! let result = current_thread::block_on_all(future::lazy(|| { 77 | //! Coroutine::with_defaults(|| { 78 | //! let timeout = Delay::new(clock::now() + Duration::from_millis(50)); 79 | //! timeout.coro_wait().unwrap(); // Timeouts don't error 80 | //! 42 81 | //! }) 82 | //! })).unwrap(); 83 | //! assert_eq!(42, result); 84 | //! } 85 | //! ``` 86 | //! 87 | //! ```rust 88 | //! extern crate corona; 89 | //! extern crate tokio; 90 | //! 91 | //! use std::time::Duration; 92 | //! 93 | //! use corona::prelude::*; 94 | //! use tokio::clock; 95 | //! use tokio::prelude::*; 96 | //! use tokio::runtime::current_thread; 97 | //! use tokio::timer::Delay; 98 | //! 99 | //! fn main() { 100 | //! let result = Coroutine::new() 101 | //! .run(|| { 102 | //! let timeout = Delay::new(clock::now() + Duration::from_millis(50)); 103 | //! timeout.coro_wait().unwrap(); // Timeouts don't error 104 | //! 42 105 | //! }).unwrap(); 106 | //! assert_eq!(42, result); 107 | //! } 108 | //! ``` 109 | //! 110 | //! Few things of note: 111 | //! 112 | //! * All the coroutine-aware methods panic outside of a coroutine. 113 | //! * Many of them panic outside of a current-thread executor. 114 | //! * You can freely mix future and coroutine approach. Therefore, you can use combinators to build 115 | //! a future and then `coro_wait` on it. 116 | //! * A coroutine spawned by [`spawn`](coroutine/struct.Coroutine.html#method.spawn) or 117 | //! [`with_defaults`](coroutine/struct.Coroutine.html#method.with_defaults) will propagate panics 118 | //! outside. One spawned with 119 | //! [`spawn_catch_panic`](coroutine/struct.Coroutine.html#method.spawn_catch_panic) captures the 120 | //! panic and passes it on through its result. 121 | //! * Panicking outside of the coroutine where the executor runs may lead to ugly things, like 122 | //! aborting the program (this'd usually lead to a double panic). 123 | //! * Any of the waiting methods may switch to a different coroutine. Therefore it is not a good 124 | //! idea to hold a `RefCell` borrowed or a `Mutex` locked around that if another coroutine could 125 | //! also borrow it. 126 | //! 127 | //! The new methods are here: 128 | //! 129 | //! * [`Future`s](prelude/trait.CoroutineFuture.html) 130 | //! 
* [`Stream`s](prelude/trait.CoroutineStream.html) 131 | //! * [`Sink`s](prelude/trait.CoroutineSink.html) 132 | //! 133 | //! ## Coroutine-blocking IO 134 | //! 135 | //! Furthermore, if the `blocking-wrappers` feature is enabled (it is by default), all `AsyncRead` 136 | //! and `AsyncWrite` objects can be wrapped in 137 | //! [`corona::io::BlockingWrapper`](io/struct.BlockingWrapper.html). This implements 138 | //! `Read` and `Write` in a way that mimics blocking, but it blocks only the coroutine, not the 139 | //! whole thread. This allows it to be used with usual blocking routines, like 140 | //! `serde_json::from_reader`. 141 | //! 142 | //! The API is still a bit rough (it exposes just the `Read` and `Write` traits, all other methods 143 | //! need to be accessed through `.inner()` or `.inner_mut`), this will be improved in future 144 | //! versions. 145 | //! 146 | //! ``` 147 | //! # extern crate corona; 148 | //! # extern crate tokio; 149 | //! use std::io::{Read, Result as IoResult}; 150 | //! use corona::io::BlockingWrapper; 151 | //! use tokio::net::TcpStream; 152 | //! 153 | //! fn blocking_read(connection: &mut TcpStream) -> IoResult<()> { 154 | //! let mut connection = BlockingWrapper::new(connection); 155 | //! let mut buf = [0u8; 64]; 156 | //! // This will block the coroutine, but not the thread 157 | //! connection.read_exact(&mut buf) 158 | //! } 159 | //! 160 | //! # fn main() {} 161 | //! ``` 162 | //! 163 | //! # Cleaning up 164 | //! 165 | //! If the executor is dropped while a coroutine waits on something, the waiting method will panic. 166 | //! That way the coroutine's stack is unwinded, releasing resources on its stack (there doesn't 167 | //! seem to be a better way to drop the whole stack). 168 | //! 169 | //! However, if the executor is dropped because of a panic, Rust abort the whole program because of 170 | //! a double-panic. Ideas how to overcome this (since the second panic is on a different stack, but 171 | //! Rust doesn't know that) are welcome. 172 | //! 173 | //! There are waiting methods that return an error instead of panicking, but they are less 174 | //! convenient to use. 175 | //! 176 | //! It also can be configured to leak the stack in such case instead of double-panicking. 177 | //! 178 | //! # Pitfalls 179 | //! 180 | //! If the coroutine is created with default configuration, it gets really small stack. If you 181 | //! overflow it, you get a segfault (it happens more often on debug builds than release ones) with 182 | //! really useless backtrace. Try making the stack bigger in that case. 183 | //! 184 | //! # API Stability 185 | //! 186 | //! The API is likely to get stabilized soon (I hope it won't change much any more). But I still 187 | //! want to do more experimentation before making it official. 188 | //! 189 | //! There are two areas where I expect some changes will still be needed: 190 | //! 191 | //! * I want to support scoped coroutines (similar to some libraries that provide scoped threads). 192 | //! 193 | //! Other experiments from consumers are also welcome. 194 | //! 195 | //! # Known problems 196 | //! 197 | //! These are the problems I'm aware of and which I want to find a solution some day. 198 | //! 199 | //! * Many handy abstractions are still missing, like waiting for a future with a timeout, or 200 | //! conveniently waiting for a first of a set of futures or streams. 201 | //! * The coroutines can't move between threads. This is likely impossible, since Rust's type 202 | //! 
system doesn't expect whole stacks with all local data to move. 203 | //! * It relies on Tokio. This might change in the future. 204 | //! * The API doesn't prevent some footguns ‒ leaving a `RefCell` borrowed across coroutine switch, 205 | //! deadlocking, calling the waiting methods outside of a coroutine or using `.wait()` by a 206 | //! mistake and blocking the whole thread. These manifest during runtime. 207 | //! * The cleaning up of coroutines when the executor is dropped is done through panics. 208 | //! 209 | //! # Contribution 210 | //! 211 | //! All kinds of contributions are welcome, including reporting bugs, improving the documentation, 212 | //! submitting code, etc. However, the most valuable contribution for now would be trying it out 213 | //! and providing some feedback ‒ if the thing works, where the API needs improvements, etc. 214 | //! 215 | //! # Examples 216 | //! 217 | //! One that shows the API. 218 | //! 219 | //! ``` 220 | //! # extern crate corona; 221 | //! # extern crate futures; 222 | //! # extern crate tokio; 223 | //! use std::time::Duration; 224 | //! use futures::unsync::mpsc; 225 | //! use corona::prelude::*; 226 | //! use tokio::clock; 227 | //! use tokio::prelude::*; 228 | //! use tokio::runtime::current_thread; 229 | //! use tokio::timer::Delay; 230 | //! 231 | //! # fn main() { 232 | //! let result = Coroutine::new().run(|| { 233 | //! let (sender, receiver) = mpsc::channel(1); 234 | //! corona::spawn(|| { 235 | //! let mut sender = sender; 236 | //! sender = sender.send(1).coro_wait().unwrap(); 237 | //! sender = sender.send(2).coro_wait().unwrap(); 238 | //! }); 239 | //! 240 | //! for item in receiver.iter_ok() { 241 | //! println!("{}", item); 242 | //! } 243 | //! 244 | //! let timeout = Delay::new(clock::now() + Duration::from_millis(100)); 245 | //! timeout.coro_wait().unwrap(); 246 | //! 247 | //! 42 248 | //! }).unwrap(); 249 | //! assert_eq!(42, result); 250 | //! # } 251 | //! ``` 252 | //! 253 | //! Further examples can be found in the 254 | //! [repository](https://github.com/vorner/corona/tree/master/examples). 255 | //! 256 | //! # Behind the scenes 257 | //! 258 | //! There are few things that might help understanding how the library works inside. 259 | //! 260 | //! First, there's some thread-local state. This state is used for caching currently unused stacks 261 | //! as well as the state that is used when waiting for something and switching coroutines. 262 | //! 263 | //! Whenever one of the waiting methods is used, a wrapper future is created. After the original 264 | //! future resolves, it resumes the execution to the current stack. This future is spawned onto the 265 | //! executor and a switch is made to the parent coroutine (it's the coroutine that started or 266 | //! resumed the current one). This way, the „outside“ coroutine is reached eventually. It is 267 | //! expected this outside coroutine will run the executor, waking up the ready to proceed 268 | //! coroutines and then switching to them. 269 | //! 270 | //! That's about it, the rest of the library are just implementation details about what is stored 271 | //! where and how to pass the information around without breaking any lifetime bounds. 
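[Editorial note] The cleanup section above mentions that there are waiting methods which return an error instead of panicking, but shows no example. The sketch below is modelled on the crate's own `early_cleanup` tests; only the illustrative flow is invented. It spawns a coroutine that waits with `coro_wait_cleanup`, so dropping the runtime resumes it with `Err(Dropped)` and it can return cleanly instead of unwinding through a panic.

```rust
extern crate corona;
extern crate futures;
extern crate tokio;

use corona::prelude::*;
use futures::{future, Future};
use tokio::runtime::current_thread::Runtime;

fn main() {
    let mut rt = Runtime::new().unwrap();
    let mut done = None;
    rt.block_on(future::lazy(|| {
        // Spawning must happen from within the current-thread executor.
        done = Some(Coroutine::with_defaults(|| {
            // Wait on a future that never resolves. Once the runtime is
            // dropped below, coro_wait_cleanup returns Err(Dropped) instead
            // of panicking, so the coroutine can finish on its own terms.
            if future::empty::<(), ()>().coro_wait_cleanup().is_err() {
                return;
            }
            unreachable!("the future never resolves");
        }));
        future::ok::<(), ()>(())
    })).unwrap();
    drop(rt); // wakes the waiting coroutine with Err(Dropped)
    done.unwrap().wait().unwrap();
}
```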
272 | 273 | extern crate context; 274 | extern crate futures; 275 | #[cfg(any(test, feature = "convenient-run"))] 276 | extern crate tokio; 277 | extern crate tokio_current_thread; 278 | #[cfg(feature = "blocking-wrappers")] 279 | extern crate tokio_io; 280 | 281 | #[cfg(feature = "blocking-wrappers")] 282 | pub mod io; 283 | pub mod coroutine; 284 | pub mod errors; 285 | pub mod prelude; 286 | pub mod wrappers; 287 | 288 | mod stack_cache; 289 | mod switch; 290 | 291 | pub use errors::{Dropped, TaskFailed}; 292 | pub use coroutine::{spawn, Coroutine, CoroutineResult}; 293 | -------------------------------------------------------------------------------- /src/prelude.rs: -------------------------------------------------------------------------------- 1 | //! A module for wildcard import. 2 | //! 3 | //! This contains some traits and general types that are meant to be wildcard imported. These are 4 | //! extension traits, attaching more methods to existing types. 5 | //! 6 | //! Each of the main `futures` trait has one extension crate here. Also, the 7 | //! [`Coroutine`](../coroutine/struct.Coroutine.html) is included, as the main type of the library. 8 | //! 9 | //! All of these things are internally delegated to the 10 | //! [`Coroutine::wait`](../coroutine/struct.Coroutine.html#method.wait) method and are mostly for 11 | //! convenience. 12 | 13 | use std::iter; 14 | use std::panic; 15 | 16 | use futures::{Future, Sink, Stream}; 17 | 18 | use errors::Dropped; 19 | use wrappers::{CleanupIterator, OkIterator, ResultIterator, SinkSender, StreamExtractor}; 20 | 21 | pub use coroutine::Coroutine; 22 | 23 | /// An extension crate for the `Future` trait. 24 | /// 25 | /// This is auto-implemented for everything that implements the `Future` trait, attaching more 26 | /// methods to them. 27 | pub trait CoroutineFuture: Future + Sized { 28 | /// A coroutine aware wait on the result. 29 | /// 30 | /// This blocks the current coroutine until the future resolves and returns the result. This is 31 | /// similar to `Future::wait`. However, this allows other coroutines to run when this one 32 | /// waits. 33 | /// 34 | /// Note that the future does *not* have to be `'static`. 35 | /// 36 | /// # Panics 37 | /// 38 | /// This'll panic if the reactor the coroutine was spawned onto is dropped while the method 39 | /// runs. 40 | /// 41 | /// It also panics when called outside of the coroutine and any panics from the coroutine 42 | /// itself will be propagated to the calling coroutine. 43 | /// 44 | /// # Examples 45 | /// 46 | /// ```rust 47 | /// # extern crate corona; 48 | /// # extern crate tokio; 49 | /// # use std::time::Duration; 50 | /// # use corona::prelude::*; 51 | /// # use tokio::clock; 52 | /// # use tokio::timer::Delay; 53 | /// # fn main() { 54 | /// Coroutine::new().run(move || { 55 | /// let timeout = Delay::new(clock::now() + Duration::from_millis(50)); 56 | /// // This would switch to another coroutine if there was one ready. 57 | /// // We unwrap, since the error doesn't happen on timeouts. 58 | /// timeout.coro_wait().unwrap(); 59 | /// }); 60 | /// # } 61 | /// ``` 62 | fn coro_wait(self) -> Result { 63 | self.coro_wait_cleanup().unwrap_or_else(|_| panic::resume_unwind(Box::new(Dropped))) 64 | } 65 | 66 | /// A coroutine aware wait on the result that doesn't panic. 67 | /// 68 | /// This is just like [`coro_wait`](#method.coro_wait), but instead of panicking when the 69 | /// reactor is unexpectedly dropped, it returns `Err(Dropped)`. 
This might be used to implement 70 | /// manual coroutine cleanup when needed. 71 | /// 72 | /// # Panics 73 | /// 74 | /// When called outside of the coroutine. Also, panics from within the future are propagated to 75 | /// the calling (current) coroutine. 76 | fn coro_wait_cleanup(self) -> Result, Dropped>; 77 | } 78 | 79 | impl CoroutineFuture for F { 80 | fn coro_wait_cleanup(self) -> Result, Dropped> { 81 | Coroutine::wait(self) 82 | } 83 | } 84 | 85 | /// An extension trait for `Stream`s. 86 | /// 87 | /// This is auto-implemented for `Stream`s and adds some convenient coroutine-aware methods to 88 | /// them. 89 | pub trait CoroutineStream: Stream + Sized { 90 | /// Produces an iterator through the successful items of the stream. 91 | /// 92 | /// This allows iterating comfortably through the stream. It produces only the successful 93 | /// items and stops when the stream terminates or when it reaches the first error. The error is 94 | /// thrown away (you may want to use [`iter_result`](#method.iter_result) if you care about the 95 | /// errors). 96 | /// 97 | /// When it waits for another item to come out of the stream, the coroutine suspends and 98 | /// switches to others if there are some ready. 99 | /// 100 | /// # Panics 101 | /// 102 | /// If the reactor is dropped during the iteration, this method panics to clean up the 103 | /// coroutine. 104 | /// 105 | /// It also panics when called from outside of a coroutine. Panics from within the stream are 106 | /// propagated into the calling coroutine. 107 | /// 108 | /// # Examples 109 | /// 110 | /// ```rust 111 | /// # extern crate corona; 112 | /// # extern crate futures; 113 | /// # use corona::prelude::*; 114 | /// # use futures::unsync::mpsc; 115 | /// # fn main() { 116 | /// let (sender, receiver) = mpsc::unbounded(); 117 | /// sender.unbounded_send(21); 118 | /// sender.unbounded_send(21); 119 | /// // Make sure the channel is terminated, or it would wait forever. 120 | /// drop(sender); 121 | /// 122 | /// let result = Coroutine::new().run(move || { 123 | /// let mut sum = 0; 124 | /// for num in receiver.iter_ok() { 125 | /// sum += num; 126 | /// } 127 | /// sum 128 | /// }); 129 | /// assert_eq!(42, result.unwrap()); 130 | /// # } 131 | /// ``` 132 | fn iter_ok(self) -> OkIterator> { 133 | OkIterator::new(self.iter_cleanup()) 134 | } 135 | 136 | /// Produces an iterator through results. 137 | /// 138 | /// This is similar to [`iter_ok`](#method.iter_ok). However, instead of terminating on errors, 139 | /// the items produced by the iterator are complete `Result`s. The iterator always runs to the 140 | /// end of the stream (or break out of the `for`). 141 | /// 142 | /// # Notes 143 | /// 144 | /// In general, streams don't guarantee to be usable past their first error. So, when working 145 | /// with an unknown stream, it is reasonable to break the `for` on the first error. This is 146 | /// similar to [`iter_ok`](#method.iter_ok), but allows inspecting the error. 147 | /// 148 | /// However, there are some specific streams that are usable past errors. Such example is 149 | /// `TcpListener::incoming`, which may signal an error accepting one connection, but then keeps 150 | /// trying. 151 | /// 152 | /// # Panics 153 | /// 154 | /// This panics when the reactor the current coroutine runs on is dropped while iterating. 155 | /// 156 | /// It panics when called outside of a coroutine and any panics from within the stream are 157 | /// propagated to the calling coroutine. 
158 | /// 159 | /// # Examples 160 | /// 161 | /// ```rust 162 | /// # extern crate corona; 163 | /// # extern crate futures; 164 | /// # use corona::prelude::*; 165 | /// # use futures::unsync::mpsc; 166 | /// # fn main() { 167 | /// let (sender, receiver) = mpsc::unbounded(); 168 | /// sender.unbounded_send(21); 169 | /// sender.unbounded_send(21); 170 | /// // Make sure the channel is terminated, or it would wait forever. 171 | /// drop(sender); 172 | /// 173 | /// let result = Coroutine::new().run(move || { 174 | /// let mut sum = 0; 175 | /// for num in receiver.iter_result() { 176 | /// sum += num.expect("MPSC should not error"); 177 | /// } 178 | /// sum 179 | /// }); 180 | /// assert_eq!(42, result.unwrap()); 181 | /// # } 182 | /// ``` 183 | fn iter_result(self) -> ResultIterator> { 184 | ResultIterator::new(self.iter_cleanup()) 185 | } 186 | 187 | /// Produces an iterator that doesn't panic on reactor drop. 188 | /// 189 | /// This acts like [`iter_result`](#method.iter_result). However, the produced items are 190 | /// wrapped inside another level of `Result` and it returns `Err(Dropped)` if the reactor is 191 | /// dropped while iterating instead of panicking. This allows manual coroutine cleanup when 192 | /// needed, but is probably less convenient for casual use. 193 | /// 194 | /// # Panics 195 | /// 196 | /// If called outside of a coroutine, or if the stream itself panics internally. 197 | fn iter_cleanup(self) -> CleanupIterator; 198 | 199 | /// A future that pulls one item out of the stream. 200 | /// 201 | /// This is like `Stream::into_future`, but it doesn't consume and re-produce the stream. 202 | /// Instead it borrows the stream mutably. Such thing is usable with coroutines, since 203 | /// coroutines can easily wait on futures that are not `'static`. 204 | /// 205 | /// Unlike the other methods here, this only builds the future, doesn't run it. 206 | /// 207 | /// # Examples 208 | /// 209 | /// ```rust 210 | /// # extern crate corona; 211 | /// # extern crate futures; 212 | /// # use corona::prelude::*; 213 | /// # use futures::unsync::mpsc; 214 | /// # fn main() { 215 | /// let (sender, mut receiver) = mpsc::unbounded(); 216 | /// sender.unbounded_send(21); 217 | /// // The second item is unused 218 | /// sender.unbounded_send(21); 219 | /// drop(sender); 220 | /// 221 | /// let result = Coroutine::new().run(move || { 222 | /// receiver.extractor() 223 | /// .coro_wait() // Block until the item actually falls out 224 | /// .unwrap() // Unwrap the outer result 225 | /// .unwrap() // Unwrap the option, since it gives `Option` 226 | /// }); 227 | /// assert_eq!(21, result.unwrap()); 228 | /// # } 229 | fn extractor(&mut self) -> StreamExtractor; 230 | 231 | /// Pulls one item out of the stream. 232 | /// 233 | /// This extracts one item out of the stream, returning either the streams error or the item or 234 | /// `None` on end of the stream. 235 | /// 236 | /// It blocks the current coroutine when waiting for the item to appear. 237 | /// 238 | /// # Panics 239 | /// 240 | /// It panics when the reactor is dropped while waiting for the item. 241 | /// 242 | /// It also panics when called outside of a coroutine or when the stream itself panics. 
243 | /// 244 | /// # Examples 245 | /// 246 | /// ```rust 247 | /// # extern crate corona; 248 | /// # extern crate futures; 249 | /// # use corona::prelude::*; 250 | /// # use futures::unsync::mpsc; 251 | /// # fn main() { 252 | /// let (sender, mut receiver) = mpsc::unbounded(); 253 | /// sender.unbounded_send(21); 254 | /// sender.unbounded_send(21); 255 | /// drop(sender); 256 | /// 257 | /// let result = Coroutine::new().run(move || { 258 | /// let mut sum = 0; 259 | /// while let Some(num) = receiver.coro_next().unwrap() { 260 | /// sum += num; 261 | /// } 262 | /// sum 263 | /// }); 264 | /// assert_eq!(42, result.unwrap()); 265 | /// # } 266 | /// ``` 267 | fn coro_next(&mut self) -> Result, Self::Error> { 268 | self.coro_next_cleanup().unwrap() 269 | } 270 | 271 | /// Pulls one item out of the stream without panicking. 272 | /// 273 | /// This is like [`coro_next`](#method.coro_next), but returns `Err(Dropped)` when the reactor 274 | /// is dropped during the waiting instead of panicking. That allows manual coroutine cleanup. 275 | /// 276 | /// # Panics 277 | /// 278 | /// When called outside of a coroutine or when the stream itself panics. 279 | fn coro_next_cleanup(&mut self) -> Result, Self::Error>, Dropped> { 280 | self.extractor().coro_wait_cleanup() 281 | } 282 | } 283 | 284 | impl CoroutineStream for S { 285 | fn iter_cleanup(self) -> CleanupIterator { 286 | CleanupIterator::new(self) 287 | } 288 | fn extractor(&mut self) -> StreamExtractor { 289 | StreamExtractor::new(self) 290 | } 291 | } 292 | 293 | /// An extension trait for `Sink`. 294 | /// 295 | /// This is automatically implemented for `Sink`s and adds some convenience methods to them. 296 | pub trait CoroutineSink: Sink + Sized { 297 | /// Sends one item into the sink. 298 | /// 299 | /// This is similar to `Sink::send`, but doesn't consume the sink, only borrows it mutably. 300 | /// This is more convenient with the coroutines, because they can wait on something that is not 301 | /// `'static`. 302 | /// 303 | /// # Parameters 304 | /// 305 | /// * `item`: The item to be sent. 306 | /// 307 | /// # Panics 308 | /// 309 | /// If the reactor is dropped before the sending is done. 310 | /// 311 | /// If it is called outside of a coroutine or if the sink panics internally. 312 | /// 313 | /// # Examples 314 | /// 315 | /// ```rust 316 | /// # extern crate corona; 317 | /// # extern crate futures; 318 | /// # use corona::prelude::*; 319 | /// # use futures::unsync::mpsc; 320 | /// # fn main() { 321 | /// let (mut sender, mut receiver) = mpsc::channel(1); 322 | /// let result = Coroutine::new().run(move || { 323 | /// corona::spawn(move || { 324 | /// sender.coro_send(42).unwrap(); 325 | /// }); 326 | /// receiver.coro_next().unwrap() 327 | /// }); 328 | /// assert_eq!(Some(42), result.unwrap()); 329 | /// # } 330 | /// ``` 331 | fn coro_send(&mut self, item: Self::SinkItem) -> Result<(), Self::SinkError> { 332 | self.coro_sender(iter::once(item)).coro_wait() 333 | } 334 | 335 | /// Sends one item into the sink without panicking on dropped reactor. 336 | /// 337 | /// This sends one item into the sink, similar to [`coro_send`](#method.coro_send). The 338 | /// difference is it doesn't panic on dropped reactor. Instead, it returns `Err(Dropped)` and 339 | /// allows manual cleanup of the coroutine. 340 | /// 341 | /// # Parameters 342 | /// 343 | /// * `item`: The item to be sent. 344 | /// 345 | /// # Panics 346 | /// 347 | /// If it is called outside of a coroutine or if the sink itself panics. 
348 | fn coro_send_cleanup(&mut self, item: Self::SinkItem) 349 | -> Result<Result<(), Self::SinkError>, Dropped> 350 | { 351 | self.coro_sender(iter::once(item)).coro_wait_cleanup() 352 | } 353 | 354 | /// Sends multiple items into the sink. 355 | /// 356 | /// This is like [`coro_send_cleanup`](#method.coro_send_cleanup). However, it sends multiple 357 | /// items instead of one. This is potentially faster than pushing them one by one, since the 358 | /// sink „flushes“ just once after the whole batch. 359 | /// 360 | /// # Parameters 361 | /// 362 | /// * `iter`: Iterator over the items to send. 363 | /// 364 | /// # Panics 365 | /// 366 | /// If it is called outside of a coroutine or if the sink panics internally. 367 | fn coro_send_many<I>(&mut self, iter: I) -> Result<Result<(), Self::SinkError>, Dropped> 368 | where 369 | I: IntoIterator<Item = Self::SinkItem> 370 | { 371 | self.coro_sender(iter).coro_wait_cleanup() 372 | } 373 | 374 | /// Creates a future that sends multiple items into the sink. 375 | /// 376 | /// This is the internal future of [`coro_send_many`](#method.coro_send_many). The difference 377 | /// is that it doesn't wait for the future to resolve, it only returns it. 378 | /// 379 | /// It can be used to combine the future with something else, like sending to multiple sinks in 380 | /// parallel. 381 | /// 382 | /// # Parameters 383 | /// 384 | /// * `iter`: The iterator over items to be sent. 385 | fn coro_sender<I>(&mut self, iter: I) -> SinkSender 386 | where 387 | I: IntoIterator<Item = Self::SinkItem>; 388 | } 389 | 390 | impl<S: Sink> CoroutineSink for S { 391 | fn coro_sender<Src>(&mut self, iter: Src) -> SinkSender 392 | where 393 | Src: IntoIterator<Item = Self::SinkItem> 394 | { 395 | SinkSender::new(self, iter) 396 | } 397 | } 398 | -------------------------------------------------------------------------------- /benchmarks/benches/compare_methods.rs: -------------------------------------------------------------------------------- 1 | #![feature(generators, proc_macro_hygiene, test)] 2 | 3 | //! Minimal benchmarks and comparison of some IO manipulation. 4 | //! 5 | //! This tries to compare the speed of different methods of implementing a networked server. The 6 | //! servers differ, while the client is always the same. 7 | //! 8 | //! The client opens `PARALLEL` connections to the server, then `EXCHANGES` times sends a buffer of 9 | //! data through each of the connections and expects an answer back. 10 | //! 11 | //! Note that we leave the server threads running after the benchmark terminates, to avoid the need 12 | //! to synchronize the shutdown. As they just sit there inactive, this should have no real effect on 13 | //! the performance. 14 | //! 15 | //! The `*_many` variants run the listener in multiple independent threads.
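//!
//! The various knobs (connection counts, exchange counts, thread counts) are read from
//! environment variables at runtime (see the `lazy_static` block below), so a run can be tuned
//! without recompiling ‒ for instance, something along the lines of
//! `PARALLEL=512 EXCHANGES=8 cargo bench` (a hypothetical invocation; any variable left unset
//! falls back to its default).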
16 | 17 | extern crate corona; 18 | extern crate futures_await as futures; 19 | extern crate futures_cpupool; 20 | #[macro_use] 21 | extern crate lazy_static; 22 | extern crate may; 23 | extern crate net2; 24 | extern crate num_cpus; 25 | extern crate test; 26 | extern crate tokio; 27 | 28 | use std::env; 29 | use std::io::{Read, Write}; 30 | use std::net::{TcpStream, TcpListener, SocketAddr}; 31 | use std::os::unix::io::{FromRawFd, IntoRawFd}; 32 | use std::sync::mpsc; 33 | use std::thread; 34 | 35 | use corona::io::BlockingWrapper; 36 | use corona::prelude::*; 37 | use futures::{stream, Future, Stream}; 38 | use futures::prelude::await; 39 | use futures::prelude::*; 40 | use futures_cpupool::CpuPool; 41 | use may::coroutine; 42 | use may::net::TcpListener as MayTcpListener; 43 | use net2::TcpBuilder; 44 | use tokio::runtime::current_thread; 45 | use tokio::net::TcpListener as TokioTcpListener; 46 | use tokio::reactor::Handle; 47 | use tokio::io; 48 | use test::Bencher; 49 | 50 | const BUF_SIZE: usize = 512; 51 | 52 | fn get_var(name: &str, default: usize) -> usize { 53 | env::var(name) 54 | .map_err(|_| ()) 55 | .and_then(|s| s.parse().map_err(|_| ())) 56 | .unwrap_or(default) 57 | } 58 | 59 | lazy_static! { 60 | static ref POOL: CpuPool = CpuPool::new_num_cpus(); 61 | 62 | // Configuration bellow 63 | /// The number of connections to the server. 64 | /// 65 | /// This is what the clients aim for. But a client may be deleting or creating the connections 66 | /// at a time, so this is the upper limit. With multiple clients, this is split between them. 67 | static ref PARALLEL: usize = get_var("PARALLEL", 128); 68 | /// How many ping-pongs are done over each connection. 69 | static ref EXCHANGES: usize = get_var("EXCHANGES", 4); 70 | /// How many batches should happen before starting to measure. 71 | /// 72 | /// This allows the servers to get up to speed. 73 | static ref WARMUP: usize = get_var("WARMUP", 2); 74 | /// How many times to connect and disconnect all the connections in one benchmark iteration. 75 | static ref BATCH: usize = get_var("BATCH", 4); 76 | /// Into how many client threads the client workload is spread. 77 | static ref CLIENT_THREADS: usize = get_var("CLIENT_THREADS", 32); 78 | /// Number of server instances in the `_many` scenarios. 79 | static ref SERVER_THREADS: usize = get_var("SERVER_THREADS", 2); 80 | } 81 | 82 | /// The client side 83 | fn batter(addr: SocketAddr) { 84 | let mut streams = (0..*PARALLEL / *CLIENT_THREADS).map(|_| { 85 | TcpStream::connect(&addr) 86 | .unwrap() 87 | }) 88 | .collect::>(); 89 | let input = [1u8; BUF_SIZE]; 90 | let mut output = [0u8; BUF_SIZE]; 91 | for _ in 0..*EXCHANGES { 92 | for stream in &mut streams { 93 | stream.write_all(&input[..]).unwrap(); 94 | } 95 | for stream in &mut streams { 96 | stream.read_exact(&mut output[..]).unwrap(); 97 | } 98 | } 99 | } 100 | 101 | /// Performs one benchmark, with the body as the server implementation 102 | /// 103 | /// There's a short warm-up before the actual benchmark starts ‒ both to initialize whatever 104 | /// buffers or caches the library uses and to make sure the server already started after the 105 | /// barrier. 106 | /// 107 | /// We run the clients in multiple threads (so the server is kept busy). To not start and stop a 108 | /// lot of client threads, we report the progress through a sync channel. 
109 | fn bench(b: &mut Bencher, paral: usize, body: fn(TcpListener)) { 110 | let listener = TcpBuilder::new_v4() 111 | .unwrap() 112 | .reuse_address(true) 113 | .unwrap() 114 | .bind("127.0.0.1:0") 115 | .unwrap() 116 | .listen(4096) 117 | .unwrap(); 118 | let addr = listener.local_addr().unwrap(); 119 | for _ in 0..paral { 120 | let listener = listener.try_clone().unwrap(); 121 | thread::spawn(move || body(listener)); 122 | } 123 | let (sender, receiver) = mpsc::sync_channel(*CLIENT_THREADS * 10); 124 | for _ in 0..*CLIENT_THREADS { 125 | let sender = sender.clone(); 126 | let addr = addr; 127 | thread::spawn(move || { 128 | while let Ok(_) = sender.send(()) { 129 | for _ in 0..*BATCH { 130 | batter(addr); 131 | } 132 | } 133 | }); 134 | } 135 | for _ in 0..*WARMUP * *CLIENT_THREADS { 136 | receiver.recv().unwrap(); 137 | } 138 | b.iter(move || { 139 | // One iteration is when all the threads perform the whole batch. This is approximate (they 140 | // don't do it at the same time), but it should cancel out over the many iterations. 141 | for _ in 0..*CLIENT_THREADS { 142 | receiver.recv().unwrap(); 143 | } 144 | }); 145 | } 146 | 147 | fn run_corona(listener: TcpListener) { 148 | Coroutine::new().run(move || { 149 | let incoming = TokioTcpListener::from_std(listener, &Handle::default()) 150 | .unwrap() 151 | .incoming() 152 | .iter_ok(); 153 | for mut connection in incoming { 154 | corona::spawn(move || { 155 | let mut buf = [0u8; BUF_SIZE]; 156 | for _ in 0..*EXCHANGES { 157 | io::read_exact(&mut connection, &mut buf[..]) 158 | .coro_wait() 159 | .unwrap(); 160 | io::write_all(&mut connection, &buf[..]) 161 | .coro_wait() 162 | .unwrap(); 163 | } 164 | }); 165 | } 166 | }).unwrap(); 167 | } 168 | 169 | /// Our own corona. 170 | #[bench] 171 | fn corona(b: &mut Bencher) { 172 | bench(b, 1, run_corona); 173 | } 174 | 175 | #[bench] 176 | fn corona_many(b: &mut Bencher) { 177 | bench(b, *SERVER_THREADS, run_corona); 178 | } 179 | 180 | #[bench] 181 | fn corona_cpus(b: &mut Bencher) { 182 | bench(b, num_cpus::get(), run_corona); 183 | } 184 | 185 | fn run_corona_wrapper(listener: TcpListener) { 186 | Coroutine::new().run(move || { 187 | let incoming = TokioTcpListener::from_std(listener, &Handle::default()) 188 | .unwrap() 189 | .incoming() 190 | .iter_ok(); 191 | for connection in incoming { 192 | corona::spawn(move || { 193 | let mut buf = [0u8; BUF_SIZE]; 194 | let mut connection = BlockingWrapper::new(connection); 195 | for _ in 0..*EXCHANGES { 196 | connection.read_exact(&mut buf[..]).unwrap(); 197 | connection.write_all(&buf[..]).unwrap(); 198 | } 199 | }); 200 | } 201 | }).unwrap(); 202 | } 203 | 204 | /// Corona, but with the blocking wrapper 205 | #[bench] 206 | fn corona_blocking_wrapper(b: &mut Bencher) { 207 | bench(b, 1, run_corona_wrapper); 208 | } 209 | 210 | #[bench] 211 | fn corona_blocking_wrapper_many(b: &mut Bencher) { 212 | bench(b, *SERVER_THREADS, run_corona_wrapper); 213 | } 214 | 215 | #[bench] 216 | fn corona_blocking_wrapper_cpus(b: &mut Bencher) { 217 | bench(b, num_cpus::get(), run_corona_wrapper); 218 | } 219 | 220 | 221 | fn run_threads(listener: TcpListener) { 222 | while let Ok((mut connection, _address)) = listener.accept() { 223 | thread::spawn(move || { 224 | let mut buf = [0u8; BUF_SIZE]; 225 | for _ in 0..*EXCHANGES { 226 | connection.read_exact(&mut buf[..]).unwrap(); 227 | connection.write_all(&buf[..]).unwrap(); 228 | } 229 | }); 230 | } 231 | drop(listener); // Just to prevent clippy warning 232 | } 233 | 234 | /// Runs a fresh thread for each 
connection 235 | /// 236 | /// This might happen to be slightly faster because it may use more CPU parallelism. Or maybe 237 | /// there's just less overhead due to the event loop ‒ but there's the price to switch threads. 238 | #[bench] 239 | fn threads(b: &mut Bencher) { 240 | bench(b, 1, run_threads); 241 | } 242 | 243 | #[bench] 244 | fn threads_many(b: &mut Bencher) { 245 | bench(b, *SERVER_THREADS, run_threads); 246 | } 247 | 248 | #[bench] 249 | fn threads_cpus(b: &mut Bencher) { 250 | bench(b, num_cpus::get(), run_threads); 251 | } 252 | 253 | fn gen_futures(listener: TcpListener) -> impl Future { 254 | TokioTcpListener::from_std(listener, &Handle::default()) 255 | .unwrap() 256 | .incoming() 257 | .map_err(|e: std::io::Error| panic!("{}", e)) 258 | .for_each(move |connection| { 259 | let buf = vec![0u8; BUF_SIZE]; 260 | let perform = stream::iter_ok(0..*EXCHANGES) 261 | .fold((connection, buf), |(connection, buf), _i| { 262 | io::read_exact(connection, buf) 263 | .and_then(|(connection, buf)| io::write_all(connection, buf)) 264 | }) 265 | .map(|_| ()) 266 | .map_err(|e: std::io::Error| panic!("{}", e)); 267 | tokio::spawn(perform) 268 | }) 269 | } 270 | 271 | fn run_futures(listener: TcpListener) { 272 | current_thread::block_on_all(gen_futures(listener)).unwrap(); 273 | } 274 | 275 | fn run_futures_workstealing(listener: TcpListener) { 276 | tokio::run(gen_futures(listener)); 277 | } 278 | 279 | /// Just plain futures. 280 | /// 281 | /// While faster than corona, it is probably harder to read and write. 282 | #[bench] 283 | fn futures(b: &mut Bencher) { 284 | bench(b, 1, run_futures); 285 | } 286 | 287 | #[bench] 288 | fn futures_many(b: &mut Bencher) { 289 | bench(b, *SERVER_THREADS, run_futures); 290 | } 291 | 292 | #[bench] 293 | fn futures_cpus(b: &mut Bencher) { 294 | bench(b, *SERVER_THREADS, run_futures); 295 | } 296 | 297 | #[bench] 298 | fn futures_workstealing(b: &mut Bencher) { 299 | bench(b, 1, run_futures_workstealing); 300 | } 301 | 302 | fn run_futures_cpupool(listener: TcpListener) { 303 | let main = TokioTcpListener::from_std(listener, &Handle::default()) 304 | .unwrap() 305 | .incoming() 306 | .map_err(|e: std::io::Error| panic!("{}", e)) 307 | .for_each(move |connection| { 308 | let buf = vec![0u8; BUF_SIZE]; 309 | let perform = stream::iter_ok(0..*EXCHANGES) 310 | .fold((connection, buf), |(connection, buf), _i| { 311 | io::read_exact(connection, buf) 312 | .and_then(|(connection, buf)| io::write_all(connection, buf)) 313 | }) 314 | .map(|_| ()); 315 | let offloaded = POOL.spawn(perform) 316 | .map(|_| ()) 317 | .map_err(|e: std::io::Error| panic!("{}", e)); 318 | tokio::spawn(offloaded) 319 | }); 320 | current_thread::block_on_all(main).unwrap(); 321 | } 322 | 323 | /// Like `futures`, but uses cpu pool. 324 | #[bench] 325 | fn futures_cpupool(b: &mut Bencher) { 326 | bench(b, 1, run_futures_cpupool); 327 | } 328 | 329 | #[bench] 330 | fn futures_cpupool_many(b: &mut Bencher) { 331 | bench(b, *SERVER_THREADS, run_futures_cpupool); 332 | } 333 | 334 | #[bench] 335 | fn futures_cpupool_cpus(b: &mut Bencher) { 336 | bench(b, num_cpus::get(), run_futures_cpupool); 337 | } 338 | 339 | fn gen_async(listener: TcpListener) -> impl Future { 340 | let incoming = TokioTcpListener::from_std(listener, &Handle::default()) 341 | .unwrap() 342 | .incoming(); 343 | let main = async_block! { 344 | #[async] 345 | for mut connection in incoming { 346 | let client = async_block! 
{ 347 | let mut buf = vec![0u8; BUF_SIZE]; 348 | for _ in 0..*EXCHANGES { 349 | let (c, b) = await!(io::read_exact(connection, buf))?; 350 | let (c, b) = await!(io::write_all(c, b))?; 351 | connection = c; 352 | buf = b; 353 | } 354 | Ok(()) 355 | } 356 | .map(|_| ()) 357 | .map_err(|e: std::io::Error| panic!(e)); 358 | tokio::spawn(client); 359 | } 360 | Ok::<(), std::io::Error>(()) 361 | } 362 | .map_err(|e| panic!("{}", e)); 363 | main 364 | } 365 | 366 | fn run_async(listener: TcpListener) { 367 | current_thread::block_on_all(gen_async(listener)).unwrap(); 368 | } 369 | 370 | fn run_async_workstealing(listener: TcpListener) { 371 | tokio::run(gen_async(listener)); 372 | } 373 | 374 | /// With the futures-async magic 375 | #[bench] 376 | fn async(b: &mut Bencher) { 377 | bench(b, 1, run_async); 378 | } 379 | 380 | #[bench] 381 | fn async_many(b: &mut Bencher) { 382 | bench(b, *SERVER_THREADS, run_async); 383 | } 384 | 385 | #[bench] 386 | fn async_cpus(b: &mut Bencher) { 387 | bench(b, num_cpus::get(), run_async); 388 | } 389 | 390 | #[bench] 391 | fn async_workstealing(b: &mut Bencher) { 392 | bench(b, 1, run_async_workstealing); 393 | } 394 | 395 | fn run_async_cpupool(listener: TcpListener) { 396 | let incoming = TokioTcpListener::from_std(listener, &Handle::default()) 397 | .unwrap() 398 | .incoming(); 399 | let main = async_block! { 400 | #[async] 401 | for mut connection in incoming { 402 | let client = async_block! { 403 | let mut buf = vec![0u8; BUF_SIZE]; 404 | for _ in 0..*EXCHANGES { 405 | let (c, b) = await!(io::read_exact(connection, buf))?; 406 | let (c, b) = await!(io::write_all(c, b))?; 407 | connection = c; 408 | buf = b; 409 | } 410 | Ok(()) 411 | } 412 | .map(|_| ()) 413 | .map_err(|e: std::io::Error| panic!(e)); 414 | let offloaded = POOL.spawn(client) 415 | .map(|_| ()) 416 | .map_err(|e: std::io::Error| panic!(e)); 417 | tokio::spawn(offloaded); 418 | } 419 | Ok::<_, std::io::Error>(()) 420 | }; 421 | current_thread::block_on_all(main).unwrap(); 422 | } 423 | 424 | /// Async, but with cpu pool 425 | #[bench] 426 | fn async_cpupool(b: &mut Bencher) { 427 | bench(b, 1, run_async_cpupool); 428 | } 429 | 430 | #[bench] 431 | fn async_cpupool_many(b: &mut Bencher) { 432 | bench(b, *SERVER_THREADS, run_async_cpupool); 433 | } 434 | 435 | #[bench] 436 | fn async_cpupool_cpus(b: &mut Bencher) { 437 | bench(b, num_cpus::get(), run_async_cpupool); 438 | } 439 | 440 | /* 441 | * Note about the unsafety here. 442 | * 443 | * The may library uses N:M threading with work stealing of coroutine threads. This completely 444 | * disregards all the compile-time thread safety guarantees of Rust and turns Rust into a C++ with 445 | * better package management. 446 | * 447 | * The problem is, Rust doesn't check if the stack contains something that isn't Send. And moving 448 | * the stack to a different OS thread sends all these potentially non-Send things to a different 449 | * thread, basically insuring undefined behaviour. 450 | * 451 | * This is OK in our case ‒ we do basically nothing here and we have no non-Send data on the 452 | * thread. But it's hard to ensure in the general case (you'd have to check the stacks of all the 453 | * dependencies and you'd have to make sure none of your dependencies uses TLS). Still, the check 454 | * lies with the user of the library. 455 | */ 456 | fn run_may(listener: TcpListener) { 457 | // May can't change config later on… so all tests need to have the same config. 
Let's use the 458 | // same thing (number of real CPUs) as with the futures-cpupool, to have some illusion of 459 | // fairness. 460 | may::config() 461 | .set_workers(num_cpus::get()) 462 | .set_io_workers(num_cpus::get()); 463 | // May doesn't seem to support direct conversion 464 | let raw_fd = listener.into_raw_fd(); 465 | let listener = unsafe { MayTcpListener::from_raw_fd(raw_fd) }; 466 | while let Ok((mut connection, _address)) = listener.accept() { 467 | unsafe { 468 | coroutine::spawn(move || { 469 | let mut buf = [0u8; BUF_SIZE]; 470 | for _ in 0..*EXCHANGES { 471 | connection.read_exact(&mut buf[..]).unwrap(); 472 | connection.write_all(&buf[..]).unwrap(); 473 | } 474 | }) 475 | }; 476 | } 477 | } 478 | 479 | /// May 480 | #[bench] 481 | fn may(b: &mut Bencher) { 482 | bench(b, 1, run_may); 483 | } 484 | 485 | #[bench] 486 | fn may_many(b: &mut Bencher) { 487 | bench(b, *SERVER_THREADS, run_may); 488 | } 489 | 490 | #[bench] 491 | fn may_cpus(b: &mut Bencher) { 492 | bench(b, num_cpus::get(), run_may); 493 | } 494 | -------------------------------------------------------------------------------- /benchmarks/Cargo.lock: -------------------------------------------------------------------------------- 1 | [[package]] 2 | name = "benchmarks" 3 | version = "0.1.0" 4 | dependencies = [ 5 | "corona 0.3.1", 6 | "futures-await 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 7 | "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", 8 | "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", 9 | "may 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", 10 | "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", 11 | "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", 12 | "tokio-core 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", 13 | "tokio-io 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 14 | ] 15 | 16 | [[package]] 17 | name = "bitflags" 18 | version = "0.9.1" 19 | source = "registry+https://github.com/rust-lang/crates.io-index" 20 | 21 | [[package]] 22 | name = "bitflags" 23 | version = "1.0.1" 24 | source = "registry+https://github.com/rust-lang/crates.io-index" 25 | 26 | [[package]] 27 | name = "byteorder" 28 | version = "1.2.1" 29 | source = "registry+https://github.com/rust-lang/crates.io-index" 30 | 31 | [[package]] 32 | name = "bytes" 33 | version = "0.4.6" 34 | source = "registry+https://github.com/rust-lang/crates.io-index" 35 | dependencies = [ 36 | "byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 37 | "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 38 | ] 39 | 40 | [[package]] 41 | name = "cc" 42 | version = "1.0.4" 43 | source = "registry+https://github.com/rust-lang/crates.io-index" 44 | 45 | [[package]] 46 | name = "cfg-if" 47 | version = "0.1.2" 48 | source = "registry+https://github.com/rust-lang/crates.io-index" 49 | 50 | [[package]] 51 | name = "context" 52 | version = "2.0.1" 53 | source = "registry+https://github.com/rust-lang/crates.io-index" 54 | dependencies = [ 55 | "cc 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", 56 | "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 57 | "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 58 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 59 | ] 60 | 61 | [[package]] 62 | name = "corona" 63 | version = "0.3.1" 64 | dependencies = [ 65 | 
"context 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", 66 | "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", 67 | "tokio-core 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", 68 | ] 69 | 70 | [[package]] 71 | name = "crossbeam" 72 | version = "0.3.2" 73 | source = "registry+https://github.com/rust-lang/crates.io-index" 74 | 75 | [[package]] 76 | name = "fuchsia-zircon" 77 | version = "0.3.3" 78 | source = "registry+https://github.com/rust-lang/crates.io-index" 79 | dependencies = [ 80 | "bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", 81 | "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", 82 | ] 83 | 84 | [[package]] 85 | name = "fuchsia-zircon-sys" 86 | version = "0.3.3" 87 | source = "registry+https://github.com/rust-lang/crates.io-index" 88 | 89 | [[package]] 90 | name = "futures" 91 | version = "0.1.18" 92 | source = "registry+https://github.com/rust-lang/crates.io-index" 93 | 94 | [[package]] 95 | name = "futures-await" 96 | version = "0.1.0" 97 | source = "registry+https://github.com/rust-lang/crates.io-index" 98 | dependencies = [ 99 | "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", 100 | "futures-await-async-macro 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 101 | "futures-await-await-macro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 102 | ] 103 | 104 | [[package]] 105 | name = "futures-await-async-macro" 106 | version = "0.1.1" 107 | source = "registry+https://github.com/rust-lang/crates.io-index" 108 | dependencies = [ 109 | "futures-await-quote 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 110 | "futures-await-syn 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", 111 | "futures-await-synom 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", 112 | "proc-macro2 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", 113 | ] 114 | 115 | [[package]] 116 | name = "futures-await-await-macro" 117 | version = "0.1.0" 118 | source = "registry+https://github.com/rust-lang/crates.io-index" 119 | 120 | [[package]] 121 | name = "futures-await-quote" 122 | version = "0.4.0" 123 | source = "registry+https://github.com/rust-lang/crates.io-index" 124 | dependencies = [ 125 | "proc-macro2 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", 126 | ] 127 | 128 | [[package]] 129 | name = "futures-await-syn" 130 | version = "0.12.0" 131 | source = "registry+https://github.com/rust-lang/crates.io-index" 132 | dependencies = [ 133 | "futures-await-quote 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 134 | "futures-await-synom 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", 135 | "proc-macro2 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", 136 | "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 137 | ] 138 | 139 | [[package]] 140 | name = "futures-await-synom" 141 | version = "0.12.0" 142 | source = "registry+https://github.com/rust-lang/crates.io-index" 143 | dependencies = [ 144 | "futures-await-quote 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 145 | "proc-macro2 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", 146 | ] 147 | 148 | [[package]] 149 | name = "futures-cpupool" 150 | version = "0.1.8" 151 | source = "registry+https://github.com/rust-lang/crates.io-index" 152 | dependencies = [ 153 | "futures 0.1.18 
(registry+https://github.com/rust-lang/crates.io-index)", 154 | "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", 155 | ] 156 | 157 | [[package]] 158 | name = "generator" 159 | version = "0.6.5" 160 | source = "registry+https://github.com/rust-lang/crates.io-index" 161 | dependencies = [ 162 | "cc 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", 163 | "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", 164 | "rustc_version 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 165 | ] 166 | 167 | [[package]] 168 | name = "iovec" 169 | version = "0.1.2" 170 | source = "registry+https://github.com/rust-lang/crates.io-index" 171 | dependencies = [ 172 | "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 173 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 174 | ] 175 | 176 | [[package]] 177 | name = "kernel32-sys" 178 | version = "0.2.2" 179 | source = "registry+https://github.com/rust-lang/crates.io-index" 180 | dependencies = [ 181 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 182 | "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 183 | ] 184 | 185 | [[package]] 186 | name = "lazy_static" 187 | version = "1.0.0" 188 | source = "registry+https://github.com/rust-lang/crates.io-index" 189 | 190 | [[package]] 191 | name = "lazycell" 192 | version = "0.6.0" 193 | source = "registry+https://github.com/rust-lang/crates.io-index" 194 | 195 | [[package]] 196 | name = "libc" 197 | version = "0.2.36" 198 | source = "registry+https://github.com/rust-lang/crates.io-index" 199 | 200 | [[package]] 201 | name = "log" 202 | version = "0.3.9" 203 | source = "registry+https://github.com/rust-lang/crates.io-index" 204 | dependencies = [ 205 | "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", 206 | ] 207 | 208 | [[package]] 209 | name = "log" 210 | version = "0.4.1" 211 | source = "registry+https://github.com/rust-lang/crates.io-index" 212 | dependencies = [ 213 | "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 214 | ] 215 | 216 | [[package]] 217 | name = "may" 218 | version = "0.2.0" 219 | source = "registry+https://github.com/rust-lang/crates.io-index" 220 | dependencies = [ 221 | "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", 222 | "generator 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", 223 | "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 224 | "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 225 | "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", 226 | "may_queue 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 227 | "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 228 | "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", 229 | "nix 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", 230 | "rustc_version 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 231 | "smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", 232 | "time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", 233 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 234 | ] 235 | 236 | [[package]] 237 | name = "may_queue" 238 | version = "0.1.1" 239 | source = "registry+https://github.com/rust-lang/crates.io-index" 240 | dependencies = [ 241 | "rustc_version 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", 242 | "smallvec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", 243 | ] 244 | 245 | [[package]] 246 | name = "mio" 247 | version = "0.6.12" 248 | source = "registry+https://github.com/rust-lang/crates.io-index" 249 | dependencies = [ 250 | "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", 251 | "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", 252 | "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 253 | "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 254 | "lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", 255 | "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 256 | "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", 257 | "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 258 | "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", 259 | "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", 260 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 261 | ] 262 | 263 | [[package]] 264 | name = "miow" 265 | version = "0.2.1" 266 | source = "registry+https://github.com/rust-lang/crates.io-index" 267 | dependencies = [ 268 | "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 269 | "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", 270 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 271 | "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 272 | ] 273 | 274 | [[package]] 275 | name = "net2" 276 | version = "0.2.31" 277 | source = "registry+https://github.com/rust-lang/crates.io-index" 278 | dependencies = [ 279 | "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 280 | "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 281 | "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 282 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 283 | "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 284 | ] 285 | 286 | [[package]] 287 | name = "nix" 288 | version = "0.9.0" 289 | source = "registry+https://github.com/rust-lang/crates.io-index" 290 | dependencies = [ 291 | "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", 292 | "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 293 | "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 294 | "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", 295 | ] 296 | 297 | [[package]] 298 | name = "num_cpus" 299 | version = "1.8.0" 300 | source = "registry+https://github.com/rust-lang/crates.io-index" 301 | dependencies = [ 302 | "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 303 | ] 304 | 305 | [[package]] 306 | name = "proc-macro2" 307 | version = "0.1.10" 308 | source = "registry+https://github.com/rust-lang/crates.io-index" 309 | dependencies = [ 310 | "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 311 | ] 312 | 313 | [[package]] 314 | name = "redox_syscall" 315 | version = "0.1.37" 316 | source = "registry+https://github.com/rust-lang/crates.io-index" 317 | 318 | [[package]] 319 | name = "rustc_version" 320 | version = "0.2.1" 321 | source = "registry+https://github.com/rust-lang/crates.io-index" 322 | 
dependencies = [ 323 | "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", 324 | ] 325 | 326 | [[package]] 327 | name = "scoped-tls" 328 | version = "0.1.0" 329 | source = "registry+https://github.com/rust-lang/crates.io-index" 330 | 331 | [[package]] 332 | name = "semver" 333 | version = "0.6.0" 334 | source = "registry+https://github.com/rust-lang/crates.io-index" 335 | dependencies = [ 336 | "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", 337 | ] 338 | 339 | [[package]] 340 | name = "semver-parser" 341 | version = "0.7.0" 342 | source = "registry+https://github.com/rust-lang/crates.io-index" 343 | 344 | [[package]] 345 | name = "slab" 346 | version = "0.3.0" 347 | source = "registry+https://github.com/rust-lang/crates.io-index" 348 | 349 | [[package]] 350 | name = "slab" 351 | version = "0.4.0" 352 | source = "registry+https://github.com/rust-lang/crates.io-index" 353 | 354 | [[package]] 355 | name = "smallvec" 356 | version = "0.4.4" 357 | source = "registry+https://github.com/rust-lang/crates.io-index" 358 | 359 | [[package]] 360 | name = "smallvec" 361 | version = "0.6.0" 362 | source = "registry+https://github.com/rust-lang/crates.io-index" 363 | 364 | [[package]] 365 | name = "time" 366 | version = "0.1.39" 367 | source = "registry+https://github.com/rust-lang/crates.io-index" 368 | dependencies = [ 369 | "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", 370 | "redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)", 371 | "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", 372 | ] 373 | 374 | [[package]] 375 | name = "tokio-core" 376 | version = "0.1.12" 377 | source = "registry+https://github.com/rust-lang/crates.io-index" 378 | dependencies = [ 379 | "bytes 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", 380 | "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", 381 | "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 382 | "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", 383 | "mio 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", 384 | "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 385 | "slab 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 386 | "tokio-io 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 387 | ] 388 | 389 | [[package]] 390 | name = "tokio-io" 391 | version = "0.1.4" 392 | source = "registry+https://github.com/rust-lang/crates.io-index" 393 | dependencies = [ 394 | "bytes 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", 395 | "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", 396 | "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", 397 | ] 398 | 399 | [[package]] 400 | name = "unicode-xid" 401 | version = "0.1.0" 402 | source = "registry+https://github.com/rust-lang/crates.io-index" 403 | 404 | [[package]] 405 | name = "void" 406 | version = "1.0.2" 407 | source = "registry+https://github.com/rust-lang/crates.io-index" 408 | 409 | [[package]] 410 | name = "winapi" 411 | version = "0.2.8" 412 | source = "registry+https://github.com/rust-lang/crates.io-index" 413 | 414 | [[package]] 415 | name = "winapi" 416 | version = "0.3.4" 417 | source = "registry+https://github.com/rust-lang/crates.io-index" 418 | dependencies = [ 419 | "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 420 | 
"winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 421 | ] 422 | 423 | [[package]] 424 | name = "winapi-build" 425 | version = "0.1.1" 426 | source = "registry+https://github.com/rust-lang/crates.io-index" 427 | 428 | [[package]] 429 | name = "winapi-i686-pc-windows-gnu" 430 | version = "0.4.0" 431 | source = "registry+https://github.com/rust-lang/crates.io-index" 432 | 433 | [[package]] 434 | name = "winapi-x86_64-pc-windows-gnu" 435 | version = "0.4.0" 436 | source = "registry+https://github.com/rust-lang/crates.io-index" 437 | 438 | [[package]] 439 | name = "ws2_32-sys" 440 | version = "0.2.1" 441 | source = "registry+https://github.com/rust-lang/crates.io-index" 442 | dependencies = [ 443 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 444 | "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 445 | ] 446 | 447 | [metadata] 448 | "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" 449 | "checksum bitflags 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c30d3802dfb7281680d6285f2ccdaa8c2d8fee41f93805dba5c4cf50dc23cf" 450 | "checksum byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "652805b7e73fada9d85e9a6682a4abd490cb52d96aeecc12e33a0de34dfd0d23" 451 | "checksum bytes 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "1b7db437d718977f6dc9b2e3fd6fc343c02ac6b899b73fdd2179163447bd9ce9" 452 | "checksum cc 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "deaf9ec656256bb25b404c51ef50097207b9cbb29c933d31f92cae5a8a0ffee0" 453 | "checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" 454 | "checksum context 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "36a20941c83c641dcb891f6810d60d68bd7345207471b47191fee81a57145110" 455 | "checksum crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24ce9782d4d5c53674646a6a4c1863a21a8fc0cb649b3c94dfc16e45071dea19" 456 | "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" 457 | "checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" 458 | "checksum futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "0bab5b5e94f5c31fc764ba5dd9ad16568aae5d4825538c01d6bca680c9bf94a7" 459 | "checksum futures-await 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "84431acb0f168d02bd7727ad9fa385ff877e46d6018efad17ca509ae3bf5457c" 460 | "checksum futures-await-async-macro 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2868c9550459b113f8a656bd8f665bcdcfffb794e3fe5fbeaf5734325d18c2b5" 461 | "checksum futures-await-await-macro 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b7adba18b51fd888a24f6bd41c85e4f544a7089b15f84242350c014f9fdbf895" 462 | "checksum futures-await-quote 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f310765f0efc9c12ffb4887ca48d87a71c44ba531d9ba23055681a879f98ab75" 463 | "checksum futures-await-syn 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "90dcf027151897fe50205762efdec791be0e1e2a018d0ae077f72aa0abbf947f" 464 | "checksum futures-await-synom 0.12.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "a7c84de0ba04a67d7fc0fb4e3218ba539da65890549922d9d2bc874ba6240030" 465 | "checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" 466 | "checksum generator 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "58793854b56599595bce36c0efa63ce12fcf5827ca7eaead71af94deba0d0dac" 467 | "checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08" 468 | "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" 469 | "checksum lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c8f31047daa365f19be14b47c29df4f7c3b581832407daabe6ae77397619237d" 470 | "checksum lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a6f08839bc70ef4a3fe1d566d5350f519c5912ea86be0df1740a7d247c7fc0ef" 471 | "checksum libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "1e5d97d6708edaa407429faa671b942dc0f2727222fb6b6539bf1db936e4b121" 472 | "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" 473 | "checksum log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "89f010e843f2b1a31dbd316b3b8d443758bc634bed37aabade59c686d644e0a2" 474 | "checksum may 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cd5fa68cc3eaa221765cc0289dcb27d0dcb39b02d8c5178ab4bd849529f22f3c" 475 | "checksum may_queue 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e82cff13b5ba8afed5ee7a9fdfbc277023c4af09a59596315acef9e33e31f204" 476 | "checksum mio 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "75f72a93f046f1517e3cfddc0a096eb756a2ba727d36edc8227dee769a50a9b0" 477 | "checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" 478 | "checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09" 479 | "checksum nix 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a2c5afeb0198ec7be8569d666644b574345aad2e95a53baf3a532da3e0f3fb32" 480 | "checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30" 481 | "checksum proc-macro2 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "557facecbf90ff79faea80a08230d10c812016aa19198ed07d06de61f965b5cc" 482 | "checksum redox_syscall 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "0d92eecebad22b767915e4d529f89f28ee96dbbf5a4810d2b844373f136417fd" 483 | "checksum rustc_version 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b9743a7670d88d5d52950408ecdb7c71d8986251ab604d4689dd2ca25c9bca69" 484 | "checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d" 485 | "checksum semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" 486 | "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" 487 | "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" 488 | "checksum slab 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fdeff4cd9ecff59ec7e3744cbca73dfe5ac35c2aedb2cfba8a1c715a18912e9d" 489 | "checksum smallvec 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ee4f357e8cd37bf8822e1b964e96fd39e2cb5a0424f8aaa284ccaccc2162411c" 490 | "checksum smallvec 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44db0ecb22921ef790d17ae13a3f6d15784183ff5f2a01aa32098c7498d2b4b9" 491 | "checksum time 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "a15375f1df02096fb3317256ce2cee6a1f42fc84ea5ad5fc8c421cfe40c73098" 492 | "checksum tokio-core 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "52b4e32d8edbf29501aabb3570f027c6ceb00ccef6538f4bddba0200503e74e8" 493 | "checksum tokio-io 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "514aae203178929dbf03318ad7c683126672d4d96eccb77b29603d33c9e25743" 494 | "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" 495 | "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" 496 | "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" 497 | "checksum winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "04e3bd221fcbe8a271359c04f21a76db7d0c6028862d1bb5512d85e1e2eb5bb3" 498 | "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" 499 | "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 500 | "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 501 | "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" 502 | -------------------------------------------------------------------------------- /src/coroutine.rs: -------------------------------------------------------------------------------- 1 | //! The [`Coroutine`][::coroutine::Coroutine] and related things. 2 | 3 | use std::any::Any; 4 | use std::cell::RefCell; 5 | use std::panic::{self, AssertUnwindSafe, UnwindSafe}; 6 | 7 | use context::Context; 8 | use context::stack::{Stack, ProtectedFixedSizeStack}; 9 | use futures::{Async, Future, Poll}; 10 | use futures::unsync::oneshot::{self, Receiver}; 11 | 12 | use errors::{Dropped, StackError, TaskFailed}; 13 | use switch::{Switch, WaitTask}; 14 | 15 | enum TaskResult { 16 | Panicked(Box), 17 | PanicPropagated, 18 | Lost, 19 | Finished(R), 20 | } 21 | 22 | /// A `Future` representing a completion of a coroutine. 23 | /// 24 | /// Returns the result of the task the coroutine runs or the reason why it failed (it got lost 25 | /// during shutdown or panicked). 
26 | pub struct CoroutineResult<R> { 27 | receiver: Receiver<TaskResult<R>>, 28 | } 29 | 30 | impl<R> Future for CoroutineResult<R> { 31 | type Item = R; 32 | type Error = TaskFailed; 33 | fn poll(&mut self) -> Poll<R, TaskFailed> { 34 | match self.receiver.poll() { 35 | Ok(Async::NotReady) => Ok(Async::NotReady), 36 | Ok(Async::Ready(TaskResult::Finished(result))) => Ok(Async::Ready(result)), 37 | Ok(Async::Ready(TaskResult::Panicked(reason))) => Err(TaskFailed::Panicked(reason)), 38 | Ok(Async::Ready(TaskResult::PanicPropagated)) => Err(TaskFailed::PanicPropagated), 39 | Ok(Async::Ready(TaskResult::Lost)) | Err(_) => Err(TaskFailed::Lost), 40 | } 41 | } 42 | } 43 | 44 | /// Controls how a cleanup happens if the driving executor is dropped while a coroutine lives. 45 | /// 46 | /// If an executor is dropped and there is a coroutine that hasn't finished yet, there's no chance 47 | /// for it to make further progress and it becomes orphaned. The question is how to go about 48 | /// cleaning it up, as there's a stack allocated somewhere, full of data that may need a destructor 49 | /// run. 50 | /// 51 | /// Some primitives for waiting for a future resolution in the coroutine return an error in such a 52 | /// case, but most of them panic ‒ panicking unwinds the coroutine's stack, which is exactly what is needed. 53 | /// 54 | /// However, there are problems when the executor is dropped due to a panic itself ‒ then this would 55 | /// double panic and abort the whole program. 56 | /// 57 | /// This describes the strategy taken for cleaning up a coroutine (configured with 58 | /// [`Coroutine::cleanup_strategy`](struct.Coroutine.html#method.cleanup_strategy)). 59 | #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] 60 | pub enum CleanupStrategy { 61 | /// Always perform cleanup. 62 | /// 63 | /// No matter the situation, perform a cleanup ‒ return the error or panic. This is the 64 | /// default. 65 | CleanupAlways, 66 | /// Clean up the coroutine unless it is already panicking. 67 | /// 68 | /// This opts to *not* clean up the coroutine and to leak the data on its stack in case a 69 | /// panic is already happening. This prevents the double-panic problem, but doesn't free some 70 | /// resources or run destructors (this may lead to files not being flushed, for example). 71 | /// 72 | /// This is probably a good idea when panics lead to program termination anyway ‒ that way a 73 | /// better error message is output if only one panic happens. 74 | LeakOnPanic, 75 | /// Do not perform any cleanup. 76 | /// 77 | /// This is suitable if you never drop the executor until the end of the program. This doesn't 78 | /// perform the cleanup at all. This may lead to destructors not being run, which may lead to 79 | /// effects like files not being flushed. 80 | /// 81 | /// It may also be appropriate if your application is compiled with `panic = "abort"`. 82 | LeakAlways, 83 | /// Perform a cleanup in a normal situation, abort if already panicking. 84 | /// 85 | /// This is similar to how `CleanupAlways` acts, but instead of relying on the double-panic to 86 | /// abort the program, do the abort right away. 87 | AbortOnPanic, 88 | /// Abort on any attempt to drop a living coroutine. 89 | /// 90 | /// This is mostly for completeness' sake, but it may make some sense if you're sure the 91 | /// executor is never dropped while coroutines live. 92 | AbortAlways, 93 | } 94 | 95 | struct CoroutineContext { 96 | /// The context that called us; we'll switch back to it when we wait for something. 97 | parent_context: Context, 98 | /// Our own stack.
We keep ourselves alive. 99 | stack: ProtectedFixedSizeStack, 100 | /// How do we clean up the coroutine if it doesn't end before dropping the core? 101 | cleanup_strategy: CleanupStrategy, 102 | } 103 | 104 | thread_local! { 105 | static CONTEXTS: RefCell> = RefCell::new(Vec::new()); 106 | static BUILDER: RefCell = RefCell::new(Coroutine::new()); 107 | } 108 | 109 | /// A builder of coroutines. 110 | /// 111 | /// This struct is the main entry point and a way to start coroutines of various kinds. It allows 112 | /// both starting them with default parameters and configuring them with the builder pattern. 113 | #[derive(Clone, Debug)] 114 | pub struct Coroutine { 115 | stack_size: usize, 116 | cleanup_strategy: CleanupStrategy, 117 | } 118 | 119 | impl Coroutine { 120 | /// Starts building a coroutine. 121 | /// 122 | /// This constructor produces a new builder for coroutines. The builder can then be used to 123 | /// specify configuration of the coroutines. 124 | /// 125 | /// It is possible to spawn multiple coroutines from the same builder. 126 | /// 127 | /// # Examples 128 | /// 129 | /// ``` 130 | /// # extern crate corona; 131 | /// # extern crate tokio; 132 | /// use corona::Coroutine; 133 | /// use tokio::prelude::*; 134 | /// use tokio::runtime::current_thread; 135 | /// 136 | /// # fn main() { 137 | /// Coroutine::new() 138 | /// .run(|| {}) 139 | /// .unwrap(); 140 | /// # } 141 | /// 142 | /// ``` 143 | pub fn new() -> Self { 144 | Coroutine { 145 | stack_size: Stack::default_size(), 146 | cleanup_strategy: CleanupStrategy::CleanupAlways, 147 | } 148 | } 149 | 150 | /// Configures the stack size used for coroutines. 151 | /// 152 | /// Coroutines spawned from this builder will get stack of this size. The default is something 153 | /// small, so if you use recursion, you might want to use this. 154 | /// 155 | /// The stack size might be rounded up to a valid platform-dependent stack size (usually the 156 | /// nearest multiple of 4096). 157 | /// 158 | /// It is possible the stack size would still be invalid after this rounding up on given 159 | /// platform (eg. too large). In such case, attempts to spawn coroutines will fail with error. 160 | /// 161 | /// # Parameters 162 | /// 163 | /// * `size`: The stack size to use for newly spawned coroutines, in bytes. 164 | pub fn stack_size(&mut self, size: usize) -> &mut Self { 165 | self.stack_size = size; 166 | self 167 | } 168 | 169 | /// Configures how the coroutines should be cleaned up if the core is dropped before the 170 | /// coroutine resolves. 171 | /// 172 | /// See the details of [`CleanupStrategy`](enum.CleanupStrategy.html). 173 | pub fn cleanup_strategy(&mut self, strategy: CleanupStrategy) -> &mut Self { 174 | self.cleanup_strategy = strategy; 175 | self 176 | } 177 | 178 | /// Spawns a coroutine directly. 179 | /// 180 | /// This constructor spawns a coroutine with default parameters without the inconvenience of 181 | /// handling a builder. It is equivalent to spawning it with an unconfigured builder. 182 | /// 183 | /// Unlike the [`spawn`](#method.spawn.html), this one can't fail, since the default parameters 184 | /// of the builder are expected to always work (if they don't, file a bug). 185 | /// 186 | /// # Parameters 187 | /// 188 | /// * `task`: The closure to run inside the coroutine. 189 | /// 190 | /// # Returns 191 | /// 192 | /// A future that'll resolve once the coroutine completes, with the result of the `task`, or 193 | /// with an error explaining why the coroutine failed. 
194 | /// 195 | /// # Examples 196 | /// 197 | /// ```rust 198 | /// # extern crate corona; 199 | /// # extern crate tokio; 200 | /// use corona::Coroutine; 201 | /// use tokio::prelude::*; 202 | /// use tokio::runtime::current_thread; 203 | /// 204 | /// # fn main() { 205 | /// current_thread::block_on_all(future::lazy(|| { 206 | /// Coroutine::with_defaults(|| {}) 207 | /// })).unwrap(); 208 | /// # } 209 | /// 210 | /// ``` 211 | pub fn with_defaults(task: Task) -> CoroutineResult 212 | where 213 | R: 'static, 214 | Task: FnOnce() -> R + 'static, 215 | { 216 | Coroutine::new().spawn(task).unwrap() 217 | } 218 | 219 | /// The inner workings of `spawn` and `spawn_catch_panic`. 220 | fn spawn_inner(&self, task: Task, propagate_panic: bool) 221 | -> Result, StackError> 222 | where 223 | R: 'static, 224 | Task: FnOnce() -> R + UnwindSafe + 'static, 225 | { 226 | let (sender, receiver) = oneshot::channel(); 227 | 228 | let cleanup_strategy = self.cleanup_strategy; 229 | 230 | let perform = move |context, stack| { 231 | let my_context = CoroutineContext { 232 | parent_context: context, 233 | stack, 234 | cleanup_strategy, 235 | }; 236 | CONTEXTS.with(|c| c.borrow_mut().push(my_context)); 237 | let mut panic_result = None; 238 | let result = match panic::catch_unwind(AssertUnwindSafe(task)) { 239 | Ok(res) => TaskResult::Finished(res), 240 | Err(panic) => { 241 | if panic.is::() { 242 | TaskResult::Lost 243 | } else if propagate_panic { 244 | panic_result = Some(panic); 245 | TaskResult::PanicPropagated 246 | } else { 247 | TaskResult::Panicked(panic) 248 | } 249 | }, 250 | }; 251 | // We are not interested in errors. They just mean the receiver is no longer 252 | // interested, which is fine by us. 253 | drop(sender.send(result)); 254 | let my_context = CONTEXTS.with(|c| c.borrow_mut().pop().unwrap()); 255 | (my_context.parent_context, my_context.stack, panic_result) 256 | }; 257 | Switch::run_new_coroutine(self.stack_size, Box::new(Some(perform)))?; 258 | 259 | Ok(CoroutineResult { receiver }) 260 | } 261 | 262 | /// Spawns a coroutine with configuration from the builder. 263 | /// 264 | /// This spawns a new coroutine with the values previously set in the builder. 265 | /// 266 | /// # Parameters 267 | /// 268 | /// * `task`: The closure to run inside the coroutine. 269 | /// 270 | /// # Returns 271 | /// 272 | /// A future that'll resolve once the coroutine terminates and will yield the result of 273 | /// `task`, or an error explaining why the coroutine failed. 274 | /// 275 | /// This returns a `StackError` if the configured stack size is invalid. 276 | /// 277 | /// # Examples 278 | /// 279 | /// ```rust 280 | /// # extern crate corona; 281 | /// # extern crate tokio; 282 | /// use corona::Coroutine; 283 | /// use tokio::prelude::*; 284 | /// use tokio::runtime::current_thread; 285 | /// 286 | /// # fn main() { 287 | /// Coroutine::new() 288 | /// .stack_size(40_960) 289 | /// .run(|| { }) 290 | /// .unwrap(); 291 | /// # } 292 | /// ``` 293 | /// 294 | /// # Panic handling 295 | /// 296 | /// If the coroutine panics, the panic is propagated. This usually means the executor, unless 297 | /// the panic happens before the first suspension point, in which case it is the `spawn` itself 298 | /// which panics. 299 | pub fn spawn(&self, task: Task) -> Result, StackError> 300 | where 301 | R: 'static, 302 | Task: FnOnce() -> R + 'static, 303 | { 304 | // That AssertUnwindSafe is OK. We just pause the panic, teleport it to the callers thread 305 | // and then let it continue. 
306 | self.spawn_inner(AssertUnwindSafe(task), true) 307 | } 308 | 309 | /// Spawns a coroutine, preventing the panics in it from killing the parent task. 310 | /// 311 | /// This is just like [spawn](#method.spawn), but any panic in the coroutine is captured and 312 | /// returned through the result instead of propagating. 313 | /// 314 | /// Note that you need to ensure the `task` is [unwind 315 | /// safe](https://doc.rust-lang.org/std/panic/trait.UnwindSafe.html) for that reason. 316 | pub fn spawn_catch_panic<R, Task>(&self, task: Task) -> Result<CoroutineResult<R>, StackError> 317 | where 318 | R: 'static, 319 | Task: FnOnce() -> R + UnwindSafe + 'static, 320 | { 321 | self.spawn_inner(task, false) 322 | } 323 | 324 | /// Waits for completion of a future. 325 | /// 326 | /// This suspends the execution of the current coroutine until the provided future is 327 | /// completed, possibly switching to other coroutines in the meantime. 328 | /// 329 | /// This is the low-level implementation of the waiting. It is expected user code will use the 330 | /// interface in [`prelude`](../prelude/index.html) instead. 331 | /// 332 | /// # Parameters 333 | /// 334 | /// * `fut`: The future to wait on. 335 | /// 336 | /// # Returns 337 | /// 338 | /// * `Ok(result)` with the result the future resolved to (which is in itself a `Result`). 339 | /// * `Err(Dropped)` when the executor was dropped before the future had a chance to resolve. 340 | /// 341 | /// # Panics 342 | /// 343 | /// If called outside of a coroutine (there's nothing to suspend). 344 | /// 345 | /// Also, panics from within the provided future are propagated into the calling coroutine. 346 | pub fn wait<Fut>(mut fut: Fut) -> Result<Result<Fut::Item, Fut::Error>, Dropped> 347 | where 348 | Fut: Future, 349 | { 350 | // Grimoire marginalia (that is, a sidenote on the magic here). 351 | // 352 | // This is probably the heart of the library, both in its importance and in how complex it is to 353 | // understand. We want to wait for the future to finish. 354 | // 355 | // To do that we do the following: 356 | // • Prepare a space for the result on our own stack. 357 | // • Prepare a wrapper future that'll do some bookkeeping around the user's future ‒ for 358 | // example makes sure the wrapper future has the same signature and can be spawned onto 359 | // the reactor. 360 | // • Switch to our parent context with the instruction to install the future for us into 361 | // the reactor. 362 | // 363 | // Some time later, as the reactor runs, the future resolves. It'll do the following: 364 | // • Store the result into the prepared space on our stack. 365 | // • Switch the context back to us. 366 | // • This function resumes, picks up the result from its stack and returns it. 367 | // 368 | // There are a few unsafe blocks here, some of them looking a bit dangerous. So, here is some 369 | // rationale for why this should be in fact safe. 370 | // 371 | // The handle.spawn() requires a 'static future. This is because the future will almost 372 | // certainly live longer than the stack frame that spawned it onto the reactor. Therefore, 373 | // the future must own anything it'll touch at some unknown later time. 374 | // 375 | // However, this is true in our case. The closure that runs in a coroutine is required to 376 | // be 'static. Therefore, anything non-'static must live on the coroutine's stack. And the 377 | // future has the only pointer to the stack of this coroutine, therefore effectively owns 378 | // the stack and everything on it.
379 |         //
380 |         // In other words, the stack is there for as long as the future waits idle in the reactor
381 |         // and won't go away before the future either resolves or is dropped. There's a small trick
382 |         // in the `drop` implementation and the future itself to ensure this is true even when
383 |         // switching the contexts (it is true when we switch to this coroutine, but not after we
384 |         // leave it, so the future's implementation must not touch these things afterwards).
385 |         let my_context = CONTEXTS.with(|c| {
386 |             c.borrow_mut().pop().expect("Can't wait outside of a coroutine")
387 |         });
388 |         let mut result: Option<Result<I, E>> = None;
389 |         let (reply_instruction, context) = {
390 |             // Shenanigans to make the closure pretend to be 'static to the compiler.
391 |             let res_ref = &mut result as *mut _ as usize;
392 |             let fut_ref = &mut fut as *mut _ as usize;
393 |
394 |             let mut poll = move || {
395 |                 let fut = fut_ref as *mut Fut;
396 |                 let res = match unsafe { fut.as_mut() }.unwrap().poll() {
397 |                     Ok(Async::NotReady) => return Ok(Async::NotReady),
398 |                     Ok(Async::Ready(ok)) => Ok(ok),
399 |                     Err(err) => Err(err),
400 |                 };
401 |                 let result = res_ref as *mut Option<Result<I, E>>;
402 |                 // Inject the result into the place prepared on the stack.
403 |                 unsafe { *result = Some(res) };
404 |                 Ok(Async::Ready(()))
405 |             };
406 |             let task = WaitTask {
407 |                 poll: &mut poll,
408 |                 context: None,
409 |                 cleanup_strategy: my_context.cleanup_strategy,
410 |                 stack: Some(my_context.stack),
411 |             };
412 |             let instruction = Switch::WaitFuture { task };
413 |             instruction.exchange(my_context.parent_context)
414 |         };
415 |         let (result, stack) = match reply_instruction {
416 |             Switch::Resume { stack } => (Ok(Ok(result.unwrap())), stack),
417 |             Switch::Cleanup { stack } => (Ok(Err(Dropped)), stack),
418 |             Switch::PropagateFuturePanic { stack, panic } => (Err(panic), stack),
419 |             _ => unreachable!("Invalid instruction on wakeup"),
420 |         };
421 |         // Reconstruct our context after we switched back.
422 |         let new_context = CoroutineContext {
423 |             parent_context: context,
424 |             stack,
425 |             cleanup_strategy: my_context.cleanup_strategy,
426 |         };
427 |         CONTEXTS.with(|c| c.borrow_mut().push(new_context));
428 |         match result {
429 |             Ok(result) => result,
430 |             Err(panic) => panic::resume_unwind(panic),
431 |         }
432 |     }
433 |
434 |     /// Checks if the current configuration is able to spawn coroutines.
435 |     ///
436 |     /// Whether a given configuration can spawn is deterministic on the given system. Therefore,
437 |     /// it is possible to check whether further coroutines spawned by this builder would succeed.
438 |     /// This method does that check.
439 |     pub fn verify(&self) -> Result<(), StackError> {
440 |         // Try to spawn an empty coroutine. That'd create the stack, etc., but there are no
441 |         // suspension points in it, so it's safe even outside of the executor.
442 |         self.spawn(|| ()).map(|_| ())
443 |     }
444 |
445 |     /// Puts the builder into thread-local storage.
446 |     ///
447 |     /// This first verifies (see [`verify`](#method.verify)) the configuration in the builder is
448 |     /// usable and then sets it into thread-local storage.
449 |     ///
450 |     /// The thread-local storage is then used by the [`spawn`](fn.spawn.html) stand-alone function
451 |     /// (in contrast to the [`spawn`](#method.spawn) method).
452 |     ///
453 |     /// If the verification fails, the original value is preserved. If this is never called, the
454 |     /// thread-local storage contains a default configuration created by [`Coroutine::new`](#method.new).
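    ///
    /// # Examples
    ///
    /// A minimal sketch of the intended use together with the stand-alone
    /// [`spawn`](fn.spawn.html) (illustrative only; it assumes the same `corona`/`tokio` root
    /// re-exports the other examples in this file already rely on):
    ///
    /// ```rust
    /// # extern crate corona;
    /// # extern crate tokio;
    /// use corona::{spawn, Coroutine};
    /// use tokio::prelude::*;
    /// use tokio::runtime::current_thread;
    ///
    /// # fn main() {
    /// Coroutine::new()
    ///     .stack_size(40_960)
    ///     .set_thread_local()
    ///     .unwrap();
    /// // From now on, the stand-alone `spawn` uses the configuration above.
    /// let answer = current_thread::block_on_all(future::lazy(|| spawn(|| 42))).unwrap();
    /// assert_eq!(42, answer);
    /// # }
    /// ```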
455 |     pub fn set_thread_local(&self) -> Result<(), StackError> {
456 |         self.verify()?;
457 |         BUILDER.with(|builder| builder.replace(self.clone()));
458 |         Ok(())
459 |     }
460 |
461 |     /// Gets a copy of the builder in thread-local storage.
462 |     ///
463 |     /// This may help if you want to use the same builder as [`spawn`](fn.spawn.html) does, but you
464 |     /// want to do something fancier, like [`spawn_catch_panic`](#method.spawn_catch_panic).
465 |     pub fn from_thread_local() -> Self {
466 |         BUILDER.with(|builder| builder.borrow().clone())
467 |     }
468 |
469 |     /// Starts a whole runtime and waits for a main coroutine to finish.
470 |     ///
471 |     /// While it is possible to create the `tokio::runtime::current_thread::Runtime` manually, feed
472 |     /// it with a lazy future and then run a coroutine inside it (or reuse the runtime when
473 |     /// something else creates it), this method is provided to take care of all these things,
474 |     /// making it more convenient.
475 |     ///
476 |     /// In addition to starting the main coroutine passed to it, it sets the coroutine builder into
477 |     /// thread-local storage (see [`set_thread_local`](#method.set_thread_local)).
478 |     ///
479 |     /// ```rust
480 |     /// extern crate corona;
481 |     /// extern crate tokio;
482 |     ///
483 |     /// use corona::prelude::*;
484 |     /// use corona::spawn;
485 |     /// use tokio::prelude::*;
486 |     ///
487 |     /// let result = Coroutine::new()
488 |     ///     // 40kB of stack size for all the coroutines.
489 |     ///     .stack_size(40960)
490 |     ///     .run(|| {
491 |     ///         // Everything (builder in thread local storage, coroutine, tokio runtime) is set up
492 |     ///         // in here.
493 |     ///         future::ok::<(), ()>(()).coro_wait();
494 |     ///         let sub_coroutine = spawn(|| {
495 |     ///             42
496 |     ///         });
497 |     ///         sub_coroutine.coro_wait().unwrap()
498 |     ///     }).unwrap();
499 |     /// assert_eq!(42, result);
500 |     /// ```
501 |     #[cfg(feature = "convenient-run")]
502 |     pub fn run<R, Task>(&self, task: Task) -> Result<R, StackError>
503 |     where
504 |         R: 'static,
505 |         Task: FnOnce() -> R + 'static,
506 |     {
507 |         self.set_thread_local()?;
508 |         let result = ::tokio::runtime::current_thread::block_on_all(::futures::future::lazy(|| {
509 |             spawn(task)
510 |         })).expect("Lost a coroutine when waiting for all of them");
511 |         Ok(result)
512 |     }
513 | }
514 |
515 | impl Default for Coroutine {
516 |     fn default() -> Self {
517 |         Self::new()
518 |     }
519 | }
520 |
521 | /// Spawns a new coroutine.
522 | ///
523 | /// This is very similar to [`Coroutine::spawn`](struct.Coroutine.html#method.spawn), but it uses
524 | /// the coroutine builder in thread-local storage (see
525 | /// [`Coroutine::set_thread_local`](struct.Coroutine.html#method.set_thread_local)). This exists
526 | /// merely as a convenience.
527 | pub fn spawn<R, Task>(task: Task) -> CoroutineResult<R>
528 | where
529 |     R: 'static,
530 |     Task: FnOnce() -> R + 'static,
531 | {
532 |     BUILDER.with(|builder| builder.borrow().spawn(task))
533 |         .expect("Unverified builder in thread local storage")
534 | }
535 |
536 | #[cfg(test)]
537 | mod tests {
538 |     use std::sync::atomic::{AtomicBool, Ordering};
539 |     use std::rc::Rc;
540 |     use std::time::Duration;
541 |
542 |     use futures::future;
543 |     use tokio::clock;
544 |     use tokio::prelude::*;
545 |     use tokio::runtime::current_thread::{self, Runtime};
546 |     use tokio::timer::Delay;
547 |
548 |     use super::*;
549 |
550 |     /// Test spawning and execution of tasks.
551 |     #[test]
552 |     fn spawn_some() {
553 |         let s1 = Rc::new(AtomicBool::new(false));
554 |         let s2 = Rc::new(AtomicBool::new(false));
555 |         let s1c = s1.clone();
556 |         let s2c = s2.clone();
557 |
558 |         let mut builder = Coroutine::new();
559 |         builder.stack_size(40960);
560 |         let builder_inner = builder.clone();
561 |
562 |         let result = builder.spawn(move || {
563 |             let result = builder_inner.spawn(move || {
564 |                 s2c.store(true, Ordering::Relaxed);
565 |                 42
566 |             })
567 |             .unwrap();
568 |             s1c.store(true, Ordering::Relaxed);
569 |             result
570 |         })
571 |         .unwrap();
572 |
573 |         // Both coroutines ran to completion.
574 |         assert!(s1.load(Ordering::Relaxed), "The outer closure didn't run");
575 |         assert!(s2.load(Ordering::Relaxed), "The inner closure didn't run");
576 |         // The result gets propagated through.
577 |         let extract = result.and_then(|r| r);
578 |         assert_eq!(42, current_thread::block_on_all(extract).unwrap());
579 |     }
580 |
581 |     #[test]
582 |     fn coroutine_run() {
583 |         let result = Coroutine::new().run(|| {
584 |             Coroutine::wait(future::ok::<(), ()>(())).unwrap().unwrap();
585 |             42
586 |         }).unwrap();
587 |         assert_eq!(42, result);
588 |     }
589 |
590 |     /// Wait for a future to complete.
591 |     #[test]
592 |     fn future_wait() {
593 |         let result = Coroutine::new().run(|| {
594 |             let (sender, receiver) = oneshot::channel();
595 |             let all_done = Coroutine::with_defaults(move || {
596 |                 let msg = Coroutine::wait(receiver).unwrap().unwrap();
597 |                 msg
598 |             });
599 |             Coroutine::with_defaults(move || {
600 |                 let timeout = Delay::new(clock::now() + Duration::from_millis(50));
601 |                 Coroutine::wait(timeout).unwrap().unwrap();
602 |                 sender.send(42).unwrap();
603 |             });
604 |             Coroutine::wait(all_done).unwrap().unwrap()
605 |         });
606 |         assert_eq!(42, result.unwrap());
607 |     }
608 |
609 |     /// The panic doesn't kill the main thread, but is reported.
610 |     #[test]
611 |     fn panics_catch() {
612 |         let mut rt = Runtime::new().unwrap();
613 |         let catch_panic = future::lazy(|| {
614 |             Coroutine::new().spawn_catch_panic(|| panic!("Test")).unwrap()
615 |         });
616 |         match rt.block_on(catch_panic) {
617 |             Err(TaskFailed::Panicked(_)) => (),
618 |             _ => panic!("Panic not reported properly"),
619 |         }
620 |         assert_eq!(42, rt.block_on(future::lazy(|| Coroutine::with_defaults(|| 42))).unwrap());
621 |     }
622 |
623 |     /// However, normal coroutines do panic.
624 |     #[test]
625 |     #[should_panic]
626 |     fn panics_spawn() {
627 |         let _ = Coroutine::new().run(|| {
628 |             spawn(|| panic!("Test"))
629 |         });
630 |     }
631 |
632 |     /// This one panics and the panic is propagated too; but since it happens after a suspension point, it surfaces from the executor rather than from `run` itself.
633 |     #[test]
634 |     fn panics_run() {
635 |         panic::catch_unwind(|| {
636 |             current_thread::block_on_all(future::lazy(|| {
637 |                 Coroutine::with_defaults(|| {
638 |                     let _ = Coroutine::wait(future::ok::<(), ()>(()));
639 |                     panic!("Test");
640 |                 })
641 |             }))
642 |         }).unwrap_err();
643 |     }
644 |
645 |     /// It's impossible to wait on a future outside of a coroutine.
646 |     #[test]
647 |     #[should_panic]
648 |     fn panic_without_coroutine() {
649 |         drop(Coroutine::wait(future::ok::<_, ()>(42)));
650 |     }
651 |
652 |     /// Tests leaking instead of double-panicking. This is verified simply by the test not crashing
653 |     /// hard.
654 |     #[test]
655 |     fn panic_leak() {
656 |         panic::catch_unwind(|| current_thread::block_on_all(future::lazy(|| -> Result<(), ()> {
657 |             let _coroutine = Coroutine::new()
658 |                 .cleanup_strategy(CleanupStrategy::LeakOnPanic)
659 |                 .spawn(|| {
660 |                     let _ = Coroutine::wait(future::empty::<(), ()>());
661 |                     panic!("Should never get here!");
662 |                 })
663 |                 .unwrap();
664 |             panic!("Test");
665 |         }))).unwrap_err();
666 |         /*
667 |          * FIXME: This doesn't work as intended. The sender gets leaked too, so it is never closed
668 |          * and we don't get the Lost case. Any way to make sure we get it?
669 |         if let Err(TaskFailed::Lost) = coroutine.wait() {
670 |             // OK, correct error
671 |         } else {
672 |             panic!("Coroutine didn't get lost correctly");
673 |         }
674 |         */
675 |     }
676 |
677 |     /// Tests unconditional leaking on dropping the core. The destructor in there panics, so the
678 |     /// test checks it is never called.
679 |     #[test]
680 |     fn leak_always() {
681 |         let mut rt = Runtime::new().unwrap();
682 |         // This leaves the coroutine in there
683 |         let _ = rt.block_on(future::lazy(|| {
684 |             Coroutine::new()
685 |                 .cleanup_strategy(CleanupStrategy::LeakAlways)
686 |                 .spawn(|| {
687 |                     struct Destroyer;
688 |                     impl Drop for Destroyer {
689 |                         fn drop(&mut self) {
690 |                             panic!("Destructor called");
691 |                         }
692 |                     }
693 |                     let _d = Destroyer;
694 |                     let _ = Coroutine::wait(future::empty::<(), ()>());
695 |                 })
696 |                 .unwrap();
697 |             Ok::<(), ()>(())
698 |         }));
699 |         drop(rt);
700 |     }
701 |
702 |     /// A panic from inside the future doesn't kill the core, but falls out of the wait into the
703 |     /// responsible coroutine.
704 |     ///
705 |     /// We test this separately because the future is „exported“ to the main coroutine to be
706 |     /// polled. So we check it doesn't kill the core.
707 |     #[test]
708 |     fn panic_in_future() {
709 |         current_thread::block_on_all(future::lazy(|| {
710 |             Coroutine::with_defaults(|| {
711 |                 struct PanicFuture;
712 |                 impl Future for PanicFuture {
713 |                     type Item = ();
714 |                     type Error = ();
715 |                     fn poll(&mut self) -> Poll<(), ()> {
716 |                         panic!("Test");
717 |                     }
718 |                 }
719 |
720 |                 if let Ok(_) = panic::catch_unwind(|| Coroutine::wait(PanicFuture)) {
721 |                     panic!("A panic should fall out of wait");
722 |                 }
723 |             })
724 |         })).unwrap();
725 |     }
726 | }
727 |
--------------------------------------------------------------------------------
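Editorial note: the doc comment on `spawn_catch_panic` above carries no runnable example. The sketch below shows one plausible use, mirroring the crate's other tokio 0.1 examples and the `panics_catch` test; the `corona::errors::TaskFailed` import path is an assumption based on the `src/errors.rs` module in this repository, not something the listing confirms.

```rust
extern crate corona;
extern crate futures;
extern crate tokio;

use corona::Coroutine;
use corona::errors::TaskFailed; // Assumed path; `TaskFailed` lives in the crate's errors module.
use futures::future;
use tokio::runtime::current_thread;

fn main() {
    let outcome = current_thread::block_on_all(future::lazy(|| {
        // The panic is captured and reported through the result instead of propagating,
        // so the runtime itself keeps running.
        Coroutine::new()
            .spawn_catch_panic(|| panic!("Test"))
            .expect("invalid coroutine configuration")
    }));
    match outcome {
        Err(TaskFailed::Panicked(_)) => println!("panic was captured"),
        _ => panic!("the panic should have been reported through the result"),
    }
}
```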