├── .gitignore
├── .github
│   └── workflows
│       └── ci.yml
├── src
│   ├── page_size.rs
│   ├── fiber
│   │   ├── windows.rs
│   │   ├── alloc.rs
│   │   ├── mod.rs
│   │   └── mmap.rs
│   ├── arch
│   │   ├── windows.c
│   │   ├── x86_64.S
│   │   ├── aarch64.S
│   │   ├── x86.s
│   │   ├── riscv64.s
│   │   └── wasm32.s
│   ├── lib.rs
│   ├── future.rs
│   └── generator.rs
├── Cargo.toml
├── examples
│   ├── generator.rs
│   └── read.rs
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | /Cargo.lock
3 | 
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | 
3 | on:
4 |   push:
5 |     branches: [ master ]
6 |   pull_request:
7 |     branches: [ master ]
8 | 
9 | env:
10 |   CARGO_TERM_COLOR: always
11 | 
12 | jobs:
13 |   build:
14 |     strategy:
15 |       matrix:
16 |         os: [ubuntu-latest, macos-latest]
17 |     runs-on: ${{ matrix.os }}
18 |     steps:
19 |     - uses: actions/checkout@v2
20 |     - name: Test
21 |       run: cargo test --verbose
22 | 
--------------------------------------------------------------------------------
/src/page_size.rs:
--------------------------------------------------------------------------------
1 | use std::sync::atomic::{AtomicUsize, Ordering};
2 | 
3 | static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
4 | 
5 | pub fn get() -> usize {
6 |     let mut ret = PAGE_SIZE.load(Ordering::Relaxed);
7 |     if ret == 0 {
8 |         ret = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
9 |         assert!(ret >= 4096, "page size must be no smaller than 4KiB");
10 |         PAGE_SIZE.store(ret, Ordering::Relaxed);
11 |     }
12 |     ret
13 | }
--------------------------------------------------------------------------------
/src/fiber/windows.rs:
--------------------------------------------------------------------------------
1 | use super::*;
2 | 
3 | pub struct Stack(usize);
4 | 
5 | extern "C" {
6 |     fn fiber_create() -> usize;
7 |     fn fiber_destroy(fiber: usize);
8 | }
9 | 
10 | impl Stack {
11 |     pub fn allocate() -> Self {
12 |         Self(unsafe { fiber_create() })
13 |     }
14 | 
15 |     pub fn bottom(&self) -> usize {
16 |         // FIXME: Get the actual stack bottom
17 |         0
18 |     }
19 | 
20 |     pub fn top(&self) -> StackPointer {
21 |         unsafe { StackPointer(NonZeroUsize::new_unchecked(self.0)) }
22 |     }
23 | }
24 | 
25 | impl Drop for Stack {
26 |     fn drop(&mut self) {
27 |         unsafe {
28 |             fiber_destroy(self.0);
29 |         }
30 |     }
31 | }
--------------------------------------------------------------------------------
/src/fiber/alloc.rs:
--------------------------------------------------------------------------------
1 | use super::*;
2 | 
3 | pub struct Stack(usize);
4 | 
5 | impl Stack {
6 |     pub fn allocate() -> Self {
7 |         Self(unsafe {
8 |             std::alloc::alloc(std::alloc::Layout::from_size_align(0x200000, 16).unwrap()) as usize
9 |         })
10 |     }
11 | 
12 |     pub fn bottom(&self) -> usize {
13 |         self.0
14 |     }
15 | 
16 |     pub fn top(&self) -> StackPointer {
17 |         unsafe { StackPointer(NonZeroUsize::new_unchecked(self.0 + 0x200000)) }
18 |     }
19 | }
20 | 
21 | impl Drop for Stack {
22 |     fn drop(&mut self) {
23 |         unsafe {
24 |             std::alloc::dealloc(
25 |                 self.0 as *mut u8,
26 |                 std::alloc::Layout::from_size_align(0x200000, 16).unwrap(),
27 |             );
28 |         }
29 |     }
30 | }
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "stackful"
3 | version = "0.1.4"
4 | authors = ["Gary Guo "]
5 | edition = "2018"
= "MIT OR Apache-2.0" 7 | repository = "https://github.com/nbdd0121/stackful" 8 | description = "Bridge between sync and async" 9 | 10 | [dependencies] 11 | futures-executor = { version = "0.3.5", optional = true } 12 | stacker = { git = "https://github.com/nbdd0121/stacker.git", optional = true } 13 | 14 | [target.'cfg(not(any(target_arch = "wasm32", windows)))'.dependencies] 15 | libc = "0.2" 16 | 17 | [build-dependencies] 18 | cc = "1.0" 19 | 20 | [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] 21 | async-std = { version = "1.6", features = ["unstable"] } 22 | 23 | [dev-dependencies] 24 | futures = "0.3.5" 25 | byteorder = "1.3" 26 | 27 | [features] 28 | future = ["futures-executor"] 29 | nightly = [] 30 | default = ["future"] 31 | -------------------------------------------------------------------------------- /examples/generator.rs: -------------------------------------------------------------------------------- 1 | use stackful::generator::*; 2 | use std::pin::Pin; 3 | 4 | fn main() { 5 | let mut gen = StackfulGenerator::new(|y: &YieldHandle, mut r: i32| { 6 | for i in 0..100 { 7 | assert_eq!(r, i); 8 | r = y.yeet(i); 9 | } 10 | 11 | // Test yield cross nested generators. 12 | let mut gen2 = StackfulGenerator::new(|_: &YieldHandle<(), ()>, ()| { 13 | assert_eq!(r, 100); 14 | r = y.yeet(100); 15 | }); 16 | assert!(matches!( 17 | Pin::new(&mut gen2).resume(()), 18 | GeneratorState::Complete(()) 19 | ),); 20 | drop(gen2); 21 | 22 | assert_eq!(r, 1000); 23 | 1000 24 | }); 25 | let mut gen = Pin::new(&mut gen); 26 | 27 | for i in 0..101 { 28 | println!("{:?}", gen.as_mut().resume(i)); 29 | } 30 | assert!(matches!( 31 | gen.as_mut().resume(1000), 32 | GeneratorState::Complete(1000) 33 | )); 34 | } 35 | -------------------------------------------------------------------------------- /src/fiber/mod.rs: -------------------------------------------------------------------------------- 1 | use core::num::NonZeroUsize; 2 | 3 | #[cfg(not(any(target_arch = "wasm32", windows)))] 4 | mod mmap; 5 | #[cfg(not(any(target_arch = "wasm32", windows)))] 6 | pub use mmap::*; 7 | 8 | #[cfg(target_arch = "wasm32")] 9 | mod alloc; 10 | #[cfg(target_arch = "wasm32")] 11 | pub use alloc::*; 12 | 13 | #[cfg(windows)] 14 | mod windows; 15 | #[cfg(windows)] 16 | pub use windows::*; 17 | 18 | #[repr(transparent)] 19 | #[derive(Clone, Copy)] 20 | pub struct StackPointer(pub NonZeroUsize); 21 | 22 | #[repr(C)] 23 | #[derive(Clone, Copy)] 24 | pub struct SwitchResult { 25 | pub stack: Option, 26 | pub payload: usize, 27 | } 28 | 29 | extern "C" { 30 | pub fn fiber_enter( 31 | stack: StackPointer, 32 | payload: usize, 33 | f: extern "C" fn(StackPointer, payload: usize) -> !, 34 | ) -> SwitchResult; 35 | pub fn fiber_switch_enter(stack: StackPointer, payload: usize) -> SwitchResult; 36 | pub fn fiber_switch_leave(stack: StackPointer, payload: usize) -> SwitchResult; 37 | } 38 | -------------------------------------------------------------------------------- /examples/read.rs: -------------------------------------------------------------------------------- 1 | use async_std::io::Read as AsyncRead; 2 | use async_std::prelude::*; 3 | use byteorder::{ReadBytesExt, LE}; 4 | use stackful::{stackful, wait}; 5 | use std::io::Read; 6 | use std::marker::Unpin; 7 | 8 | struct Sync(T); 9 | 10 | impl Read for Sync 11 | where 12 | T: AsyncRead + Unpin, 13 | { 14 | fn read(&mut self, buf: &mut [u8]) -> std::io::Result { 15 | wait(self.0.read(buf)) 16 | } 17 | } 18 | 19 | async fn process(stream: &mut (dyn AsyncRead + 
33 | extern "C" {
34 |     pub fn fiber_enter(
35 |         stack: StackPointer,
36 |         payload: usize,
37 |         f: extern "C" fn(StackPointer, payload: usize) -> !,
38 |     ) -> SwitchResult;
39 |     pub fn fiber_switch_enter(stack: StackPointer, payload: usize) -> SwitchResult;
40 |     pub fn fiber_switch_leave(stack: StackPointer, payload: usize) -> SwitchResult;
41 | }
--------------------------------------------------------------------------------
/examples/read.rs:
--------------------------------------------------------------------------------
1 | use async_std::io::Read as AsyncRead;
2 | use async_std::prelude::*;
3 | use byteorder::{ReadBytesExt, LE};
4 | use stackful::{stackful, wait};
5 | use std::io::Read;
6 | use std::marker::Unpin;
7 | 
8 | struct Sync<T>(T);
9 | 
10 | impl<T> Read for Sync<T>
11 | where
12 |     T: AsyncRead + Unpin,
13 | {
14 |     fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
15 |         wait(self.0.read(buf))
16 |     }
17 | }
18 | 
19 | async fn process(stream: &mut (dyn AsyncRead + Unpin)) -> u32 {
20 |     stackful(|| {
21 |         let mut sync = Sync(stream);
22 |         // Note that this will recursively call into the `read` function, which
23 |         // calls `wait` to await the future.
24 |         sync.read_u32::<LE>().unwrap()
25 |         // This is just an example; it could be complex processing, zipping, etc.
26 |         // If you are calling into an FFI library that uses a callback, you
27 |         // can even `wait()` from that callback and turn the whole FFI library
28 |         // into async!
29 |     })
30 |     .await
31 | }
32 | 
33 | fn main() {
34 |     async_std::task::block_on(async {
35 |         async_std::task::spawn_local(async {
36 |             // This is just an example; it could be any AsyncRead stream
37 |             let mut stream: &[u8] = &[0xef, 0xbe, 0xad, 0xde];
38 |             println!("{:x}", process(&mut stream).await);
39 |         })
40 |         .await;
41 |     });
42 | }
--------------------------------------------------------------------------------
/src/arch/windows.c:
--------------------------------------------------------------------------------
1 | #include <windows.h>
2 | 
3 | __declspec(thread) int active_fibers = 0;
4 | __declspec(thread) void *switching_fiber = 0;
5 | __declspec(thread) void *switching_payload = 0;
6 | 
7 | typedef struct {
8 |     void *fiber;
9 |     void *payload;
10 | } switch_result;
11 | 
12 | typedef void (*fiber_func)(void*, void*);
13 | 
14 | struct enter_payload {
15 |     fiber_func func;
16 |     void *actual_payload;
17 | };
18 | 
19 | static void fiber_proc(void *param) {
20 |     (void)param;
21 |     struct enter_payload *payload = (struct enter_payload *)switching_payload;
22 |     payload->func(switching_fiber, payload->actual_payload);
23 | }
24 | 
25 | static switch_result fiber_switch(void *fiber, void *payload) {
26 |     switching_fiber = GetCurrentFiber();
27 |     switching_payload = payload;
28 |     SwitchToFiber(fiber);
29 |     switch_result ret = {
30 |         .fiber = switching_fiber,
31 |         .payload = switching_payload,
32 |     };
33 |     return ret;
34 | }
35 | 
36 | void* fiber_create() {
37 |     if (active_fibers == 0) {
38 |         ConvertThreadToFiber(0);
39 |     }
40 |     active_fibers += 1;
41 |     return CreateFiber(0x200000, fiber_proc, 0);
42 | }
43 | 
44 | void fiber_destroy(void *fiber) {
45 |     active_fibers -= 1;
46 |     DeleteFiber(fiber);
47 | 
48 |     if (active_fibers == 0) {
49 |         ConvertFiberToThread();
50 |     }
51 | }
52 | 
53 | switch_result fiber_enter(void *fiber, void *payload, fiber_func func) {
54 |     struct enter_payload enter_payload = {
55 |         .func = func,
56 |         .actual_payload = payload,
57 |     };
58 |     return fiber_switch(fiber, &enter_payload);
59 | }
60 | 
61 | switch_result fiber_switch_enter(void *fiber, void *payload) {
62 |     return fiber_switch(fiber, payload);
63 | }
64 | 
65 | switch_result fiber_switch_leave(void *fiber, void *payload) {
66 |     return fiber_switch(fiber, payload);
67 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # stackful
2 | 
3 | [![](https://img.shields.io/crates/v/stackful.svg)](https://crates.io/crates/stackful)
4 | [![](https://docs.rs/stackful/badge.svg)](https://docs.rs/stackful/)
5 | [![Build Status](https://github.com/nbdd0121/stackful/workflows/CI/badge.svg)](https://github.com/nbdd0121/stackful/actions)
6 | 
7 | `stackful` attempts to bridge sync and async and blur the difference between them.
8 | 
9 | It allows you to easily convert between them with two supplied functions, `wait` and `stackful`.
10 | It can be quite useful if you are using a library that only provides a sync interface on top of
11 | async IO.
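12 | 
13 | At its heart, the bridge is just these two functions composed. A minimal sketch
14 | (using `futures::executor::block_on` as a stand-in executor):
15 | 
16 | ```Rust
17 | use stackful::{stackful, wait};
18 | 
19 | fn main() {
20 |     // `stackful` turns a sync closure into a `Future`; inside that closure,
21 |     // `wait` can synchronously await any `Future`.
22 |     let fut = stackful(|| wait(async { 21 * 2 }));
23 |     assert_eq!(futures::executor::block_on(fut), 42);
24 | }
25 | ```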
26 | 
27 | More details can be found in the docs or the source code.
28 | 
29 | ## Example
30 | 
31 | ```Rust
32 | use async_std::io::Read as AsyncRead;
33 | use async_std::prelude::*;
34 | use byteorder::{ReadBytesExt, LE};
35 | use stackful::{stackful, wait};
36 | use std::io::Read;
37 | use std::marker::Unpin;
38 | 
39 | struct Sync<T>(T);
40 | 
41 | impl<T> Read for Sync<T>
42 | where
43 |     T: AsyncRead + Unpin,
44 | {
45 |     fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
46 |         wait(self.0.read(buf))
47 |     }
48 | }
49 | 
50 | async fn process(stream: &mut (dyn AsyncRead + Unpin)) -> u32 {
51 |     stackful(|| {
52 |         let mut sync = Sync(stream);
53 |         // Note that this will recursively call into the `read` function, which
54 |         // calls `wait` to await the future.
55 |         sync.read_u32::<LE>().unwrap()
56 |         // This is just an example; it could be complex processing, zipping, etc.
57 |         // If you are calling into an FFI library that uses a callback, you
58 |         // can even `wait()` from that callback and turn the whole FFI library
59 |         // into async!
60 |     })
61 |     .await
62 | }
63 | 
64 | fn main() {
65 |     async_std::task::block_on(async {
66 |         async_std::task::spawn_local(async {
67 |             // This is just an example; it could be any AsyncRead stream
68 |             let mut stream: &[u8] = &[0xef, 0xbe, 0xad, 0xde];
69 |             println!("{:x}", process(&mut stream).await);
70 |         })
71 |         .await;
72 |     });
73 | }
74 | ```
--------------------------------------------------------------------------------
/src/fiber/mmap.rs:
--------------------------------------------------------------------------------
1 | use super::*;
2 | use crate::page_size;
3 | 
4 | use std::ptr;
5 | use std::sync::atomic::{AtomicUsize, Ordering};
6 | 
7 | pub struct Stack(usize);
8 | 
9 | // Keep a stack so that repeated fiber calls don't require new allocation.
10 | static STACK_CACHE: AtomicUsize = AtomicUsize::new(0);
11 | 
12 | impl Stack {
13 |     pub fn allocate() -> Self {
14 |         // Before allocating, first check the cache.
15 |         let stack = STACK_CACHE.swap(0, Ordering::Relaxed);
16 |         if stack != 0 {
17 |             return Self(stack);
18 |         }
19 | 
20 |         #[cfg(not(target_os = "macos"))]
21 |         use libc::MAP_STACK;
22 |         #[cfg(target_os = "macos")]
23 |         const MAP_STACK: libc::c_int = 0;
24 | 
25 |         unsafe {
26 |             // Allocate stack
27 |             let ptr = libc::mmap(
28 |                 ptr::null_mut(),
29 |                 0x200000,
30 |                 libc::PROT_READ | libc::PROT_WRITE,
31 |                 libc::MAP_PRIVATE | libc::MAP_ANONYMOUS | MAP_STACK,
32 |                 -1,
33 |                 0,
34 |             );
35 |             if ptr == libc::MAP_FAILED {
36 |                 panic!("failed to allocate stack");
37 |             }
38 | 
39 |             // Guard page to avoid stack overflow
40 |             let page_size = page_size::get();
41 |             let ret = libc::mprotect(ptr, page_size, libc::PROT_NONE);
42 |             if ret != 0 {
43 |                 panic!("failed to protect the guard page");
44 |             }
45 | 
46 |             Self(ptr as usize)
47 |         }
48 |     }
49 | 
50 |     #[allow(unused)]
51 |     pub fn bottom(&self) -> usize {
52 |         self.0
53 |     }
54 | 
55 |     pub fn top(&self) -> StackPointer {
56 |         unsafe { StackPointer(NonZeroUsize::new_unchecked(self.0 + 0x200000)) }
57 |     }
58 | }
59 | 
60 | impl Drop for Stack {
61 |     fn drop(&mut self) {
62 |         // Before freeing, first check the cache.
63 |         if STACK_CACHE
64 |             .compare_exchange(0, self.0, Ordering::Relaxed, Ordering::Relaxed)
65 |             .is_ok()
66 |         {
67 |             return;
68 |         }
69 | 
70 |         unsafe { libc::munmap(self.0 as _, 0x200000) };
71 |     }
72 | }
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! # Bridge between sync and async
2 | //!
3 | //! `stackful` is a minimalistic library that allows you to convert between sync and async code
4 | //! freely.
5 | //!
6 | //! A major issue of Rust's `async` is that it is very intrusive: it pretty much requires a whole
7 | //! program rewrite to convert synchronous code to async, and it would require many libraries to
8 | //! support both variants.
9 | //!
10 | //! Currently, we could use `async_std::task::spawn_blocking` or equivalents to convert blocking
11 | //! IO into async IO using thread pools, or use `async_std::task::block_on` to use an async library
12 | //! in blocking code. But it is still quite problematic if you want to use a synchronous middleware
13 | //! inside a generally asynchronous program.
14 | //!
15 | //! `stackful` aims to blur the boundary by using stackful coroutines to allow arbitrary yield
16 | //! points within otherwise synchronous code.
17 | //!
18 | //! ## Usage
19 | //! Use `wait` to convert an async value to a sync value, similar to `.await` or `block_on`.
20 | //!
21 | //! ```
22 | //! # use std::time::Duration;
23 | //! use stackful::wait;
24 | //! # async_std::task::block_on(stackful::stackful(|| {
25 | //! wait(async_std::task::sleep(Duration::from_secs(1)));
26 | //! # }));
27 | //! ```
28 | //!
29 | //! Use `stackful` to convert a synchronous function into a `Future`:
30 | //! ```
31 | //! use stackful::stackful;
32 | //! # async_std::task::spawn(async {
33 | //! async_std::task::spawn_local(stackful(|| {
34 | //!     // Synchronous code
35 | //!     // This shouldn't block, however
36 | //! }));
37 | //! # });
38 | //! ```
39 | //! You can combine these functions; note that nested calls are handled seamlessly:
40 | //! ```
41 | //! # use std::time::Duration;
42 | //! use stackful::{stackful, wait};
43 | //!
44 | //! fn maybe_sleep(dur: Option<Duration>) {
45 | //!     // This is not possible under `async`!
46 | //!     dur.map(|x| wait(async_std::task::sleep(x)));
47 | //! }
48 | //!
49 | //! # async_std::task::spawn(async {
50 | //! async_std::task::spawn_local(async {
51 | //!     stackful(|| maybe_sleep(Some(Duration::from_secs(1)))).await
52 | //! });
53 | //! # });
54 | //! ```
55 | 
56 | #![cfg_attr(feature = "nightly", feature(generator_trait))]
57 | 
58 | #[cfg(not(any(target_arch = "wasm32", windows)))]
59 | mod page_size;
60 | 
61 | mod fiber;
62 | pub mod generator;
63 | 
64 | #[cfg(feature = "future")]
65 | pub mod future;
66 | #[cfg(feature = "future")]
67 | #[doc(inline)]
68 | pub use future::{stackful, wait};
--------------------------------------------------------------------------------
/src/arch/x86_64.S:
--------------------------------------------------------------------------------
1 | .intel_syntax noprefix
2 | 
3 | # Save all non-volatile registers on stack and return.
4 | fiber_save_raw:
5 | pop rax
6 | push rbx
7 | push rbp
8 | push r12
9 | push r13
10 | push r14
11 | push r15
12 | sub rsp, 8
13 | stmxcsr [rsp]
14 | fnstcw [rsp + 4]
15 | push rax
16 | ret
17 | 
18 | # Restore all non-volatile registers and return
19 | fiber_restore_ret_raw:
20 | fldcw [rsp + 4]
21 | ldmxcsr [rsp]
22 | add rsp, 8
23 | pop r15
24 | pop r14
25 | pop r13
26 | pop r12
27 | pop rbp
28 | pop rbx
29 | ret
30 | 
31 | # fiber_enter: fn(StackPointer, usize, fn(StackPointer, usize) -> !) -> SwitchResult
32 | # Enter a fresh stack and call the supplied function
33 | #ifndef __apple_build_version__
34 | .global fiber_enter
35 | .type fiber_enter, @function
36 | fiber_enter:
37 | #else
38 | .global _fiber_enter
39 | _fiber_enter:
40 | #endif
41 | .cfi_startproc
42 | # Top of the fresh stack, we use these to store the last function that
43 | # calls fiber_enter/fiber_switch_enter so that the stack trace can continue
44 | # past this function.
45 | sub rdi, 16
46 | mov rax, [rsp]
47 | mov [rdi + 8], rax
48 | mov rax, rsp
49 | add rax, 8
50 | mov [rdi], rax
51 | 
52 | call fiber_save_raw
53 | 
54 | # Switch stack
55 | xchg rsp, rdi
56 | 
57 | # CFI metadata to instruct unwinder to find our saved info at the top of stack.
58 | .cfi_def_cfa rsp, 16
59 | .cfi_offset rsp, -16
60 | .cfi_offset rip, -8
61 | 
62 | # Save the top-of-stack address in old stack frame; otherwise this will be lost after a switch
63 | mov [rdi - 8], rsp
64 | 
65 | call rdx
66 | ud2
67 | #ifndef __apple_build_version__
68 | .size fiber_enter, .-fiber_enter
69 | #endif
70 | .cfi_endproc
71 | 
72 | # fiber_switch_enter: fn(StackPointer, usize) -> SwitchResult
73 | .global fiber_switch_enter
74 | .global _fiber_switch_enter
75 | fiber_switch_enter:
76 | _fiber_switch_enter:
77 | # Extract the saved top-of-stack address
78 | mov rcx, [rdi - 8]
79 | 
80 | # Fill the address with new caller info for a proper stack trace
81 | mov rax, [rsp]
82 | mov [rcx + 8], rax
83 | mov rax, rsp
84 | add rax, 8
85 | mov [rcx], rax
86 | 
87 | call fiber_save_raw
88 | 
89 | # Switch stack
90 | mov rax, rsp
91 | mov rsp, rdi
92 | mov rdx, rsi
93 | 
94 | # Save the top-of-stack address in old stack frame again.
95 | mov [rax - 8], rcx
96 | 
97 | jmp fiber_restore_ret_raw
98 | 
99 | # fiber_switch_leave: fn(StackPointer, usize) -> SwitchResult
100 | .global fiber_switch_leave
101 | .global _fiber_switch_leave
102 | fiber_switch_leave:
103 | _fiber_switch_leave:
104 | call fiber_save_raw
105 | 
106 | # Extract the saved top-of-stack address
107 | mov rcx, [rdi - 8]
108 | 
109 | # Switch stack
110 | mov rax, rsp
111 | mov rsp, rdi
112 | mov rdx, rsi
113 | 
114 | # Save the top-of-stack address
115 | mov [rax - 8], rcx
116 | 
117 | jmp fiber_restore_ret_raw
118 | 
--------------------------------------------------------------------------------
/src/arch/aarch64.S:
--------------------------------------------------------------------------------
1 | # Save all non-volatile registers on stack and return.
2 | fiber_save_raw:
3 | sub sp, sp, 0xA0
4 | stp d8, d9, [sp, 0x60]
5 | stp d10, d11, [sp, 0x70]
6 | stp d12, d13, [sp, 0x80]
7 | stp d14, d15, [sp, 0x90]
8 | # x9 is the saved lr
9 | stp x9, x19, [sp, 0x00]
10 | stp x20, x21, [sp, 0x10]
11 | stp x22, x23, [sp, 0x20]
12 | stp x24, x25, [sp, 0x30]
13 | stp x26, x27, [sp, 0x40]
14 | stp x28, x29, [sp, 0x50]
15 | ret
16 | 
17 | # Restore all non-volatile registers and return
18 | fiber_restore_ret_raw:
19 | ldp d8, d9, [sp, 0x60]
20 | ldp d10, d11, [sp, 0x70]
21 | ldp d12, d13, [sp, 0x80]
22 | ldp d14, d15, [sp, 0x90]
23 | ldp x30, x19, [sp, 0x00]
24 | ldp x20, x21, [sp, 0x10]
25 | ldp x22, x23, [sp, 0x20]
26 | ldp x24, x25, [sp, 0x30]
27 | ldp x26, x27, [sp, 0x40]
28 | ldp x28, x29, [sp, 0x50]
29 | add sp, sp, 0xA0
30 | ret
31 | 
32 | # fiber_enter: fn(StackPointer, usize, fn(StackPointer, usize) -> !)
-> SwitchResult 33 | # Enter a fresh stack and call the supplied function 34 | #ifndef __apple_build_version__ 35 | .global fiber_enter 36 | .type fiber_enter, @function 37 | fiber_enter: 38 | #else 39 | .global _fiber_enter 40 | _fiber_enter: 41 | #endif 42 | .cfi_startproc 43 | # Top of the fresh stack, we use these to store the last function that 44 | # calls fiber_enter/fiber_switch_enter so that the stack trace can continue 45 | # past this function. 46 | sub x0, x0, 0x10 47 | mov x9, sp 48 | stp x9, x30, [x0] 49 | 50 | mov x9, x30 51 | bl fiber_save_raw 52 | 53 | # Switch stack and enter 54 | mov x9, x0 55 | mov x0, sp 56 | mov sp, x9 57 | 58 | # CFI metadata to instruct unwinder to find our saved info at the top of stack. 59 | .cfi_def_cfa sp, 16 60 | .cfi_offset sp, -16 61 | .cfi_offset x30, -8 62 | 63 | # Save the top-of-stack address in old stack frame; otherwise this will be lost after a switch 64 | str x9, [x0, -8] 65 | 66 | blr x2 67 | brk 1 68 | #ifndef __apple_build_version__ 69 | .size fiber_enter, .-fiber_enter 70 | #endif 71 | .cfi_endproc 72 | 73 | # fiber_switch_enter: fn(StackPointer, usize) -> SwitchResult 74 | .global fiber_switch_enter 75 | .global _fiber_switch_enter 76 | fiber_switch_enter: 77 | _fiber_switch_enter: 78 | # Extract the saved top-of-stack address 79 | ldr x3, [x0, -8] 80 | 81 | # Fill the address with new caller info for a proper stack trace 82 | mov x9, sp 83 | stp x9, x30, [x3] 84 | 85 | mov x9, x30 86 | bl fiber_save_raw 87 | 88 | # Switch stack 89 | mov x9, x0 90 | mov x0, sp 91 | mov sp, x9 92 | 93 | # Save the top-of-stack address in old stack frame again. 94 | str x3, [x0, -8] 95 | 96 | b fiber_restore_ret_raw 97 | 98 | # fiber_switch_leave: fn(StackPointer, usize) -> SwitchResult 99 | .global fiber_switch_leave 100 | .global _fiber_switch_leave 101 | fiber_switch_leave: 102 | _fiber_switch_leave: 103 | mov x9, x30 104 | bl fiber_save_raw 105 | 106 | # Extract the saved top-of-stack address 107 | ldr x3, [x0, -8] 108 | 109 | # Switch stack 110 | mov x9, x0 111 | mov x0, sp 112 | mov sp, x9 113 | 114 | # Save the top-of-stack address 115 | str x3, [x0, -8] 116 | 117 | b fiber_restore_ret_raw 118 | -------------------------------------------------------------------------------- /src/arch/x86.s: -------------------------------------------------------------------------------- 1 | .intel_syntax noprefix 2 | 3 | # Save all non-volatile registers on stack and return. 4 | fiber_save_raw: 5 | pop eax 6 | push ebx 7 | push ebp 8 | push esi 9 | push edi 10 | sub esp, 8 11 | stmxcsr [esp] 12 | fnstcw [esp + 4] 13 | # For easier reference to arguments from the caller. 14 | lea ebp, [esp + 24] 15 | # Load the first argument to EDX 16 | mov edx, [ebp + 8] 17 | push eax 18 | ret 19 | 20 | # Restore all non-volatile registers and return 21 | fiber_restore_ret_raw: 22 | fldcw [esp + 4] 23 | ldmxcsr [esp] 24 | add esp, 8 25 | pop edi 26 | pop esi 27 | pop ebp 28 | pop ebx 29 | # x86 has this annoying pass on stack convention, and when return value 30 | # is aggregate, the pointer to the return value is passed to the callee 31 | # on stack, making it a double-indirection. 32 | # 33 | # We let this subroutine absorb the complexity so the code below can 34 | # return using EAX:EDX. 35 | mov ecx, [esp + 4] 36 | mov [ecx], eax 37 | mov [ecx + 4], edx 38 | ret 4 39 | 40 | # fiber_enter: fn(StackPointer, usize, fn(StackPointer, usize) -> !) 
-> SwitchResult 41 | # Enter a fresh stack and call the supplied function 42 | .global fiber_enter 43 | .global _fiber_enter 44 | .type fiber_enter, @function 45 | fiber_enter: 46 | _fiber_enter: 47 | .cfi_startproc 48 | call fiber_save_raw 49 | 50 | # Top of the fresh stack, we use these to store the last function that 51 | # calls fiber_enter/fiber_switch_enter so that the stack trace can continue 52 | # past this function. 53 | sub edx, 8 54 | mov eax, [ebp] 55 | mov [edx + 4], eax 56 | lea eax, [ebp + 4] 57 | mov [edx], eax 58 | 59 | # Switch stack 60 | mov eax, esp 61 | mov esp, edx 62 | 63 | # CFI metadata to instruct unwinder to find our saved info at the top of stack. 64 | .cfi_def_cfa esp, 16 65 | .cfi_offset esp, -8 66 | .cfi_offset eip, -4 67 | 68 | # Save the top-of-stack address in old stack frame; otherwise this will be lost after a switch 69 | mov [eax - 4], esp 70 | 71 | push [ebp + 12] 72 | push eax 73 | call dword ptr [ebp + 16] 74 | ud2 75 | .size fiber_enter, .-fiber_enter 76 | .cfi_endproc 77 | 78 | # fiber_switch_enter: fn(StackPointer, usize) -> SwitchResult 79 | .global fiber_switch_enter 80 | .global _fiber_switch_enter 81 | fiber_switch_enter: 82 | _fiber_switch_enter: 83 | call fiber_save_raw 84 | 85 | # Extract the saved top-of-stack address 86 | mov ecx, [edx - 4] 87 | 88 | # Fill the address with new caller info for a proper stack trace 89 | mov eax, [ebp] 90 | mov [ecx + 4], eax 91 | lea eax, [ebp + 4] 92 | mov [ecx], eax 93 | 94 | # Switch stack 95 | mov eax, esp 96 | mov esp, edx 97 | mov edx, [ebp + 12] 98 | 99 | # Save the top-of-stack address in old stack frame again. 100 | mov [eax - 4], ecx 101 | 102 | jmp fiber_restore_ret_raw 103 | 104 | # fiber_switch_leave: fn(StackPointer, usize) -> SwitchResult 105 | .global fiber_switch_leave 106 | .global _fiber_switch_leave 107 | fiber_switch_leave: 108 | _fiber_switch_leave: 109 | call fiber_save_raw 110 | 111 | # Extract the saved top-of-stack address 112 | mov ecx, [edx - 4] 113 | 114 | # Switch stack 115 | mov eax, esp 116 | mov esp, edx 117 | mov edx, [ebp + 12] 118 | 119 | # Save the top-of-stack address 120 | mov [eax - 4], ecx 121 | 122 | jmp fiber_restore_ret_raw 123 | -------------------------------------------------------------------------------- /src/arch/riscv64.s: -------------------------------------------------------------------------------- 1 | # Save all non-volatile registers on stack and return. 
2 | fiber_save_raw: 3 | add sp, sp, -0xD0 4 | sd ra, 0x00(sp) 5 | sd s0, 0x08(sp) 6 | sd s1, 0x10(sp) 7 | sd s2, 0x18(sp) 8 | sd s3, 0x20(sp) 9 | sd s4, 0x28(sp) 10 | sd s5, 0x30(sp) 11 | sd s6, 0x38(sp) 12 | sd s7, 0x40(sp) 13 | sd s8, 0x48(sp) 14 | sd s9, 0x50(sp) 15 | sd s10, 0x58(sp) 16 | sd s11, 0x60(sp) 17 | fsd fs0, 0x68(sp) 18 | fsd fs1, 0x70(sp) 19 | fsd fs2, 0x78(sp) 20 | fsd fs3, 0x80(sp) 21 | fsd fs4, 0x88(sp) 22 | fsd fs5, 0x90(sp) 23 | fsd fs6, 0x98(sp) 24 | fsd fs7, 0xA0(sp) 25 | fsd fs8, 0xA8(sp) 26 | fsd fs9, 0xB0(sp) 27 | fsd fs10, 0xB8(sp) 28 | fsd fs11, 0xC0(sp) 29 | jr t0 30 | 31 | # Restore all non-volatile registers and return 32 | fiber_restore_ret_raw: 33 | ld ra, 0x00(sp) 34 | ld s0, 0x08(sp) 35 | ld s1, 0x10(sp) 36 | ld s2, 0x18(sp) 37 | ld s3, 0x20(sp) 38 | ld s4, 0x28(sp) 39 | ld s5, 0x30(sp) 40 | ld s6, 0x38(sp) 41 | ld s7, 0x40(sp) 42 | ld s8, 0x48(sp) 43 | ld s9, 0x50(sp) 44 | ld s10, 0x58(sp) 45 | ld s11, 0x60(sp) 46 | fld fs0, 0x68(sp) 47 | fld fs1, 0x70(sp) 48 | fld fs2, 0x78(sp) 49 | fld fs3, 0x80(sp) 50 | fld fs4, 0x88(sp) 51 | fld fs5, 0x90(sp) 52 | fld fs6, 0x98(sp) 53 | fld fs7, 0xA0(sp) 54 | fld fs8, 0xA8(sp) 55 | fld fs9, 0xB0(sp) 56 | fld fs10, 0xB8(sp) 57 | fld fs11, 0xC0(sp) 58 | add sp, sp, 0xD0 59 | ret 60 | 61 | # fiber_enter: fn(StackPointer, usize, fn(StackPointer, usize) -> !) -> SwitchResult 62 | # Enter a fresh stack and call the supplied function 63 | .global fiber_enter 64 | .type fiber_enter, @function 65 | fiber_enter: 66 | .cfi_startproc 67 | # Top of the fresh stack, we use these to store the last function that 68 | # calls fiber_enter/fiber_switch_enter so that the stack trace can continue 69 | # past this function. 70 | add a0, a0, -0x10 71 | sd sp, 0(a0) 72 | sd ra, 8(a0) 73 | 74 | jal t0, fiber_save_raw 75 | 76 | # Switch stack and enter 77 | mv t0, sp 78 | mv sp, a0 79 | mv a0, t0 80 | 81 | # CFI metadata to instruct unwinder to find our saved info at the top of stack. 
82 | .cfi_def_cfa sp, 16
83 | .cfi_offset sp, -16
84 | .cfi_offset ra, -8
85 | 
86 | # Save the top-of-stack address in old stack frame; otherwise this will be lost after a switch
87 | sd sp, -8(a0)
88 | 
89 | jalr a2
90 | ebreak
91 | .size fiber_enter, .-fiber_enter
92 | .cfi_endproc
93 | 
94 | # fiber_switch_enter: fn(StackPointer, usize) -> SwitchResult
95 | .global fiber_switch_enter
96 | fiber_switch_enter:
97 | # Extract the saved top-of-stack address
98 | ld t1, -8(a0)
99 | 
100 | # Fill the address with new caller info for a proper stack trace
101 | sd sp, 0(t1)
102 | sd ra, 8(t1)
103 | 
104 | jal t0, fiber_save_raw
105 | 
106 | # Switch stack
107 | mv t0, sp
108 | mv sp, a0
109 | mv a0, t0
110 | 
111 | # Save the top-of-stack address
112 | sd t1, -8(a0)
113 | 
114 | j fiber_restore_ret_raw
115 | 
116 | # fiber_switch_leave: fn(StackPointer, usize) -> SwitchResult
117 | .global fiber_switch_leave
118 | fiber_switch_leave:
119 | jal t0, fiber_save_raw
120 | 
121 | # Extract the saved top-of-stack address
122 | ld t1, -8(a0)
123 | 
124 | # Switch stack
125 | mv t0, sp
126 | mv sp, a0
127 | mv a0, t0
128 | 
129 | # Save the top-of-stack address
130 | sd t1, -8(a0)
131 | 
132 | j fiber_restore_ret_raw
133 | 
--------------------------------------------------------------------------------
/src/future.rs:
--------------------------------------------------------------------------------
1 | use crate::generator::*;
2 | 
3 | use std::cell::Cell;
4 | use std::future::Future;
5 | use std::pin::Pin;
6 | use std::task::Poll;
7 | 
8 | struct Context {
9 |     parent: Cell<Option<&'static Context>>,
10 |     yielder: Cell<Option<&'static YieldHandle<(), &'static Context>>>,
11 |     panicking: Cell<bool>,
12 |     ctx: *mut core::task::Context<'static>,
13 | }
14 | 
15 | thread_local! {
16 |     static CONTEXT: Cell<Option<&'static Context>> = Cell::new(None);
17 | }
18 | 
19 | /// Wait for a future to complete and return its output.
20 | ///
21 | /// If the function is called directly or recursively from a closure passed to `stackful`,
22 | /// then the `Future` returned by `stackful` will return `Pending`. Otherwise the current
23 | /// thread will block until the future completes.
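24 | ///
25 | /// A minimal sketch of the fallback path (outside any `stackful` closure,
26 | /// `wait` simply blocks the calling thread):
27 | ///
28 | /// ```
29 | /// use stackful::wait;
30 | ///
31 | /// assert_eq!(wait(async { 1 + 1 }), 2);
32 | /// ```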
33 | pub fn wait<T>(mut fut: impl Future<Output = T>) -> T {
34 |     let mut context = match CONTEXT.with(|ctx| ctx.get()) {
35 |         Some(v) => v,
36 |         None => {
37 |             // Not called from a fiber context, do a block_on instead.
38 |             return futures_executor::block_on(fut);
39 |         }
40 |     };
41 |     loop {
42 |         // SAFETY: This is safe because we don't move `fut`.
43 |         if let Poll::Ready(val) = unsafe { Pin::new_unchecked(&mut fut) }
44 |             .as_mut()
45 |             .poll(unsafe { &mut *context.ctx })
46 |         {
47 |             return val;
48 |         }
49 | 
50 |         CONTEXT.with(|ctx| ctx.set(context.parent.take()));
51 |         let yielder = context.yielder.get().unwrap();
52 | 
53 |         struct PanicGuard;
54 |         impl Drop for PanicGuard {
55 |             fn drop(&mut self) {
56 |                 CONTEXT.with(|ctx| {
57 |                     let context = match ctx.get() {
58 |                         Some(v) => v,
59 |                         None => return,
60 |                     };
61 |                     context.panicking.set(true)
62 |                 });
63 |             }
64 |         }
65 | 
66 |         let guard = PanicGuard;
67 |         context = yielder.yeet(());
68 |         core::mem::forget(guard);
69 | 
70 |         CONTEXT.with(|ctx| {
71 |             context.parent.set(ctx.take());
72 |             context.yielder.set(Some(yielder));
73 |             ctx.set(Some(context));
74 |         });
75 |     }
76 | }
77 | 
78 | pub struct StackfulFuture<'a, T> {
79 |     generator: StackfulGenerator<'a, (), T, &'static Context>,
80 | }
81 | 
82 | impl<'a, T> StackfulFuture<'a, T> {
83 |     pub fn new<F>(f: F) -> Self
84 |     where
85 |         F: FnOnce() -> T + 'a,
86 |     {
87 |         Self {
88 |             generator: StackfulGenerator::new(
89 |                 move |y: &YieldHandle<(), &'static Context>, context: &'static Context| {
90 |                     CONTEXT.with(|ctx| {
91 |                         context.parent.set(ctx.take());
92 |                         context.yielder.set(Some(unsafe { std::mem::transmute(y) }));
93 |                         ctx.set(Some(context));
94 |                     });
95 | 
96 |                     struct ScopeGuard;
97 |                     impl Drop for ScopeGuard {
98 |                         fn drop(&mut self) {
99 |                             CONTEXT.with(|ctx| {
100 |                                 let context = match ctx.get() {
101 |                                     Some(v) => v,
102 |                                     None => return,
103 |                                 };
104 |                                 if context.panicking.get() {
105 |                                     return;
106 |                                 }
107 |                                 let parent = context.parent.take();
108 |                                 ctx.set(parent);
109 |                             });
110 |                         }
111 |                     }
112 | 
113 |                     let _guard = ScopeGuard;
114 |                     f()
115 |                 },
116 |             ),
117 |         }
118 |     }
119 | }
120 | 
121 | impl<T> Future for StackfulFuture<'_, T> {
122 |     type Output = T;
123 | 
124 |     fn poll(mut self: Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> Poll<T> {
125 |         let ctx = Context {
126 |             parent: Cell::new(None),
127 |             yielder: Cell::new(None),
128 |             panicking: Cell::new(false),
129 |             ctx: unsafe { std::mem::transmute(cx) },
130 |         };
131 |         match Pin::new(&mut self.generator).resume(unsafe { std::mem::transmute(&ctx) }) {
132 |             GeneratorState::Yielded(()) => Poll::Pending,
133 |             GeneratorState::Complete(val) => Poll::Ready(val),
134 |         }
135 |     }
136 | }
137 | 
138 | /// Turn a synchronous function into a `Future`.
139 | ///
140 | /// `stackful` can be paired with `wait` to allow async functions to be used within a sync
141 | /// function, and the pairing can be nested arbitrarily deep.
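142 | ///
143 | /// A minimal sketch (the closure is free to call `wait` internally):
144 | ///
145 | /// ```
146 | /// use stackful::{stackful, wait};
147 | ///
148 | /// # async_std::task::block_on(async {
149 | /// assert_eq!(stackful(|| wait(async { 21 * 2 })).await, 42);
150 | /// # });
151 | /// ```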
152 | pub async fn stackful<T, F: FnOnce() -> T>(f: F) -> T {
153 |     StackfulFuture::new(f).await
154 | }
155 | 
156 | #[cfg(not(target_arch = "wasm32"))]
157 | #[test]
158 | #[should_panic]
159 | fn panick() {
160 |     async_std::task::block_on(stackful(|| {
161 |         wait(async_std::task::yield_now());
162 |         panic!();
163 |     }));
164 | }
165 | 
166 | #[cfg(not(target_arch = "wasm32"))]
167 | #[test]
168 | fn drop_before_polling() {
169 |     drop(stackful(|| {
170 |         wait(async_std::task::yield_now());
171 |     }));
172 | }
173 | 
174 | #[cfg(not(target_arch = "wasm32"))]
175 | #[test]
176 | fn drop_after_polling() {
177 |     let waker = futures::task::noop_waker_ref();
178 |     let mut cx = core::task::Context::from_waker(waker);
179 |     let mut fut = Box::pin(stackful(|| {
180 |         wait(async_std::task::yield_now());
181 |     }));
182 |     let _ = Pin::new(&mut fut).poll(&mut cx);
183 |     drop(fut);
184 |     assert!(CONTEXT.with(|ctx| ctx.get()).is_none());
185 | }
186 | 
187 | #[cfg(not(target_arch = "wasm32"))]
188 | #[test]
189 | fn test() {
190 |     async_std::task::block_on(stackful(|| {
191 |         eprintln!("A");
192 |         wait(async_std::task::yield_now());
193 |         eprintln!("B");
194 |         wait(async_std::task::sleep(std::time::Duration::from_secs(1)));
195 |         eprintln!("C");
196 |     }));
197 |     wait(async_std::task::yield_now());
198 |     eprintln!("D");
199 | }
--------------------------------------------------------------------------------
/src/generator.rs:
--------------------------------------------------------------------------------
1 | use crate::fiber::*;
2 | 
3 | use core::cell::Cell;
4 | use core::marker::PhantomData;
5 | use core::mem::ManuallyDrop;
6 | use core::pin::Pin;
7 | 
8 | #[cfg(feature = "nightly")]
9 | pub use core::ops::{Generator, GeneratorState};
10 | 
11 | #[cfg(not(feature = "nightly"))]
12 | #[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)]
13 | pub enum GeneratorState<Y, R> {
14 |     Yielded(Y),
15 |     Complete(R),
16 | }
17 | 
18 | #[cfg(not(feature = "nightly"))]
19 | pub trait Generator<R = ()> {
20 |     type Yield;
21 |     type Return;
22 | 
23 |     fn resume(self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return>;
24 | }
25 | 
26 | pub struct StackfulGenerator<'a, Y, R, Resume> {
27 |     stack: Stack,
28 |     result: Option<StackPointer>,
29 |     #[cfg(feature = "stacker")]
30 |     stack_limit: Option<usize>,
31 |     func: Option<Box<dyn FnOnce(&YieldHandle<Y, Resume>, Resume) -> R + 'a>>,
32 |     // Make sure this Generator is not Send.
33 |     _marker: PhantomData<*const fn(Resume) -> (Y, R)>,
34 | }
35 | 
36 | // Everything is movable.
37 | impl<Y, R, Resume> Unpin for StackfulGenerator<'_, Y, R, Resume> {}
38 | 
39 | pub struct YieldHandle<Y, Resume> {
40 |     stack: Cell<StackPointer>,
41 |     _marker: PhantomData<(Y, Resume)>,
42 | }
43 | 
44 | impl<'a, Y, R, Resume> StackfulGenerator<'a, Y, R, Resume> {
45 |     pub fn new<F>(f: F) -> Self
46 |     where
47 |         F: FnOnce(&YieldHandle<Y, Resume>, Resume) -> R + 'a,
48 |     {
49 |         let stack = Stack::allocate();
50 |         Self {
51 |             func: Some(Box::new(f)),
52 |             stack,
53 |             #[cfg(feature = "stacker")]
54 |             stack_limit: None,
55 |             result: None,
56 |             _marker: PhantomData,
57 |         }
58 |     }
59 | }
60 | 
61 | struct DropPanic;
62 | 
63 | struct EnterPayload<'a, Y, R, Resume> {
64 |     f: ManuallyDrop<Box<dyn FnOnce(&YieldHandle<Y, Resume>, Resume) -> R + 'a>>,
65 |     p: usize,
66 | }
67 | 
68 | enum YieldPayload {
69 |     Yielded(*const ()),
70 |     Complete(*const ()),
71 |     Panic(*mut (dyn std::any::Any + Send)),
72 | }
73 | 
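74 | // Entry trampoline that runs on the freshly allocated stack: it takes the
75 | // boxed closure and the first resume argument out of `EnterPayload`, runs the
76 | // closure with panics caught, and then switches back to the resumer carrying
77 | // either the completed value or the captured panic.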
78 | extern "C" fn enter<Y, R, Resume>(stack: StackPointer, payload: usize) -> ! {
79 |     let enter = unsafe { &mut *(payload as *mut EnterPayload<'static, Y, R, Resume>) };
80 |     let f = unsafe { ManuallyDrop::take(&mut enter.f) };
81 |     let r = unsafe { (enter.p as *mut Resume).read() };
82 |     let mut yielder = YieldHandle {
83 |         stack: Cell::new(stack),
84 |         _marker: PhantomData,
85 |     };
86 |     let y = &mut yielder;
87 |     let output = std::panic::catch_unwind(std::panic::AssertUnwindSafe(move || f(y, r)));
88 | 
89 |     let payload = match output {
90 |         Ok(ref output) => YieldPayload::Complete(output as *const _ as _),
91 |         Err(err) => YieldPayload::Panic(Box::into_raw(err)),
92 |     };
93 |     unsafe {
94 |         fiber_switch_leave(yielder.stack.get(), &payload as *const _ as _);
95 |     }
96 | 
97 |     unreachable!("resuming a completed generator");
98 | }
99 | 
100 | impl<Y, R, Resume> Drop for StackfulGenerator<'_, Y, R, Resume> {
101 |     fn drop(&mut self) {
102 |         if let Some(stack) = self.result {
103 |             // This will give us a `YieldPayload::Panic(DropPanic)`, but we can safely ignore it
104 |             // because DropPanic is a ZST.
105 |             unsafe {
106 |                 fiber_switch_enter(stack, 0);
107 |             }
108 |         }
109 |     }
110 | }
111 | 
112 | impl<Y, R, Resume> Generator<Resume> for StackfulGenerator<'_, Y, R, Resume> {
113 |     type Yield = Y;
114 |     type Return = R;
115 | 
116 |     fn resume(mut self: Pin<&mut Self>, arg: Resume) -> GeneratorState<Y, R> {
117 |         let payload = &arg as *const _ as usize;
118 |         #[cfg(feature = "stacker")]
119 |         let stack_limit = stacker::get_stack_limit();
120 |         let result = match self.result {
121 |             None => {
122 |                 let mut payload = EnterPayload {
123 |                     f: ManuallyDrop::new(self.func.take().expect("polling a completed future")),
124 |                     p: payload,
125 |                 };
126 |                 #[cfg(feature = "stacker")]
127 |                 stacker::set_stack_limit(Some(self.stack.bottom()));
128 |                 unsafe {
129 |                     fiber_enter(
130 |                         self.stack.top(),
131 |                         core::ptr::addr_of_mut!(payload) as usize,
132 |                         enter::<Y, R, Resume>,
133 |                     )
134 |                 }
135 |             }
136 |             Some(v) => {
137 |                 #[cfg(feature = "stacker")]
138 |                 stacker::set_stack_limit(self.stack_limit);
139 |                 unsafe { fiber_switch_enter(v, payload) }
140 |             }
141 |         };
142 |         std::mem::forget(arg);
143 |         self.result = result.stack;
144 |         #[cfg(feature = "stacker")]
145 |         {
146 |             self.stack_limit = stacker::get_stack_limit();
147 |             stacker::set_stack_limit(stack_limit);
148 |         }
149 | 
150 |         let y_payload = unsafe { (result.payload as *const YieldPayload).read() };
151 | 
152 |         match y_payload {
153 |             YieldPayload::Yielded(y) => GeneratorState::Yielded(unsafe { (y as *const Y).read() }),
154 |             YieldPayload::Complete(r) => {
155 |                 self.result = None;
156 |                 GeneratorState::Complete(unsafe { (r as *const R).read() })
157 |             }
158 |             YieldPayload::Panic(p) => {
159 |                 self.result = None;
160 |                 std::panic::resume_unwind(unsafe { Box::from_raw(p) });
161 |             }
162 |         }
163 |     }
164 | }
165 | 
166 | impl<Y, Resume> YieldHandle<Y, Resume> {
167 |     pub fn yeet(&self, arg: Y) -> Resume {
168 |         unsafe {
169 |             // `arg` is passed by reference. It lives on the stack of the current fiber, which
170 |             // remains valid while the current fiber is suspended. `forget` it after the
171 |             // switch because ownership is transferred to the target fiber.
172 |             let payload = YieldPayload::Yielded(&arg as *const Y as _);
173 |             let result = fiber_switch_leave(self.stack.get(), &payload as *const YieldPayload as _);
174 |             std::mem::forget(arg);
175 | 
176 |             self.stack.set(result.stack.unwrap());
177 |             if result.payload == 0 {
178 |                 std::panic::resume_unwind(Box::new(DropPanic));
179 |             }
180 |             let r = (result.payload as *mut Resume).read();
181 |             r
182 |         }
183 |     }
184 | }
185 | 
186 | #[test]
187 | fn test_generator() {
188 |     let mut gen = StackfulGenerator::new(|y: &YieldHandle<i32, i32>, mut r: i32| {
189 |         for i in 0..100 {
190 |             assert_eq!(r, i);
191 |             r = y.yeet(i);
192 |         }
193 |         assert_eq!(r, 1024);
194 |         1024
195 |     });
196 |     let mut gen = Pin::new(&mut gen);
197 | 
198 |     for i in 0..100 {
199 |         assert!(matches!(gen.as_mut().resume(i), GeneratorState::Yielded(x) if x == i));
200 |     }
201 |     assert!(matches!(
202 |         gen.as_mut().resume(1024),
203 |         GeneratorState::Complete(1024)
204 |     ));
205 | }
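206 | 
207 | // An extra illustrative check (a minimal sketch in the same spirit as the test
208 | // above): a generator resumed with `()` that simply yields a fixed sequence.
209 | #[test]
210 | fn test_unit_resume() {
211 |     let mut gen = StackfulGenerator::new(|y: &YieldHandle<i32, ()>, ()| {
212 |         for i in 0..3 {
213 |             y.yeet(i);
214 |         }
215 |     });
216 |     let mut gen = Pin::new(&mut gen);
217 | 
218 |     for i in 0..3 {
219 |         assert!(matches!(gen.as_mut().resume(()), GeneratorState::Yielded(x) if x == i));
220 |     }
221 |     assert!(matches!(gen.as_mut().resume(()), GeneratorState::Complete(())));
222 | }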
--------------------------------------------------------------------------------
/src/arch/wasm32.s:
--------------------------------------------------------------------------------
1 | .section .bss.unwinding_stack,"",@
2 | .type unwinding_stack,@object
3 | # Used to carry the updated `StackPointer` from an entering fiber switch to a leaving fiber switch.
4 | rewinding_stack:
5 | .skip 4
6 | .size rewinding_stack, 4
7 | 
8 | # Used to carry the target `StackPointer` from a leaving fiber switch to an entering fiber switch.
9 | unwinding_stack:
10 | .skip 4
11 | .size unwinding_stack, 4
12 | 
13 | .globaltype __stack_pointer, i32
14 | 
15 | .functype asyncify_start_unwind (i32) -> ()
16 | .import_module asyncify_start_unwind, asyncify
17 | .import_name asyncify_start_unwind, start_unwind
18 | 
19 | .functype asyncify_stop_unwind () -> ()
20 | .import_module asyncify_stop_unwind, asyncify
21 | .import_name asyncify_stop_unwind, stop_unwind
22 | 
23 | .functype asyncify_start_rewind (i32) -> ()
24 | .import_module asyncify_start_rewind, asyncify
25 | .import_name asyncify_start_rewind, start_rewind
26 | 
27 | .functype asyncify_stop_rewind () -> ()
28 | .import_module asyncify_stop_rewind, asyncify
29 | .import_name asyncify_stop_rewind, stop_rewind
30 | 
31 | # For a suspended stack, the 16 bytes below the stack pointer are used to store some info:
32 | # * -4: payload
33 | # * -8: pointer to the entry point
34 | # * -12: asyncify stack limit
35 | # * -16: asyncify stack pointer
36 | #
37 | # The bytes below these 16 bytes are used as the asyncify stack while the fiber is suspended.
38 | 
39 | .section .text.fiber_enter,"",@
40 | 
41 | # fiber_enter_impl: fn(StackPointer, usize, fn(StackPointer, usize) -> !) -> SwitchResult
42 | .type fiber_enter_impl, @function
43 | fiber_enter_impl:
44 | .functype fiber_enter_impl (i32, i32, i32, i32) -> ()
45 | .local i32
46 | 
47 | # Check if we are in the process of rewinding.
48 | i32.const 0
49 | i32.load rewinding_stack
50 | if
51 | # This is the more complicated case where we are an intermediate frame of rewinding.
52 | # See the save code at the end of the function as well.
53 | 
54 | # All our local variables are garbage when rewinding.
55 | # We need to extract local 0,1,3 from the asyncify stack.
56 | 57 | global.get __stack_pointer 58 | i32.const 16 59 | i32.sub 60 | local.set 4 61 | 62 | # Retrieve the asyncify stack pointer 63 | local.get 4 64 | i32.load 0 65 | i32.const 16 66 | i32.sub 67 | local.set 2 68 | 69 | # Decrement the asyncify stack pointer 70 | local.get 4 71 | local.get 2 72 | i32.store 0 73 | 74 | # Load locals 75 | local.get 2 76 | i32.load 0 77 | local.set 0 78 | 79 | local.get 2 80 | i32.load 4 81 | local.set 1 82 | 83 | local.get 2 84 | i32.load 12 85 | local.set 3 86 | else 87 | 88 | # Swap argument 0 with __stack_pointer 89 | local.get 1 90 | global.get __stack_pointer 91 | local.set 1 92 | global.set __stack_pointer 93 | 94 | # If function pointer is 0, then we'll start rewind. 95 | local.get 3 96 | i32.const 0 97 | i32.eq 98 | if 99 | global.get __stack_pointer 100 | i32.const 16 101 | i32.sub 102 | local.set 4 103 | 104 | # Store the payload 105 | local.get 4 106 | local.get 2 107 | i32.store 12 108 | 109 | # Store the new stack pointer 110 | i32.const 0 111 | local.get 1 112 | i32.store rewinding_stack 113 | 114 | # Prepare for rewind 115 | local.get 4 116 | call asyncify_start_rewind 117 | 118 | # Retrieve the saved function pointer 119 | local.get 4 120 | i32.load 8 121 | local.set 3 122 | end_if 123 | 124 | end_if 125 | 126 | local.get 1 127 | local.get 2 128 | local.get 3 129 | call_indirect (i32, i32) -> () 130 | 131 | # The only way that the above call returns is through unwinding. 132 | 133 | global.get __stack_pointer 134 | i32.const 16 135 | i32.sub 136 | local.set 4 137 | 138 | # But we can't stop unwind yet, we need to make sure if we are actually 139 | # the target. 140 | 141 | i32.const 0 142 | i32.load unwinding_stack 143 | local.get 1 144 | i32.eq 145 | if 146 | # Okay, we are indeed the target 147 | 148 | i32.const 0 149 | i32.const 0 150 | i32.store unwinding_stack 151 | 152 | call asyncify_stop_unwind 153 | 154 | # Store the function pointer into suspended stack. 155 | local.get 4 156 | local.get 3 157 | i32.store 8 158 | 159 | # Store SwitchResult.0 160 | local.get 0 161 | global.get __stack_pointer 162 | i32.store 0 163 | 164 | # Store SwitchResult.1 165 | local.get 0 166 | local.get 4 167 | i32.load 12 168 | i32.store 4 169 | 170 | # Restore __stack_pointer 171 | local.get 1 172 | global.set __stack_pointer 173 | 174 | else 175 | 176 | # Now, this case is more complicated. 177 | # We are not the target, so we must not stop unwinding. This means we need to save our 178 | # states (local 0,1,3) as well and recover them when rewinding happens. 179 | 180 | # What's worse is that we have to save the state on the asyncify stack; the global 181 | # __asyncify_data is not visible to us. Luckily, we actually know that asyncify_data is 182 | # __stack_pointer - 16! 183 | 184 | # Fetch asyncify stack pointer 185 | local.get 4 186 | i32.load 0 187 | local.set 2 188 | 189 | # Asyncify stack overflow check 190 | local.get 2 191 | i32.const 16 192 | i32.add 193 | local.get 4 194 | i32.load 4 195 | i32.gt_u 196 | if 197 | unreachable 198 | end_if 199 | 200 | # Save locals 201 | local.get 2 202 | local.get 0 203 | i32.store 0 204 | 205 | local.get 2 206 | local.get 1 207 | i32.store 4 208 | 209 | local.get 2 210 | local.get 3 211 | i32.store 12 212 | 213 | # Increment asyncify stack pointer and we're done! 214 | local.get 4 215 | local.get 2 216 | i32.const 16 217 | i32.add 218 | i32.store 0 219 | end_if 220 | 221 | end_function 222 | 223 | # fiber_enter: fn(StackPointer, usize, fn(StackPointer, usize) -> !) 
-> SwitchResult
224 | # Enter a fresh stack and call the supplied function
225 | .global fiber_enter
226 | .type fiber_enter, @function
227 | fiber_enter:
228 | .functype fiber_enter (i32, i32, i32, i32) -> ()
229 | .local i32
230 | 
231 | local.get 0
232 | local.get 1
233 | local.get 2
234 | local.get 3
235 | call fiber_enter_impl
236 | 
237 | # Given the alignment of the stack, this can never be called -- of course the compiler doesn't
238 | # know that, so this call will remain. Asyncify will therefore treat this function as unwindable.
239 | # This is only necessary when a yield crosses nested generators.
240 | i32.const 0
241 | i32.load rewinding_stack
242 | i32.const 1
243 | i32.eq
244 | if
245 | call asyncify_stop_rewind
246 | end_if
247 | end_function
248 | 
249 | .section .text.fiber_switch_enter,"",@
250 | 
251 | # fiber_switch_enter: fn(StackPointer, usize) -> SwitchResult
252 | .global fiber_switch_enter
253 | .type fiber_switch_enter, @function
254 | fiber_switch_enter:
255 | .functype fiber_switch_enter (i32, i32, i32) -> ()
256 | 
257 | local.get 0
258 | local.get 1
259 | local.get 2
260 | i32.const 0
261 | call fiber_enter_impl
262 | 
263 | # Given the alignment of the stack, this can never be called -- of course the compiler doesn't
264 | # know that, so this call will remain. Asyncify will therefore treat this function as unwindable.
265 | # This is only necessary when a yield crosses nested generators.
266 | i32.const 0
267 | i32.load rewinding_stack
268 | i32.const 1
269 | i32.eq
270 | if
271 | call asyncify_stop_rewind
272 | end_if
273 | end_function
274 | 
275 | .section .text.fiber_switch_leave,"",@
276 | 
277 | # fiber_switch_leave: fn(StackPointer, usize) -> SwitchResult
278 | .global fiber_switch_leave
279 | .type fiber_switch_leave, @function
280 | fiber_switch_leave:
281 | .functype fiber_switch_leave (i32, i32, i32) -> ()
282 | .local i32
283 | 
284 | global.get __stack_pointer
285 | i32.const 16
286 | i32.sub
287 | local.set 3
288 | 
289 | i32.const 0
290 | i32.load rewinding_stack
291 | if
292 | # In this case we are rewinding in, meaning that we are being resumed.
293 | 
294 | # Stop asyncify from rewinding
295 | call asyncify_stop_rewind
296 | 
297 | # Load the updated stack pointer
298 | local.get 0
299 | i32.const 0
300 | i32.load rewinding_stack
301 | i32.store 0
302 | 
303 | i32.const 0
304 | i32.const 0
305 | i32.store rewinding_stack
306 | 
307 | # Load the payload
308 | local.get 0
309 | local.get 3
310 | i32.load 12
311 | i32.store 4
312 | 
313 | else
314 | 
315 | # In this case we are suspending, so we need to trigger an unwinding.
316 | 
317 | # Store the target stack pointer
318 | i32.const 0
319 | local.get 1
320 | i32.store unwinding_stack
321 | 
322 | # Store the payload
323 | local.get 3
324 | local.get 2
325 | i32.store 12
326 | 
327 | # Store the asyncify stack pointer
328 | local.get 3
329 | global.get __stack_pointer
330 | i32.const 65552
331 | i32.sub
332 | i32.store 0
333 | 
334 | # Store the asyncify stack limit
335 | local.get 3
336 | local.get 3
337 | i32.store 4
338 | 
339 | # Start unwinding
340 | local.get 3
341 | call asyncify_start_unwind
342 | 
343 | end_if
344 | 
345 | end_function
--------------------------------------------------------------------------------