├── .github └── workflows │ ├── lint.yml │ └── test.yml ├── .gitignore ├── .travis.yml ├── Cargo.toml ├── Cross.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── benches └── lib.rs ├── build.rs ├── examples ├── cd.rs ├── fib.rs ├── get_yield.rs ├── lifetime.rs ├── number.rs ├── pipe.rs ├── range.rs ├── send.rs └── yield_from.rs ├── src ├── detail │ ├── aarch64_unix.rs │ ├── arm_unix.rs │ ├── asm │ │ ├── asm_aarch64_aapcs_elf.S │ │ ├── asm_aarch64_aapcs_macho.S │ │ ├── asm_arm_aapcs_elf.S │ │ ├── asm_loongarch64_sysv_elf.S │ │ ├── asm_ppc64le_elf.S │ │ ├── asm_riscv64_c_elf.S │ │ ├── asm_x86_64_sysv_elf.S │ │ ├── asm_x86_64_sysv_macho.S │ │ └── asm_x86_64_sysv_pe.S │ ├── gen.rs │ ├── loongarch64_unix.rs │ ├── mod.rs │ ├── ppc64le_unix.rs │ ├── riscv64_unix.rs │ ├── x86_64_unix.rs │ └── x86_64_windows.rs ├── gen_impl.rs ├── lib.rs ├── reg_context.rs ├── rt.rs ├── scope.rs ├── stack │ ├── mod.rs │ ├── overflow_unix.rs │ ├── overflow_windows.rs │ ├── unix.rs │ └── windows.rs └── yield_.rs └── tests └── lib.rs /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | paths-ignore: 6 | - '**.md' 7 | pull_request: 8 | paths-ignore: 9 | - '**.md' 10 | workflow_dispatch: 11 | 12 | env: 13 | CARGO_TERM_COLOR: always 14 | 15 | jobs: 16 | lints: 17 | name: Run cargo fmt and cargo clippy 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Checkout sources 21 | uses: actions/checkout@v2 22 | - name: Install toolchain 23 | uses: actions-rs/toolchain@v1 24 | with: 25 | profile: minimal 26 | toolchain: stable 27 | override: true 28 | components: rustfmt, clippy 29 | - name: Run cargo clippy 30 | uses: actions-rs/cargo@v1 31 | with: 32 | command: clippy 33 | args: -- -D warnings -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 
| on: 4 | push: 5 | paths-ignore: 6 | - '**.md' 7 | pull_request: 8 | paths-ignore: 9 | - '**.md' 10 | workflow_dispatch: 11 | 12 | env: 13 | CARGO_TERM_COLOR: always 14 | 15 | jobs: 16 | test: 17 | name: ${{ matrix.channel }} Test on ${{ matrix.target.name }} 18 | runs-on: ${{ matrix.target.os }} 19 | strategy: 20 | fail-fast: false 21 | matrix: 22 | channel: 23 | - stable 24 | - nightly 25 | target: 26 | #- { name: macOS-aarch64, os: macos-latest, tool: aarch64-apple-darwin } 27 | - { name: macOS-x64, os: macos-latest, tool: x86_64-apple-darwin } 28 | - { name: Windows-x64, os: windows-latest, tool: x86_64-pc-windows-msvc } 29 | #- { name: Windows-aarch64, os: windows-latest, tool: aarch64-pc-windows-msvc } 30 | - { name: Linux-x64, os: ubuntu-latest, tool: x86_64-unknown-linux-gnu } 31 | - { name: Linux-aarch64, os: ubuntu-latest, tool: aarch64-unknown-linux-gnu } 32 | - { name: Linux-armv7, os: ubuntu-latest, tool: armv7-unknown-linux-gnueabihf } 33 | #- { name: Linux-arm, os: ubuntu-latest, tool: arm-unknown-linux-gnueabihf } 34 | - { name: Linux-loong64, os: ubuntu-latest, tool: loongarch64-unknown-linux-gnu } 35 | #- { name: Linux-mips64, os: ubuntu-latest, tool: mips64-unknown-linux-gnuabi64 } 36 | #- { name: Linux-powerpc64, os: ubuntu-latest, tool: powerpc64-unknown-linux-gnu } 37 | - { name: Linux-powerpc64le, os: ubuntu-latest, tool: powerpc64le-unknown-linux-gnu } 38 | #- { name: Linux-thumbv7, os: ubuntu-latest, tool: thumbv7neon-unknown-linux-gnueabihf } 39 | - { name: Linux-riscv64, os: ubuntu-latest, tool: riscv64gc-unknown-linux-gnu } 40 | #- { name: Linux-s390x, os: ubuntu-latest, tool: s390x-unknown-linux-gnu } 41 | #- { name: Linux-sparc64, os: ubuntu-latest, tool: sparc64-unknown-linux-gnu } 42 | #- { name: iOS-aarch64, os: macos-latest, tool: aarch64-apple-ios } 43 | #- { name: Android-armv7, os: ubuntu-latest, tool: armv7-linux-androideabi } 44 | - { name: Android-aarch64, os: ubuntu-latest, tool: aarch64-linux-android } 45 | # - { name: 
Android-x64, os: ubuntu-latest, tool: x86_64-linux-android } 46 | #- { name: FreeBSD-x64, os: ubuntu-latest, tool: x86_64-unknown-freebsd } 47 | #- { name: NetBSD-x64, os: ubuntu-latest, tool: x86_64-unknown-netbsd } 48 | #- { name: Illumos-x64, os: ubuntu-latest, tool: x86_64-unknown-illumos } 49 | 50 | env: 51 | CI: 1 52 | CARGO_INCREMENTAL: 0 53 | CROSS_NO_WARNINGS: 0 54 | windows: ${{ startsWith(matrix.target.name, 'Windows') }} 55 | linux: ${{ startsWith(matrix.target.name, 'Linux') }} 56 | macos: ${{ startsWith(matrix.target.name, 'macOS') }} 57 | 58 | steps: 59 | - uses: actions/checkout@v3 60 | 61 | - name: Setup rust cross-platform toolchain 62 | run: | 63 | rustup default ${{ matrix.channel }} 64 | rustup target add ${{ matrix.target.tool }} 65 | cargo install cross --git https://github.com/cross-rs/cross --rev 4090beca3cfffa44371a5bba524de3a578aa46c3 66 | 67 | - name: Test 68 | run: cross test --target ${{ matrix.target.tool }} 69 | 70 | - name: Test Release 71 | run: cross test --target ${{ matrix.target.tool }} --release -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.o 3 | *.so 4 | *.rlib 5 | *.dll 6 | 7 | # Executables 8 | *.exe 9 | 10 | # Generated by Cargo 11 | /target/ 12 | Cargo.lock 13 | 14 | temp/ 15 | fuzz/ 16 | 17 | .vscode/ 18 | rls/ 19 | 20 | # Crash dumps from cross tool: 21 | *.core 22 | 23 | # Mac Junk 24 | .DS_Store -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | language: rust 3 | # cache: cargo 4 | 5 | os: 6 | - windows 7 | - linux 8 | - osx 9 | 10 | arch: 11 | - amd64 12 | - arm64 13 | - ppc64le 14 | 15 | rust: 16 | - stable 17 | - nightly 18 | 19 | env: 20 | - RUST_BACKTRACE=1 21 | 22 | script: 23 | - cargo test 24 | - cargo test
--release 25 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "generator" 3 | version = "0.8.5" 4 | edition = "2021" 5 | rust-version = "1.73" 6 | authors = ["Xudong Huang "] 7 | license = "MIT/Apache-2.0" 8 | repository = "https://github.com/Xudong-Huang/generator-rs.git" 9 | homepage = "https://github.com/Xudong-Huang/generator-rs.git" 10 | documentation = "https://docs.rs/generator" 11 | description = "Stackful Generator Library in Rust" 12 | readme = "README.md" 13 | keywords = ["generator", "coroutine", "green", "thread", "fiber"] 14 | categories = ["data-structures", "algorithms"] 15 | exclude = [ 16 | ".gitignore", 17 | ".travis.yml", 18 | "appveyor.yml", 19 | "benches/**/*", 20 | ] 21 | build = "build.rs" 22 | 23 | 24 | [target.'cfg(windows)'.dependencies.windows] 25 | version = "0.61" 26 | features = [ 27 | "Win32_System_Memory", 28 | "Win32_System_Kernel", 29 | "Win32_Foundation", 30 | "Win32_System_SystemInformation", 31 | "Win32_System_Diagnostics_Debug" 32 | ] 33 | 34 | [target.'cfg(unix)'.dependencies] 35 | libc = "0.2.100" 36 | 37 | [dependencies] 38 | log = "0.4" 39 | cfg-if = "1.0.0" 40 | 41 | [build-dependencies] 42 | rustversion = "1.0" 43 | cc = "1.0" 44 | 45 | # release build 46 | [profile.release] 47 | lto = true 48 | 49 | [profile.dev.build-override] 50 | debug = true -------------------------------------------------------------------------------- /Cross.toml: -------------------------------------------------------------------------------- 1 | [target.aarch64-linux-android] 2 | image = "ghcr.io/cross-rs/aarch64-linux-android:edge" 3 | 4 | [target.x86_64-linux-android] 5 | image = "ghcr.io/cross-rs/x86_64-linux-android:edge" 6 | 7 | [target.powerpc64le-unknown-linux-gnu] 8 | image = "ghcr.io/cross-rs/powerpc64le-unknown-linux-gnu:edge"
-------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 Xudong Huang 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://github.com/Xudong-Huang/generator-rs/workflows/CI/badge.svg)](https://github.com/Xudong-Huang/generator-rs/actions?query=workflow%3ACI) 2 | [![Current Crates.io Version](https://img.shields.io/crates/v/generator.svg)](https://crates.io/crates/generator) 3 | [![Document](https://img.shields.io/badge/doc-generator-green.svg)](https://docs.rs/generator) 4 | 5 | 6 | # Generator-rs 7 | 8 | rust stackful generator library 9 | 10 | ```toml 11 | [dependencies] 12 | generator = "0.8" 13 | ``` 14 | 15 | 16 | ## Usage 17 | ```rust 18 | use generator::{done, Gn}; 19 | 20 | fn main() { 21 | let g = Gn::new_scoped(|mut s| { 22 | let (mut a, mut b) = (0, 1); 23 | while b < 200 { 24 | std::mem::swap(&mut a, &mut b); 25 | b = a + b; 26 | s.yield_(b); 27 | } 28 | done!(); 29 | }); 30 | 31 | for i in g { 32 | println!("{}", i); 33 | } 34 | } 35 | ``` 36 | 37 | ## Output 38 | ``` 39 | 1 40 | 2 41 | 3 42 | 5 43 | 8 44 | 13 45 | 21 46 | 34 47 | 55 48 | 89 49 | 144 50 | 233 51 | ``` 52 | 53 | ## Goals 54 | 55 | - [x] basic send/yield with message support 56 | - [x] generator cancel support 57 | - [x] yield_from support 58 | - [x] panic inside generator support 59 | - [x] stack size tune support 60 | - [x] scoped static type support 61 | - [x] basic coroutine interface support 62 | - [x] stable rust support 63 | 64 | 65 | ## based on this basic library 66 | - we can easily port python library based on generator into rust 67 | - coroutine framework running on multi thread 68 | 69 | 70 | ## Notices 71 | 72 | * 
This crate supports below platforms, welcome to contribute with other arch and platforms 73 | 74 | - x86_64 Linux 75 | - x86_64 macOS 76 | - x86_64 Windows 77 | - x86_64 Fuchsia 78 | - ~~x86_64 Android~~ 79 | - aarch64 Linux 80 | - aarch64 macOS 81 | - aarch64 Fuchsia 82 | - aarch64 Android 83 | - loongarch64 Linux 84 | - armv7 Linux 85 | - riscv64 Linux 86 | - powerpc64le Linux 87 | 88 | ## License 89 | 90 | This project is licensed under either of the following, at your option: 91 | 92 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) 93 | * MIT License ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 94 | -------------------------------------------------------------------------------- /benches/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg(nightly)] 2 | #![feature(test)] 3 | #![allow(deprecated)] 4 | extern crate generator; 5 | extern crate test; 6 | 7 | use std::panic; 8 | 9 | use generator::*; 10 | use test::Bencher; 11 | 12 | // #[bench] 13 | // fn yield_bench(b: &mut Bencher) { 14 | // // don't print any panic info 15 | // // when cancel the generator 16 | // panic::set_hook(Box::new(|_| {})); 17 | 18 | // b.iter(|| { 19 | // let mut g = Gn::new(|| { 20 | // for i in 0.. { 21 | // yield_with(i); 22 | // } 23 | // 20 24 | // }); 25 | 26 | // for i in 0..1_000_000 { 27 | // let data = g.send(()); 28 | // assert_eq!(data, i); 29 | // } 30 | // }); 31 | // } 32 | 33 | #[bench] 34 | fn single_yield_with_bench(b: &mut Bencher) { 35 | // don't print any panic info 36 | panic::set_hook(Box::new(|_| {})); 37 | 38 | let mut g = Gn::new(|| { 39 | for i in 0.. 
{ 40 | yield_with(i); 41 | } 42 | 20 43 | }); 44 | 45 | let mut i = 0; 46 | b.iter(|| { 47 | let data = g.send(()); 48 | assert_eq!(data, i); 49 | i += 1; 50 | }); 51 | } 52 | 53 | #[bench] 54 | fn single_yield_bench(b: &mut Bencher) { 55 | let mut g = Gn::new(|| { 56 | let mut i = 0; 57 | loop { 58 | let v: Option = yield_(i); 59 | i += 1; 60 | match v { 61 | Some(x) => assert_eq!(x, i), 62 | // for elegant exit 63 | None => break, 64 | } 65 | } 66 | 20usize 67 | }); 68 | 69 | // start g 70 | g.raw_send(None); 71 | 72 | let mut i: usize = 1; 73 | b.iter(|| { 74 | let data: usize = g.send(i); 75 | assert_eq!(data, i); 76 | i += 1; 77 | }); 78 | 79 | // quit g 80 | g.raw_send(None); 81 | } 82 | 83 | #[bench] 84 | fn scoped_yield_bench(b: &mut Bencher) { 85 | let mut g = Gn::new_scoped(|mut s| { 86 | let mut i = 0; 87 | loop { 88 | let v = s.yield_(i); 89 | i += 1; 90 | match v { 91 | Some(x) => { 92 | assert_eq!(x, i); 93 | } 94 | None => { 95 | // for elegant exit 96 | break; 97 | } 98 | } 99 | } 100 | 20usize 101 | }); 102 | 103 | // start g 104 | g.raw_send(None); 105 | 106 | let mut i: usize = 1; 107 | b.iter(|| { 108 | let data: usize = g.send(i); 109 | assert_eq!(data, i); 110 | i += 1; 111 | }); 112 | 113 | // quit g 114 | g.raw_send(None); 115 | } 116 | 117 | #[bench] 118 | fn create_gen(b: &mut Bencher) { 119 | b.iter(|| { 120 | let g = Gn::<()>::new_scoped(|mut s| { 121 | let mut i = 0; 122 | while s.yield_(i).is_some() { 123 | i += 1; 124 | } 125 | i 126 | }); 127 | test::black_box(g) 128 | }); 129 | } 130 | 131 | #[bench] 132 | fn init_gen(b: &mut Bencher) { 133 | let clo_gen = || { 134 | |mut s: Scope<'_, 'static, (), _>| { 135 | let mut i = 0; 136 | loop { 137 | match s.yield_(i) { 138 | Some(..) 
=> { 139 | i += 1; 140 | } 141 | None => { 142 | i += 1; 143 | break; 144 | } 145 | } 146 | } 147 | i 148 | } 149 | }; 150 | 151 | let mut g = Gn::<()>::new_scoped(clo_gen()); 152 | assert_eq!(g.raw_send(None), Some(0)); // start 153 | assert_eq!(g.raw_send(None), Some(1)); // cancel 154 | assert!(g.is_done()); 155 | 156 | b.iter(|| { 157 | let clo = clo_gen(); 158 | // this cost about 20ns on unix and 60ns on windows 159 | // because windows Box::new take more time 160 | g.scoped_init(clo); 161 | // this cost about 70ns 162 | // assert_eq!(g.next(), Some(0)); 163 | }); 164 | } 165 | 166 | #[bench] 167 | fn fnbox_bench(b: &mut Bencher) { 168 | b.iter(|| { 169 | let a: [usize; 100] = [0; 100]; 170 | let f: Box = Box::new(|| { 171 | test::black_box(a); 172 | }); 173 | let _ = test::black_box(f); 174 | }); 175 | } 176 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | #[rustversion::nightly] 4 | const NIGHTLY: bool = true; 5 | 6 | #[rustversion::not(nightly)] 7 | const NIGHTLY: bool = false; 8 | 9 | fn main() { 10 | let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap(); 11 | let external_assembly_required = target_arch == "powerpc64"; 12 | println!("target: {target_arch}, ext: {external_assembly_required}"); 13 | 14 | println!("cargo:rustc-check-cfg=cfg(nightly)"); 15 | if NIGHTLY { 16 | println!("cargo:rustc-cfg=nightly"); 17 | } 18 | 19 | if external_assembly_required { 20 | cc::Build::new() 21 | .file("src/detail/asm/asm_ppc64le_elf.S") 22 | .compile("ppc64le-asm-lib"); 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /examples/cd.rs: -------------------------------------------------------------------------------- 1 | use generator::*; 2 | 3 | #[derive(Debug)] 4 | enum Action { 5 | Play(&'static str), 6 | Stop, 7 | } 8 | 9 | #[derive(Debug, Clone, Copy, 
PartialEq)] 10 | enum State { 11 | Playing, 12 | Stopped, 13 | } 14 | 15 | use crate::Action::*; 16 | use crate::State::*; 17 | 18 | fn main() { 19 | let mut cd_player = Gn::new_scoped(|mut s| { 20 | let mut state = Stopped; 21 | loop { 22 | // println!("{:?}", *state); 23 | // in release mod without this there is bugs!!!!! (rustc 1.59.0 (9d1b2106e 2022-02-23)) 24 | std::sync::atomic::compiler_fence(std::sync::atomic::Ordering::AcqRel); 25 | 26 | match state { 27 | Stopped => match s.get_yield() { 28 | Some(Play(t)) => { 29 | println!("I'm playing {t}"); 30 | state = Playing; 31 | } 32 | Some(Stop) => println!("I'm already stopped"), 33 | _ => unreachable!("some thing wrong"), 34 | }, 35 | 36 | Playing => match s.get_yield() { 37 | Some(Stop) => { 38 | println!("I'm stopped"); 39 | state = Stopped; 40 | } 41 | Some(Play(_)) => println!("should first stop"), 42 | _ => unreachable!("some thing wrong"), 43 | }, 44 | } 45 | 46 | s.yield_with(state); 47 | } 48 | }); 49 | 50 | for _ in 0..1000 { 51 | let ret = cd_player.send(Play("hello world")); 52 | assert_eq!(ret, Playing); 53 | let ret = cd_player.send(Play("hello another day")); 54 | assert_eq!(ret, Playing); 55 | let ret = cd_player.send(Stop); 56 | assert_eq!(ret, Stopped); 57 | let ret = cd_player.send(Stop); 58 | assert_eq!(ret, Stopped); 59 | let ret = cd_player.send(Play("hello another day")); 60 | assert_eq!(ret, Playing); 61 | let ret = cd_player.send(Stop); 62 | assert_eq!(ret, Stopped); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /examples/fib.rs: -------------------------------------------------------------------------------- 1 | use generator::{done, Gn}; 2 | 3 | fn main() { 4 | let g = Gn::new_scoped(|mut s| { 5 | let (mut a, mut b) = (0, 1); 6 | while b < 200 { 7 | std::mem::swap(&mut a, &mut b); 8 | b += a; 9 | s.yield_(b); 10 | } 11 | done!(); 12 | }); 13 | 14 | for i in g { 15 | println!("{i}"); 16 | } 17 | } 18 | 
-------------------------------------------------------------------------------- /examples/get_yield.rs: -------------------------------------------------------------------------------- 1 | #![allow(deprecated)] 2 | use generator::{get_yield, yield_with, Gn}; 3 | 4 | fn sum(a: u32) -> u32 { 5 | let mut sum = a; 6 | let mut recv: u32; 7 | while sum < 200 { 8 | recv = get_yield().unwrap(); 9 | yield_with(sum); 10 | sum += recv; 11 | } 12 | 13 | sum 14 | } 15 | 16 | fn main() { 17 | // we specify the send type is u32 18 | let mut s = Gn::::new(|| sum(1)); 19 | let mut i = 1u32; 20 | while !s.is_done() { 21 | i = s.send(i); 22 | println!("{i}"); 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /examples/lifetime.rs: -------------------------------------------------------------------------------- 1 | fn main() { 2 | let str = "foo".to_string(); 3 | 4 | let mut gen = generator::Gn::new_scoped(|mut s| { 5 | std::thread::scope(|s2| { 6 | s2.spawn(|| { 7 | std::thread::sleep(std::time::Duration::from_millis(500)); 8 | println!("{str}"); 9 | }); 10 | // here we can't use `yield_` because it still ref to `str` 11 | // `yield_` only impl for static captured lifetime 12 | // s.yield_(()); 13 | unsafe { s.yield_unsafe(()) }; 14 | }); 15 | generator::done!(); 16 | }); 17 | 18 | gen.next(); 19 | // std::mem::forget(gen); 20 | // drop(gen); 21 | // drop(str); 22 | std::thread::sleep(std::time::Duration::from_millis(1000)); 23 | } 24 | -------------------------------------------------------------------------------- /examples/number.rs: -------------------------------------------------------------------------------- 1 | use generator::*; 2 | 3 | fn factors(n: u32) -> Generator<'static, (), u32> { 4 | Gn::new_scoped(move |mut s| { 5 | if n == 0 { 6 | return 0; 7 | } 8 | 9 | s.yield_with(1); 10 | 11 | for i in 2..n { 12 | if n % i == 0 { 13 | s.yield_with(i); 14 | } 15 | } 16 | done!(); 17 | }) 18 | } 19 | 20 | fn main() { 21 | for i 
in factors(28) { 22 | println!("{i}"); 23 | } 24 | 25 | (0..10000) 26 | .filter(|n| factors(*n).sum::() == *n) 27 | .fold((), |_, n| { 28 | println!("n = {n}"); 29 | }) 30 | } 31 | -------------------------------------------------------------------------------- /examples/pipe.rs: -------------------------------------------------------------------------------- 1 | use generator::*; 2 | 3 | fn main() { 4 | // fn square<'a, T: Iterator + 'a>(input: T) -> impl Iterator + 'a { 5 | fn square<'a, T: Iterator + Send + 'a>(input: T) -> Generator<'a, (), u32> { 6 | Gn::new_scoped(|mut s| { 7 | for i in input { 8 | s.yield_with(i * i); 9 | } 10 | done!(); 11 | }) 12 | } 13 | 14 | // fn sum<'a, T: Iterator + 'a>(input: T) -> impl Iterator + 'a { 15 | fn sum<'a, T: Iterator + Send + 'a>(input: T) -> Generator<'a, (), u32> { 16 | Gn::new_scoped(|mut s| { 17 | let mut acc = 0; 18 | for i in input { 19 | acc += i; 20 | s.yield_with(acc); 21 | } 22 | done!(); 23 | }) 24 | } 25 | 26 | for (i, sum) in sum(square(0..20)).enumerate() { 27 | println!("square_sum_{i:<2} = {sum:^4}"); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /examples/range.rs: -------------------------------------------------------------------------------- 1 | use generator::{done, Gn}; 2 | 3 | fn main() { 4 | let n = 100000; 5 | let range = Gn::new_scoped(move |mut s| { 6 | let mut num = 0; 7 | while num < n { 8 | s.yield_(num); 9 | num += 1; 10 | } 11 | done!(); 12 | }); 13 | 14 | let sum: usize = range.sum(); 15 | println!("sum ={sum}"); 16 | } 17 | -------------------------------------------------------------------------------- /examples/send.rs: -------------------------------------------------------------------------------- 1 | #![allow(deprecated)] 2 | 3 | use generator::{yield_, Gn}; 4 | use std::mem; 5 | 6 | fn sum(a: u32) -> u32 { 7 | let mut sum = a; 8 | let mut recv: u32; 9 | while sum < 200 { 10 | // println!("sum={} ", sum); 11 | recv = 
yield_(sum).unwrap(); 12 | // println!("recv={}", recv); 13 | sum += recv; 14 | } 15 | sum 16 | } 17 | 18 | fn main() { 19 | // we specify the send type is u32 20 | let mut s = Gn::::new(|| sum(0)); 21 | // first start the generator 22 | assert_eq!(s.raw_send(None).unwrap(), 0); 23 | let mut cur = 1; 24 | let mut last = 1; 25 | 26 | while !s.is_done() { 27 | // println!("send={}", last); 28 | mem::swap(&mut cur, &mut last); 29 | cur = s.send(cur); // s += cur 30 | // println!("cur={} last={}", cur, last); 31 | println!("{cur}"); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /examples/yield_from.rs: -------------------------------------------------------------------------------- 1 | #![allow(deprecated)] 2 | 3 | use generator::*; 4 | 5 | fn xrange(start: u32, end: u32) -> u32 { 6 | for i in start..end { 7 | yield_with(i); 8 | } 9 | done!(); 10 | } 11 | 12 | fn main() { 13 | let g1 = Gn::new(|| xrange(0, 10)); 14 | let g2 = Gn::new(|| xrange(10, 20)); 15 | 16 | let g = Gn::new_scoped(|mut s| { 17 | s.yield_from(g1); 18 | s.yield_from(g2); 19 | done!(); 20 | }); 21 | 22 | g.fold(0, |sum, x| { 23 | println!("i={}, sum={}", x, sum + x); 24 | sum + x 25 | }); 26 | } 27 | -------------------------------------------------------------------------------- /src/detail/aarch64_unix.rs: -------------------------------------------------------------------------------- 1 | use crate::detail::align_down; 2 | use crate::stack::Stack; 3 | 4 | cfg_if::cfg_if! 
{ 5 | if #[cfg(target_os = "macos")] { 6 | std::arch::global_asm!(include_str!("asm/asm_aarch64_aapcs_macho.S")); 7 | } else if #[cfg(target_os = "ios")] { 8 | std::arch::global_asm!(include_str!("asm/asm_aarch64_aapcs_macho.S")); 9 | } else { 10 | std::arch::global_asm!(include_str!("asm/asm_aarch64_aapcs_elf.S")); 11 | } 12 | } 13 | 14 | // first argument is task handle, second is thunk ptr 15 | pub type InitFn = extern "C" fn(usize, *mut usize) -> !; 16 | 17 | pub extern "C" fn gen_init(a1: usize, a2: *mut usize) -> ! { 18 | super::gen::gen_init_impl(a1, a2) 19 | } 20 | 21 | //#[link(name = "asm", kind = "static")] 22 | extern "C" { 23 | pub fn bootstrap_green_task(); 24 | pub fn prefetch(data: *const usize); 25 | pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers); 26 | } 27 | 28 | #[repr(C)] 29 | #[derive(Debug)] 30 | pub struct Registers { 31 | // We save the 13 callee-saved registers: 32 | // x19--x28, fp (x29), lr (x30), sp 33 | // and the 8 callee-saved floating point registers: 34 | // d8--d15 35 | gpr: [usize; 32], 36 | } 37 | 38 | impl Registers { 39 | pub fn new() -> Registers { 40 | Registers { gpr: [0; 32] } 41 | } 42 | 43 | #[inline] 44 | pub fn prefetch(&self) { 45 | let ptr = self.gpr[12] as *const usize; 46 | unsafe { 47 | prefetch(ptr); // RSP 48 | prefetch(ptr.add(8)); // RSP + 8 49 | } 50 | } 51 | } 52 | 53 | pub fn initialize_call_frame( 54 | regs: &mut Registers, 55 | fptr: InitFn, 56 | arg: usize, 57 | arg2: *mut usize, 58 | stack: &Stack, 59 | ) { 60 | // Callee-saved registers start at x19 61 | const X19: usize = 19 - 19; 62 | const X20: usize = 20 - 19; 63 | const X21: usize = 21 - 19; 64 | 65 | const FP: usize = 29 - 19; 66 | const LR: usize = 30 - 19; 67 | const SP: usize = 31 - 19; 68 | 69 | let sp = align_down(stack.end()); 70 | 71 | // These registers are frobbed by bootstrap_green_task into the right 72 | // location so we can invoke the "real init function", `fptr`. 
73 | regs.gpr[X19] = arg; 74 | regs.gpr[X20] = arg2 as usize; 75 | regs.gpr[X21] = fptr as usize; 76 | 77 | // Aarch64 current stack frame pointer 78 | regs.gpr[FP] = sp as usize; 79 | 80 | regs.gpr[LR] = bootstrap_green_task as usize; 81 | 82 | // setup the init stack 83 | // this is prepared for the swap context 84 | regs.gpr[SP] = sp as usize; 85 | } 86 | -------------------------------------------------------------------------------- /src/detail/arm_unix.rs: -------------------------------------------------------------------------------- 1 | use crate::detail::{align_down, gen}; 2 | use crate::stack::Stack; 3 | 4 | // first argument is task handle, second is thunk ptr 5 | pub type InitFn = extern "aapcs" fn(usize, *mut usize) -> !; 6 | 7 | pub extern "aapcs" fn gen_init(a1: usize, a2: *mut usize) -> ! { 8 | gen::gen_init_impl(a1, a2) 9 | } 10 | 11 | std::arch::global_asm!(include_str!("asm/asm_arm_aapcs_elf.S")); 12 | 13 | extern "aapcs" { 14 | pub fn bootstrap_green_task(); 15 | pub fn prefetch(data: *const usize); 16 | pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers); 17 | } 18 | 19 | #[repr(C)] 20 | #[derive(Debug)] 21 | pub struct Registers { 22 | // We save the 10 callee-saved registers: 23 | // r4~r10(v1~v7), fp (r11), lr (r14), sp 24 | // and the 16 callee-saved floating point registers: 25 | // s16~s31 26 | gpr: [usize; 32], 27 | } 28 | 29 | impl Registers { 30 | pub fn new() -> Registers { 31 | Registers { gpr: [0; 32] } 32 | } 33 | 34 | #[inline] 35 | pub fn prefetch(&self) { 36 | let ptr = self.gpr[8 /* SP */] as *const usize; 37 | unsafe { 38 | prefetch(ptr); // SP 39 | prefetch(ptr.add(1)); // SP + 4 40 | } 41 | } 42 | } 43 | 44 | pub fn initialize_call_frame( 45 | regs: &mut Registers, 46 | fptr: InitFn, 47 | arg: usize, 48 | arg2: *mut usize, 49 | stack: &Stack, 50 | ) { 51 | // Callee-saved registers start at r4 52 | const R4: usize = 4 - 4; 53 | const R5: usize = 5 - 4; 54 | const R6: usize = 6 - 4; 55 | 56 | const FP: 
usize = 11 - 4; // R11 57 | const SP: usize = 12 - 4; // R13 58 | const LR: usize = 13 - 4; // R14 59 | 60 | let sp = align_down(stack.end()); 61 | 62 | // These registers are frobbed by bootstrap_green_task into the right 63 | // location so we can invoke the "real init function", `fptr`. 64 | regs.gpr[R4] = arg; 65 | regs.gpr[R5] = arg2 as usize; 66 | regs.gpr[R6] = fptr as usize; 67 | 68 | // arm current stack frame pointer 69 | regs.gpr[FP] = sp as usize; 70 | 71 | regs.gpr[LR] = bootstrap_green_task as usize; 72 | 73 | // setup the init stack 74 | // this is prepared for the swap context 75 | regs.gpr[SP] = sp as usize; 76 | } 77 | -------------------------------------------------------------------------------- /src/detail/asm/asm_aarch64_aapcs_elf.S: -------------------------------------------------------------------------------- 1 | .text 2 | .globl prefetch 3 | .type prefetch,@function 4 | .align 2 5 | prefetch: 6 | prfm pldl1keep, [x0] 7 | ret 8 | .size prefetch,.-prefetch 9 | 10 | .text 11 | .globl bootstrap_green_task 12 | .type bootstrap_green_task,@function 13 | .align 2 14 | bootstrap_green_task: 15 | mov x0, x19 // arg0 16 | mov x1, x20 // arg1 17 | mov x30, #0 // clear LR 18 | ret x21 19 | .size bootstrap_green_task,.-bootstrap_green_task 20 | 21 | .text 22 | .globl swap_registers 23 | .type swap_registers,@function 24 | .align 2 25 | swap_registers: 26 | stp x19, x20, [x0, #0] 27 | stp x21, x22, [x0, #16] 28 | stp x23, x24, [x0, #32] 29 | stp x25, x26, [x0, #48] 30 | stp x27, x28, [x0, #64] 31 | stp x29, x30, [x0, #80] 32 | 33 | mov x2, sp 34 | str x2, [x0, #96] 35 | 36 | stp d8, d9, [x0, #112] 37 | stp d10, d11, [x0, #128] 38 | stp d12, d13, [x0, #144] 39 | stp d14, d15, [x0, #160] 40 | 41 | ldp x19, x20, [x1, #0] 42 | ldp x21, x22, [x1, #16] 43 | ldp x23, x24, [x1, #32] 44 | ldp x25, x26, [x1, #48] 45 | ldp x27, x28, [x1, #64] 46 | ldp x29, x30, [x1, #80] 47 | 48 | ldr x2, [x1, #96] 49 | mov sp, x2 50 | 51 | ldp d8, d9, [x1, #112] 52 | ldp d10, 
d11, [x1, #128] 53 | ldp d12, d13, [x1, #144] 54 | ldp d14, d15, [x1, #160] 55 | 56 | br x30 57 | .size swap_registers,.-swap_registers 58 | 59 | /* Mark that we don't need executable stack. */ 60 | .section .note.GNU-stack,"",%progbits 61 | -------------------------------------------------------------------------------- /src/detail/asm/asm_aarch64_aapcs_macho.S: -------------------------------------------------------------------------------- 1 | .text 2 | .globl _prefetch 3 | .align 2 4 | _prefetch: 5 | prfm pldl1keep, [x0] 6 | ret 7 | 8 | .text 9 | .globl _bootstrap_green_task 10 | .align 2 11 | _bootstrap_green_task: 12 | mov x0, x19 // arg0 13 | mov x1, x20 // arg1 14 | mov x30, #0 // clear LR 15 | ret x21 16 | 17 | .text 18 | .globl _swap_registers 19 | .align 2 20 | _swap_registers: 21 | stp x19, x20, [x0, #0] 22 | stp x21, x22, [x0, #16] 23 | stp x23, x24, [x0, #32] 24 | stp x25, x26, [x0, #48] 25 | stp x27, x28, [x0, #64] 26 | stp x29, x30, [x0, #80] 27 | 28 | mov x2, sp 29 | str x2, [x0, #96] 30 | 31 | stp d8, d9, [x0, #112] 32 | stp d10, d11, [x0, #128] 33 | stp d12, d13, [x0, #144] 34 | stp d14, d15, [x0, #160] 35 | 36 | ldp x19, x20, [x1, #0] 37 | ldp x21, x22, [x1, #16] 38 | ldp x23, x24, [x1, #32] 39 | ldp x25, x26, [x1, #48] 40 | ldp x27, x28, [x1, #64] 41 | ldp x29, x30, [x1, #80] 42 | 43 | ldr x2, [x1, #96] 44 | mov sp, x2 45 | 46 | ldp d8, d9, [x1, #112] 47 | ldp d10, d11, [x1, #128] 48 | ldp d12, d13, [x1, #144] 49 | ldp d14, d15, [x1, #160] 50 | 51 | br x30 52 | -------------------------------------------------------------------------------- /src/detail/asm/asm_arm_aapcs_elf.S: -------------------------------------------------------------------------------- 1 | .text 2 | .globl prefetch 3 | .type prefetch, %function 4 | .align 2 5 | prefetch: 6 | pld [r0] 7 | bx lr 8 | .size prefetch,.-prefetch 9 | 10 | .text 11 | .globl bootstrap_green_task 12 | .type bootstrap_green_task, %function 13 | .align 2 14 | bootstrap_green_task: 15 | mov r0, r4 // 
arg0 16 | mov r1, r5 // arg1 17 | mov lr, 0 // clear LR 18 | bx r6 19 | .size bootstrap_green_task,.-bootstrap_green_task 20 | 21 | .text 22 | .globl swap_registers 23 | .type swap_registers, %function 24 | .align 2 25 | swap_registers: 26 | // Android doesn't like to use sp directly 27 | stmia r0!, {{v1-v7, fp}} 28 | mov r2, sp 29 | stmia r0!, {{r2, lr}} 30 | vstmia r0!, {{s16-s31}} 31 | ldmia r1!, {{v1-v7, fp}} 32 | ldmia r1!, {{r2, lr}} 33 | mov sp, r2 34 | vldmia r1!, {{s16-s31}} 35 | bx lr 36 | .size swap_registers,.-swap_registers 37 | 38 | /* Mark that we don't need executable stack. */ 39 | .section .note.GNU-stack,"",%progbits 40 | -------------------------------------------------------------------------------- /src/detail/asm/asm_loongarch64_sysv_elf.S: -------------------------------------------------------------------------------- 1 | .text 2 | .globl prefetch 3 | .type prefetch,@function 4 | .align 2 5 | prefetch: 6 | preld 0, $a0, 0 7 | ret 8 | .size prefetch,.-prefetch 9 | 10 | .text 11 | .globl bootstrap_green_task 12 | .type bootstrap_green_task,@function 13 | .align 2 14 | bootstrap_green_task: 15 | move $a0, $s0 // arg0 16 | move $a1, $s1 // arg1 17 | move $ra, $zero // clear LR 18 | jirl $zero, $s2, 0 19 | .size bootstrap_green_task,.-bootstrap_green_task 20 | 21 | .text 22 | .globl swap_registers 23 | .type swap_registers,@function 24 | .align 2 25 | swap_registers: 26 | st.d $ra, $a0, 0 27 | st.d $sp, $a0, 8 28 | st.d $fp, $a0, 16 29 | st.d $s0, $a0, 24 30 | st.d $s1, $a0, 32 31 | st.d $s2, $a0, 40 32 | st.d $s3, $a0, 48 33 | st.d $s4, $a0, 56 34 | st.d $s5, $a0, 64 35 | st.d $s6, $a0, 72 36 | st.d $s7, $a0, 80 37 | st.d $s8, $a0, 88 38 | 39 | fst.d $fs0, $a0, 96 40 | fst.d $fs1, $a0, 104 41 | fst.d $fs2, $a0, 112 42 | fst.d $fs3, $a0, 120 43 | fst.d $fs4, $a0, 128 44 | fst.d $fs5, $a0, 136 45 | fst.d $fs6, $a0, 144 46 | fst.d $fs7, $a0, 152 47 | 48 | ld.d $ra, $a1, 0 49 | ld.d $sp, $a1, 8 50 | ld.d $fp, $a1, 16 51 | ld.d $s0, $a1, 24 52 | 
ld.d $s1, $a1, 32 53 | ld.d $s2, $a1, 40 54 | ld.d $s3, $a1, 48 55 | ld.d $s4, $a1, 56 56 | ld.d $s5, $a1, 64 57 | ld.d $s6, $a1, 72 58 | ld.d $s7, $a1, 80 59 | ld.d $s8, $a1, 88 60 | 61 | fld.d $fs0, $a1, 96 62 | fld.d $fs1, $a1, 104 63 | fld.d $fs2, $a1, 112 64 | fld.d $fs3, $a1, 120 65 | fld.d $fs4, $a1, 128 66 | fld.d $fs5, $a1, 136 67 | fld.d $fs6, $a1, 144 68 | fld.d $fs7, $a1, 152 69 | 70 | ret 71 | .size swap_registers,.-swap_registers 72 | 73 | /* Mark that we don't need executable stack. */ 74 | .section .note.GNU-stack,"",%progbits 75 | -------------------------------------------------------------------------------- /src/detail/asm/asm_ppc64le_elf.S: -------------------------------------------------------------------------------- 1 | // unfortunately the IBM assembler just uses numbers for registers 2 | // making the assembly hard to read when registers are mixed with offsets. 3 | // therefore are here some defines for readability: 4 | 5 | #pragma region defines 6 | 7 | #define r0 0 8 | #define r1 1 9 | #define fp 1 10 | #define r2 2 11 | #define r3 3 12 | #define r4 4 13 | #define r5 5 14 | #define r6 6 15 | #define r7 7 16 | #define r8 8 17 | #define r9 9 18 | #define r10 10 19 | #define r11 11 20 | #define r12 12 21 | #define r13 13 22 | #define r14 14 23 | #define r15 15 24 | #define r16 16 25 | #define r17 17 26 | #define r18 18 27 | #define r19 19 28 | #define r20 20 29 | #define r21 21 30 | #define r22 22 31 | #define r23 23 32 | #define r24 24 33 | #define r25 25 34 | #define r26 26 35 | #define r27 27 36 | #define r28 28 37 | #define r29 29 38 | #define r30 30 39 | #define r31 31 40 | 41 | // floating-point registers 42 | #define f14 14 43 | #define f15 15 44 | #define f16 16 45 | #define f17 17 46 | #define f18 18 47 | #define f19 19 48 | #define f20 20 49 | #define f21 21 50 | #define f22 22 51 | #define f23 23 52 | #define f24 24 53 | #define f25 25 54 | #define f26 26 55 | #define f27 27 56 | #define f28 28 57 | #define f29 29 58 | #define 
f30 30 59 | #define f31 31 60 | 61 | // vector registers 62 | #define v20 20 63 | #define v21 21 64 | #define v22 22 65 | #define v23 23 66 | #define v24 24 67 | #define v25 25 68 | #define v26 26 69 | #define v27 27 70 | #define v28 28 71 | #define v29 29 72 | #define v30 30 73 | #define v31 31 74 | 75 | #pragma endregion defines 76 | 77 | .text 78 | .globl prefetch 79 | .type prefetch,@function 80 | .align 16 81 | prefetch: 82 | addis 2,12,.TOC.-prefetch@ha 83 | addi 2,2,.TOC.-prefetch@l 84 | .localentry prefetch, .-prefetch 85 | // NOTE: dcbt prefetches data, not instructions! 86 | dcbt 0, r3 87 | blr 88 | .size prefetch,.-prefetch 89 | 90 | 91 | .text 92 | .globl bootstrap_green_task 93 | .type bootstrap_green_task,@function 94 | .align 16 95 | bootstrap_green_task: 96 | // setting parameters from loaded non-volatile regs 97 | mr r3, r14 98 | mr r4, r15 99 | 100 | mr r12, r16 // setup entrypoint since position independent code can assume 101 | // r12 to contain its GEP address (page 61 Power ABI) 102 | 103 | mtlr r16 104 | blr 105 | 106 | .size bootstrap_green_task,.-bootstrap_green_task 107 | 108 | 109 | .text 110 | .globl swap_registers 111 | .type swap_registers,@function 112 | .align 16 113 | swap_registers: 114 | // save non-volatile registers to the buffer in Registers (r3) 115 | // load non-volatile registers from new context buffer given via r4 116 | 117 | // standard function preamble: 118 | addis 2,12,.TOC.-swap_registers@ha 119 | addi 2,2,.TOC.-swap_registers@l 120 | .localentry swap_registers, .-swap_registers 121 | 122 | // save link & control registers 123 | mflr r0 124 | std r0,0(r3) 125 | std r0, 2*8(r1) 126 | mfcr r0 127 | std r0,1*8(r3) 128 | 129 | 130 | // non-volatile registers: r1 (fp), r2 (toc), r13, r14-r31, f14-f31, v20-v31, vrsave, 131 | // arguments passed in r3-r10, stack 132 | // => previous reg list (r3), new reg list (r4) 133 | 134 | // saving non-volatile gprs: 135 | std r1, 2*8(r3) // stack pointer 136 | std r2, 3*8(r3) // TOC
pointer 137 | std r12, 4*8(r3) // gloabl entrypoint address (GEP) 138 | std r14, 5*8(r3) // local vars 139 | std r15, 6*8(r3) 140 | std r16, 7*8(r3) 141 | std r17, 8*8(r3) 142 | std r18, 9*8(r3) 143 | std r19, 10*8(r3) 144 | std r20, 11*8(r3) 145 | std r21, 12*8(r3) 146 | std r22, 13*8(r3) 147 | std r23, 14*8(r3) 148 | std r24, 15*8(r3) 149 | std r25, 16*8(r3) 150 | std r26, 17*8(r3) 151 | std r27, 18*8(r3) 152 | std r28, 19*8(r3) 153 | std r29, 20*8(r3) 154 | std r30, 21*8(r3) 155 | std r31, 22*8(r3) // end local vars 156 | 157 | // save non-volatile floating point registers 158 | addi r3, r3, 32*8 // start of fp array 159 | stfd f14, 0*8(r3) // local vars (floating point) 160 | stfd f15, 1*8(r3) 161 | stfd f16, 2*8(r3) 162 | stfd f17, 3*8(r3) 163 | stfd f18, 4*8(r3) 164 | stfd f19, 5*8(r3) 165 | stfd f20, 6*8(r3) 166 | stfd f21, 7*8(r3) 167 | stfd f22, 8*8(r3) 168 | stfd f23, 9*8(r3) 169 | stfd f24, 10*8(r3) 170 | stfd f25, 11*8(r3) 171 | stfd f26, 12*8(r3) 172 | stfd f27, 13*8(r3) 173 | stfd f28, 14*8(r3) 174 | stfd f29, 15*8(r3) 175 | stfd f30, 16*8(r3) 176 | stfd f31, 17*8(r3) // end local vars (fp) 177 | 178 | // and finally the vector registers 179 | addi r3, r3, 18*8 // start of vr area 180 | li r6, 0 181 | 182 | stvx v20, r3, r6 // start of vr saving 183 | addi r6, r6, 16 184 | stvx v21, r3, r6 185 | addi r6, r6, 16 186 | stvx v22, r3, r6 187 | addi r6, r6, 16 188 | stvx v23, r3, r6 189 | addi r6, r6, 16 190 | stvx v24, r3, r6 191 | addi r6, r6, 16 192 | stvx v25, r3, r6 193 | addi r6, r6, 16 194 | stvx v26, r3, r6 195 | addi r6, r6, 16 196 | stvx v27, r3, r6 197 | addi r6, r6, 16 198 | stvx v28, r3, r6 199 | addi r6, r6, 16 200 | stvx v29, r3, r6 201 | addi r6, r6, 16 202 | stvx v30, r3, r6 203 | addi r6, r6, 16 204 | stvx v31, r3, r6 // end of vr saving 205 | 206 | // begin restoration 207 | 208 | // restore floating point registers 209 | mr r5, r4 210 | addi r4, r4, 32*8 // start of fp array 211 | lfd f14, 0*8(r4) // start of fp restore 212 | lfd f15, 
1*8(r4) 213 | lfd f16, 2*8(r4) 214 | lfd f17, 3*8(r4) 215 | lfd f18, 4*8(r4) 216 | lfd f19, 5*8(r4) 217 | lfd f20, 6*8(r4) 218 | lfd f21, 7*8(r4) 219 | lfd f22, 8*8(r4) 220 | lfd f23, 9*8(r4) 221 | lfd f24, 10*8(r4) 222 | lfd f25, 11*8(r4) 223 | lfd f26, 12*8(r4) 224 | lfd f27, 13*8(r4) 225 | lfd f28, 14*8(r4) 226 | lfd f29, 15*8(r4) 227 | lfd f30, 16*8(r4) 228 | lfd f31, 17*8(r4) // end of fp restore 229 | 230 | // restore vector registers 231 | addi r4, r4, 18*8 // start of vr array 232 | li r6, 0 233 | 234 | lvx v20, r4, r6 // start of vr restore 235 | addi r6, r6, 16 236 | lvx v21, r4, r6 237 | addi r6, r6, 16 238 | lvx v22, r4, r6 239 | addi r6, r6, 16 240 | lvx v23, r4, r6 241 | addi r6, r6, 16 242 | lvx v24, r4, r6 243 | addi r6, r6, 16 244 | lvx v25, r4, r6 245 | addi r6, r6, 16 246 | lvx v26, r4, r6 247 | addi r6, r6, 16 248 | lvx v27, r4, r6 249 | addi r6, r6, 16 250 | lvx v28, r4, r6 251 | addi r6, r6, 16 252 | lvx v29, r4, r6 253 | addi r6, r6, 16 254 | lvx v30, r4, r6 255 | addi r6, r6, 16 256 | lvx v31, r4, r6 // end of vr restore 257 | 258 | // restore gpr registers 259 | mr r4, r5 260 | ld r1, 2*8(r4) // start of gpr restore 261 | ld r2, 3*8(r4) 262 | ld r12, 4*8(r4) 263 | ld r14, 5*8(r4) 264 | ld r15, 6*8(r4) 265 | ld r16, 7*8(r4) 266 | ld r17, 8*8(r4) 267 | ld r18, 9*8(r4) 268 | ld r19, 10*8(r4) 269 | ld r20, 11*8(r4) 270 | ld r21, 12*8(r4) 271 | ld r22, 13*8(r4) 272 | ld r23, 14*8(r4) 273 | ld r24, 15*8(r4) 274 | ld r25, 16*8(r4) 275 | ld r26, 17*8(r4) 276 | ld r27, 18*8(r4) 277 | ld r28, 19*8(r4) 278 | ld r29, 20*8(r4) 279 | ld r30, 21*8(r4) 280 | ld r31, 22*8(r4) // end of gpr restore 281 | 282 | // restore/load lr and ctr registers 283 | ld r0, 8(r4) 284 | mtcr r0 285 | ld r0, 0(r4) // new link register 286 | mtlr r0 287 | 288 | blr 289 | 290 | .size swap_registers,.-swap_registers 291 | 292 | /* Mark that we don't need executable stack. 
*/ 293 | .section .note.GNU-stack,"",%progbits -------------------------------------------------------------------------------- /src/detail/asm/asm_riscv64_c_elf.S: -------------------------------------------------------------------------------- 1 | .attribute arch, "rv64gc" 2 | 3 | .text 4 | .globl prefetch 5 | .type prefetch,@function 6 | .align 1 7 | prefetch: 8 | ret 9 | .size prefetch,.-prefetch 10 | 11 | .text 12 | .globl bootstrap_green_task 13 | .type bootstrap_green_task,@function 14 | .align 1 15 | bootstrap_green_task: 16 | mv a0, s2 // arg0 17 | mv a1, s3 // arg1 18 | mv ra, zero // clear RA 19 | jr s4 20 | .size bootstrap_green_task,.-bootstrap_green_task 21 | 22 | .text 23 | .globl swap_registers 24 | .type swap_registers,@function 25 | .align 1 26 | swap_registers: 27 | sd s2, 0*8(a0) 28 | sd s3, 1*8(a0) 29 | sd s4, 2*8(a0) 30 | sd s5, 3*8(a0) 31 | sd s6, 4*8(a0) 32 | sd s7, 5*8(a0) 33 | sd s8, 6*8(a0) 34 | sd s9, 7*8(a0) 35 | sd s10, 8*8(a0) 36 | sd s11, 9*8(a0) 37 | sd fp, 10*8(a0) 38 | sd s1, 11*8(a0) 39 | sd sp, 12*8(a0) 40 | sd ra, 13*8(a0) 41 | fsd fs0, 14*8(a0) 42 | fsd fs1, 15*8(a0) 43 | fsd fs2, 16*8(a0) 44 | fsd fs3, 17*8(a0) 45 | fsd fs4, 18*8(a0) 46 | fsd fs5, 19*8(a0) 47 | fsd fs6, 20*8(a0) 48 | fsd fs7, 21*8(a0) 49 | fsd fs8, 22*8(a0) 50 | fsd fs9, 23*8(a0) 51 | fsd fs10, 24*8(a0) 52 | fsd fs11, 25*8(a0) 53 | ld s2, 0*8(a1) 54 | ld s3, 1*8(a1) 55 | ld s4, 2*8(a1) 56 | ld s5, 3*8(a1) 57 | ld s6, 4*8(a1) 58 | ld s7, 5*8(a1) 59 | ld s8, 6*8(a1) 60 | ld s9, 7*8(a1) 61 | ld s10, 8*8(a1) 62 | ld s11, 9*8(a1) 63 | ld fp, 10*8(a1) 64 | ld s1, 11*8(a1) 65 | ld sp, 12*8(a1) 66 | ld ra, 13*8(a1) 67 | fld fs0, 14*8(a1) 68 | fld fs1, 15*8(a1) 69 | fld fs2, 16*8(a1) 70 | fld fs3, 17*8(a1) 71 | fld fs4, 18*8(a1) 72 | fld fs5, 19*8(a1) 73 | fld fs6, 20*8(a1) 74 | fld fs7, 21*8(a1) 75 | fld fs8, 22*8(a1) 76 | fld fs9, 23*8(a1) 77 | fld fs10, 24*8(a1) 78 | fld fs11, 25*8(a1) 79 | jr ra 80 | .size swap_registers,.-swap_registers 81 | 82 | /* Mark that we 
don't need executable stack. */ 83 | .section .note.GNU-stack,"",%progbits 84 | -------------------------------------------------------------------------------- /src/detail/asm/asm_x86_64_sysv_elf.S: -------------------------------------------------------------------------------- 1 | .text 2 | .globl prefetch 3 | .type prefetch,@function 4 | .align 16 5 | prefetch: 6 | prefetcht2 [rdi] 7 | ret 8 | .size prefetch,.-prefetch 9 | 10 | .text 11 | .globl bootstrap_green_task 12 | .type bootstrap_green_task,@function 13 | .align 16 14 | bootstrap_green_task: 15 | mov rdi, r12 /* setup the function arg */ 16 | mov rsi, r13 /* setup the function arg */ 17 | and rsp, -16 /* align the stack pointer */ 18 | mov [rsp], r14 /* this is the new return address */ 19 | ret 20 | .size bootstrap_green_task,.-bootstrap_green_task 21 | 22 | .text 23 | .globl swap_registers 24 | .type swap_registers,@function 25 | .align 16 26 | swap_registers: 27 | mov [rdi + 0*8], rbx 28 | mov [rdi + 1*8], rsp 29 | mov [rdi + 2*8], rbp 30 | mov [rdi + 4*8], r12 31 | mov [rdi + 5*8], r13 32 | mov [rdi + 6*8], r14 33 | mov [rdi + 7*8], r15 34 | mov rbx, [rsi + 0*8] 35 | mov rsp, [rsi + 1*8] 36 | mov rbp, [rsi + 2*8] 37 | mov r12, [rsi + 4*8] 38 | mov r13, [rsi + 5*8] 39 | mov r14, [rsi + 6*8] 40 | mov r15, [rsi + 7*8] 41 | pop rax 42 | jmp rax 43 | 44 | .size swap_registers,.-swap_registers 45 | 46 | /* Mark that we don't need executable stack.
*/ 47 | .section .note.GNU-stack,"",%progbits 48 | -------------------------------------------------------------------------------- /src/detail/asm/asm_x86_64_sysv_macho.S: -------------------------------------------------------------------------------- 1 | .text 2 | .globl _prefetch 3 | .align 8 4 | _prefetch: 5 | prefetcht2 [rdi] 6 | ret 7 | 8 | .text 9 | .globl _bootstrap_green_task 10 | .align 8 11 | _bootstrap_green_task: 12 | mov rdi, r12 /* setup the function arg */ 13 | mov rsi, r13 /* setup the function arg */ 14 | and rsp, -16 /* align the stack pointer */ 15 | mov [rsp], r14 /* this is the new return adrress */ 16 | ret 17 | 18 | .text 19 | .globl _swap_registers 20 | .align 8 21 | _swap_registers: 22 | mov [rdi + 0*8], rbx 23 | mov [rdi + 1*8], rsp 24 | mov [rdi + 2*8], rbp 25 | mov [rdi + 4*8], r12 26 | mov [rdi + 5*8], r13 27 | mov [rdi + 6*8], r14 28 | mov [rdi + 7*8], r15 29 | mov rbx, [rsi + 0*8] 30 | mov rsp, [rsi + 1*8] 31 | mov rbp, [rsi + 2*8] 32 | mov r12, [rsi + 4*8] 33 | mov r13, [rsi + 5*8] 34 | mov r14, [rsi + 6*8] 35 | mov r15, [rsi + 7*8] 36 | pop rax 37 | jmp rax 38 | -------------------------------------------------------------------------------- /src/detail/asm/asm_x86_64_sysv_pe.S: -------------------------------------------------------------------------------- 1 | .file "asm_x86_64_sysv_pe.S" 2 | .text 3 | .p2align 4,,15 4 | .globl prefetch_asm 5 | .def prefetch_asm; .scl 2; .type 32; .endef 6 | .seh_proc prefetch_asm 7 | prefetch_asm: 8 | .seh_endprologue 9 | prefetcht2 [rdi] 10 | ret 11 | .seh_endproc 12 | 13 | .section .drectve 14 | .ascii " -export:\"prefetch_asm\"" 15 | 16 | .text 17 | .p2align 4,,15 18 | .globl bootstrap_green_task 19 | .def bootstrap_green_task; .scl 2; .type 32; .endef 20 | .seh_proc bootstrap_green_task 21 | bootstrap_green_task: 22 | .seh_endprologue 23 | mov rdi, r12 /* setup the function arg */ 24 | mov rsi, r13 /* setup the function arg */ 25 | and rsp, -16 /* align the stack pointer */ 26 | mov [rsp], 
r14 /* this is the new return adrress */ 27 | ret 28 | .seh_endproc 29 | 30 | .section .drectve 31 | .ascii " -export:\"bootstrap_green_task\"" 32 | 33 | .text 34 | .p2align 4,,15 35 | .globl swap_registers 36 | .def swap_registers; .scl 2; .type 32; .endef 37 | .seh_proc swap_registers 38 | swap_registers: 39 | .seh_endprologue 40 | mov [rdi + 0*8], rbx 41 | mov [rdi + 1*8], rsp 42 | mov [rdi + 2*8], rbp 43 | mov [rdi + 4*8], r12 44 | mov [rdi + 5*8], r13 45 | mov [rdi + 6*8], r14 46 | mov [rdi + 7*8], r15 47 | mov rbx, [rsi + 0*8] 48 | mov rsp, [rsi + 1*8] 49 | mov rbp, [rsi + 2*8] 50 | mov r12, [rsi + 4*8] 51 | mov r13, [rsi + 5*8] 52 | mov r14, [rsi + 6*8] 53 | mov r15, [rsi + 7*8] 54 | 55 | /* load NT_TIB */ 56 | mov r10, gs:[0x30] 57 | /* save current stack base */ 58 | mov rax, [r10 + 0x08] 59 | mov [rdi + 11*8], rax 60 | /* save current stack limit */ 61 | mov rax, [r10 + 0x10] 62 | mov [rdi + 12*8], rax 63 | /* save current deallocation stack */ 64 | mov rax, [r10 + 0x1478] 65 | mov [rdi + 13*8], rax 66 | /* save fiber local storage */ 67 | /* movq 0x18(%r10), %rax */ 68 | /* mov %rax, (14*8)(%rcx) */ 69 | 70 | /* mov %rcx, (3*8)(%rcx) */ 71 | 72 | /* restore fiber local storage */ 73 | /* mov (14*8)(%rdx), %rax */ 74 | /* movq %rax, 0x18(%r10) */ 75 | /* restore deallocation stack */ 76 | mov rax, [rsi + 13*8] 77 | mov [r10 + 0x1478], rax 78 | /* restore stack limit */ 79 | mov rax, [rsi + 12*8] 80 | mov [r10 + 0x10], rax 81 | /* restore stack base */ 82 | mov rax, [rsi + 11*8] 83 | mov [r10 + 0x08], rax 84 | 85 | /* mov (3*8)(%rdx), %rcx */ 86 | 87 | pop rax 88 | jmp rax 89 | .seh_endproc 90 | 91 | .section .drectve 92 | .ascii " -export:\"swap_registers\"" 93 | -------------------------------------------------------------------------------- /src/detail/gen.rs: -------------------------------------------------------------------------------- 1 | use crate::rt::ContextStack; 2 | use crate::stack::{overflow, Func}; 3 | use crate::yield_::yield_now; 4 | use 
crate::Error; 5 | use std::any::Any; 6 | use std::panic; 7 | 8 | /// don't print panic info for Done/Cancel 9 | fn catch_unwind_filter R + panic::UnwindSafe, R>(f: F) -> std::thread::Result { 10 | use std::sync::Once; 11 | static INIT: Once = Once::new(); 12 | INIT.call_once(|| { 13 | let prev_hook = panic::take_hook(); 14 | panic::set_hook(Box::new(move |info| { 15 | // this is not an error at all, ignore it 16 | if let Some(Error::Cancel | Error::Done) = info.payload().downcast_ref::() { 17 | return; 18 | } 19 | prev_hook(info); 20 | })); 21 | }); 22 | 23 | panic::catch_unwind(f) 24 | } 25 | 26 | /// the init function passed to reg_context 27 | #[inline] 28 | pub fn gen_init_impl(_: usize, f: *mut usize) -> ! { 29 | overflow::init_once(); 30 | 31 | let clo = move || { 32 | // consume self.f 33 | let f: &mut Option = unsafe { &mut *(f as *mut _) }; 34 | let func = f.take().unwrap(); 35 | func.call_once(); 36 | }; 37 | 38 | fn check_err(cause: Box) { 39 | // this is not an error at all, ignore it 40 | if let Some(Error::Cancel | Error::Done) = cause.downcast_ref::() { 41 | return; 42 | } 43 | 44 | error!("set panic inside generator"); 45 | ContextStack::current().top().err = Some(cause); 46 | } 47 | 48 | // we can't panic inside the generator context 49 | // need to propagate the panic to the main thread 50 | if let Err(cause) = catch_unwind_filter(clo) { 51 | check_err(cause); 52 | } 53 | 54 | yield_now(); 55 | 56 | unreachable!("Should never come back"); 57 | } 58 | -------------------------------------------------------------------------------- /src/detail/loongarch64_unix.rs: -------------------------------------------------------------------------------- 1 | use crate::detail::align_down; 2 | use crate::stack::Stack; 3 | 4 | std::arch::global_asm!(include_str!("asm/asm_loongarch64_sysv_elf.S")); 5 | 6 | // first argument is task handle, second is thunk ptr 7 | pub type InitFn = extern "C" fn(usize, *mut usize) -> !; 8 | 9 | pub extern "C" fn gen_init(a1: 
usize, a2: *mut usize) -> ! { 10 | super::gen::gen_init_impl(a1, a2) 11 | } 12 | 13 | //#[link(name = "asm", kind = "static")] 14 | extern "C" { 15 | pub fn bootstrap_green_task(); 16 | pub fn prefetch(data: *const usize); 17 | pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers); 18 | } 19 | 20 | #[repr(C, align(16))] 21 | #[derive(Debug)] 22 | pub struct Registers { 23 | // We save the 12 callee-saved registers: 24 | // 0: ra 25 | // 1: sp 26 | // 2: fp 27 | // 3: s0 28 | // 4: s1 29 | // 5: s2 30 | // 6: s3 31 | // 7: s4 32 | // 8: s5 33 | // 9: s6 34 | // 10: s7 35 | // 11: s8 36 | // and the 8 callee-saved floating point registers: 37 | // 12: fs0 38 | // 13: fs1 39 | // 14: fs2 40 | // 15: fs3 41 | // 16: fs4 42 | // 17: fs5 43 | // 18: fs6 44 | // 19: fs7 45 | gpr: [usize; 20], 46 | } 47 | 48 | impl Registers { 49 | pub fn new() -> Registers { 50 | Registers { gpr: [0; 20] } 51 | } 52 | 53 | #[inline] 54 | pub fn prefetch(&self) { 55 | let ptr = self.gpr[1] as *const usize; 56 | unsafe { 57 | prefetch(ptr); // SP 58 | prefetch(ptr.add(8)); // SP + 8 59 | } 60 | } 61 | } 62 | 63 | pub fn initialize_call_frame( 64 | regs: &mut Registers, 65 | fptr: InitFn, 66 | arg: usize, 67 | arg2: *mut usize, 68 | stack: &Stack, 69 | ) { 70 | const RA: usize = 0; 71 | const SP: usize = 1; 72 | const FP: usize = 2; 73 | const S0: usize = 3; 74 | const S1: usize = 4; 75 | const S2: usize = 5; 76 | 77 | let sp = align_down(stack.end()); 78 | 79 | // These registers are frobbed by bootstrap_green_task into the right 80 | // location so we can invoke the "real init function", `fptr`. 
81 | regs.gpr[S0] = arg; 82 | regs.gpr[S1] = arg2 as usize; 83 | regs.gpr[S2] = fptr as usize; 84 | 85 | // LoongArch64 current stack frame pointer 86 | regs.gpr[FP] = sp as usize; 87 | 88 | regs.gpr[RA] = bootstrap_green_task as usize; 89 | 90 | // setup the init stack 91 | // this is prepared for the swap context 92 | regs.gpr[SP] = sp as usize; 93 | } 94 | -------------------------------------------------------------------------------- /src/detail/mod.rs: -------------------------------------------------------------------------------- 1 | // Register contexts used in various architectures 2 | // 3 | // These structures all represent a context of one task throughout its 4 | // execution. Each struct is a representation of the architecture's register 5 | // set. When swapping between tasks, these register sets are used to save off 6 | // the current registers into one struct, and load them all from another. 7 | // 8 | // Note that this is only used for context switching, which means that some of 9 | // the registers may go unused. For example, for architectures with 10 | // callee/caller saved registers, the context will only reflect the callee-saved 11 | // registers. This is because the caller saved registers are already stored 12 | // elsewhere on the stack (if it was necessary anyway). 13 | // 14 | // Additionally, there may be fields on various architectures which are unused 15 | // entirely because they only reflect what is theoretically possible for a 16 | // "complete register set" to show, but user-space cannot alter these registers. 17 | // An example of this would be the segment selectors for x86. 18 | // 19 | // These structures/functions are roughly in-sync with the source files inside 20 | // of src/rt/arch/$arch. The only currently used function from those folders is 21 | // the `rust_swap_registers` function, but that's only because for now segmented 22 | // stacks are disabled. 
/// Round `sp` down to a 16-byte boundary, as the stack-pointer alignment
/// rules of all supported architectures require.
#[inline]
fn align_down(sp: *mut usize) -> *mut usize {
    let sp = (sp as usize) & !(16 - 1);
    sp as *mut usize
}

/// Offset `ptr` by `count` elements of `T`; unlike `ptr::add`, `count`
/// may be negative (stacks grow downwards, so negative offsets are common).
// NOTE: the generic parameter `<T>` is required here — without it the
// function does not compile.
#[inline]
#[allow(dead_code)]
fn mut_offset<T>(ptr: *mut T, count: isize) -> *mut T {
    // SAFETY: callers must ensure the result stays within (or one past)
    // the same allocation; all in-crate callers offset within a Stack.
    unsafe { ptr.offset(count) }
}
{ 8 | super::gen::gen_init_impl(a1, a2); 9 | } 10 | 11 | extern "C" { 12 | pub fn bootstrap_green_task(); 13 | pub fn prefetch(data: *const usize); 14 | #[allow(improper_ctypes)] // allow declaring u128 in Registers (since f128 is not stable yet) 15 | pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers); 16 | } 17 | 18 | #[repr(C)] 19 | #[derive(Debug)] 20 | #[allow(improper_ctypes)] 21 | pub struct Registers { 22 | // array containing all non-volatile registers. in order: 23 | // 0: lr 24 | // 1: cr 25 | // 2: fp 26 | // 3: toc (r2) 27 | // 4: r12 28 | // 5-22: r14-r31 29 | // we use r14 and r15 to store the parameters when initialising a call frame. 30 | // r16 is used to pass the entry point addres (GEP) of the bootstrap function. 31 | // similar to the x86_64 implementation 32 | gpr: [usize; 32], 33 | 34 | // all non-volatile floating point registers (14-31) 35 | fp: [f64; 18], 36 | 37 | // all non-volatile vector registers (128Bit, 20-31) 38 | vr: [u128; 12], // f128 is not stable on ppc64le in rust 39 | // and since these are never accessed in rust, just use u128 40 | // to allocate the required memory. 
41 | } 42 | 43 | // register indices: 44 | const REG_LR: usize = 0; 45 | // const REG_CR: usize = 1; 46 | const REG_FP: usize = 2; 47 | // const REG_TOC: usize = 3; 48 | const REG_GLOB_ENTRY: usize = 4; 49 | const REG_R14: usize = 5; // used to pass parameters on initialisation 50 | const REG_R15: usize = 6; // used to pass parameters on initialistaion 51 | const REG_R16: usize = 7; // used to pass parameters on initialisation 52 | 53 | impl Registers { 54 | pub fn new() -> Self { 55 | Self { 56 | gpr: [0; 32], 57 | fp: [0.0; 18], 58 | vr: [0; 12], 59 | } 60 | } 61 | 62 | pub fn prefetch(&self) { 63 | unsafe { 64 | prefetch(&self.gpr[0]); 65 | } 66 | } 67 | } 68 | 69 | pub fn initialize_call_frame( 70 | regs: &mut Registers, 71 | fptr: InitFn, 72 | arg: usize, 73 | arg2: *mut usize, 74 | stack: &Stack, 75 | ) { 76 | // stack grows towards lower addresses (downward) 77 | let end = stack.end(); 78 | let sp = align_down(end); 79 | let sp = mut_offset(sp, -2); // allow for back chain and CR save word 80 | 81 | regs.gpr[REG_FP] = sp as usize; 82 | 83 | // parameters passed in non-volatile regs and moved to ABI regs in bootstrap_green_task 84 | regs.gpr[REG_R14] = arg; 85 | regs.gpr[REG_R15] = arg2 as usize; 86 | regs.gpr[REG_R16] = fptr as usize; 87 | 88 | regs.gpr[REG_LR] = bootstrap_green_task as usize; 89 | regs.gpr[REG_GLOB_ENTRY] = bootstrap_green_task as usize; 90 | } 91 | -------------------------------------------------------------------------------- /src/detail/riscv64_unix.rs: -------------------------------------------------------------------------------- 1 | use crate::detail::{align_down, gen}; 2 | use crate::stack::Stack; 3 | 4 | // first argument is task handle, second is thunk ptr 5 | pub type InitFn = extern "C" fn(usize, *mut usize) -> !; 6 | 7 | pub extern "C" fn gen_init(a1: usize, a2: *mut usize) -> ! 
{ 8 | gen::gen_init_impl(a1, a2) 9 | } 10 | 11 | std::arch::global_asm!(include_str!("asm/asm_riscv64_c_elf.S")); 12 | 13 | extern "C" { 14 | pub fn bootstrap_green_task(); 15 | pub fn prefetch(data: *const usize); 16 | pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers); 17 | } 18 | 19 | #[repr(C)] 20 | #[derive(Debug)] 21 | pub struct Registers { 22 | // We save the 13 callee-saved registers: 23 | // x18~x27(s2~s11), fp (s0), s1, sp, ra 24 | // and the 12 callee-saved floating point registers: 25 | // f8~f9(fs0~fs1), f18~f27(fs2~fs11) 26 | gpr: [usize; 32], 27 | } 28 | 29 | impl Registers { 30 | pub fn new() -> Registers { 31 | Registers { gpr: [0; 32] } 32 | } 33 | 34 | #[inline] 35 | pub fn prefetch(&self) { 36 | let ptr = self.gpr[12] as *const usize; 37 | unsafe { 38 | prefetch(ptr); // SP 39 | prefetch(ptr.add(1)); // SP + 8 40 | } 41 | } 42 | } 43 | 44 | pub fn initialize_call_frame( 45 | regs: &mut Registers, 46 | fptr: InitFn, 47 | arg: usize, 48 | arg2: *mut usize, 49 | stack: &Stack, 50 | ) { 51 | const S2: usize = 18 - 18; 52 | const S3: usize = 19 - 18; 53 | const S4: usize = 20 - 18; 54 | 55 | const FP: usize = 28 - 18; // S0 56 | const S1: usize = 29 - 18; 57 | const SP: usize = 30 - 18; 58 | const RA: usize = 31 - 18; 59 | 60 | let sp = align_down(stack.end()); 61 | 62 | // These registers are frobbed by bootstrap_green_task into the right 63 | // location so we can invoke the "real init function", `fptr`. 
64 | regs.gpr[S2] = arg; 65 | regs.gpr[S3] = arg2 as usize; 66 | regs.gpr[S4] = fptr as usize; 67 | 68 | regs.gpr[FP] = sp as usize; 69 | regs.gpr[S1] = 0; 70 | regs.gpr[SP] = sp as usize; 71 | regs.gpr[RA] = bootstrap_green_task as usize; 72 | } 73 | -------------------------------------------------------------------------------- /src/detail/x86_64_unix.rs: -------------------------------------------------------------------------------- 1 | use crate::detail::{align_down, mut_offset}; 2 | use crate::stack::Stack; 3 | 4 | // first argument is task handle, second is thunk ptr 5 | pub type InitFn = extern "sysv64" fn(usize, *mut usize) -> !; 6 | 7 | pub extern "sysv64" fn gen_init(a1: usize, a2: *mut usize) -> ! { 8 | super::gen::gen_init_impl(a1, a2) 9 | } 10 | 11 | cfg_if::cfg_if! { 12 | if #[cfg(target_os = "macos")] { 13 | std::arch::global_asm!(include_str!("asm/asm_x86_64_sysv_macho.S")); 14 | } else { 15 | std::arch::global_asm!(include_str!("asm/asm_x86_64_sysv_elf.S")); 16 | } 17 | } 18 | 19 | // #[cfg(not(nightly))] 20 | //#[link(name = "asm", kind = "static")] 21 | extern "sysv64" { 22 | pub fn bootstrap_green_task(); 23 | pub fn prefetch(data: *const usize); 24 | pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers); 25 | } 26 | 27 | /* 28 | #[cfg(nightly)] 29 | mod asm_impl { 30 | use super::Registers; 31 | /// prefetch data 32 | #[inline] 33 | pub unsafe extern "C" fn prefetch(data: *const usize) { 34 | llvm_asm!( 35 | "prefetcht1 $0" 36 | : // no output 37 | : "m"(*data) 38 | : 39 | : "volatile" 40 | ); 41 | } 42 | 43 | #[naked] 44 | #[inline(never)] 45 | pub unsafe extern "C" fn bootstrap_green_task() { 46 | llvm_asm!( 47 | " 48 | mov %r12, %rdi // setup the function arg 49 | mov %r13, %rsi // setup the function arg 50 | and $$-16, %rsp // align the stack pointer 51 | mov %r14, (%rsp) // this is the new return address 52 | " 53 | : // no output 54 | : // no input 55 | : "memory" 56 | : "volatile" 57 | ); 58 | } 59 | 60 | #[naked] 
61 | #[inline(never)] 62 | pub unsafe extern "C" fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers) { 63 | // The first argument is in %rdi, and the second one is in %rsi 64 | llvm_asm!( 65 | "" 66 | : 67 | : "{rdi}"(out_regs), "{rsi}"(in_regs) 68 | : 69 | : 70 | ); 71 | 72 | // introduce this function to workaround rustc bug! (#6) 73 | #[naked] 74 | unsafe extern "C" fn _swap_reg() { 75 | // Save registers 76 | llvm_asm!( 77 | " 78 | mov %rbx, (0*8)(%rdi) 79 | mov %rsp, (1*8)(%rdi) 80 | mov %rbp, (2*8)(%rdi) 81 | mov %r12, (4*8)(%rdi) 82 | mov %r13, (5*8)(%rdi) 83 | mov %r14, (6*8)(%rdi) 84 | mov %r15, (7*8)(%rdi) 85 | 86 | mov (0*8)(%rsi), %rbx 87 | mov (1*8)(%rsi), %rsp 88 | mov (2*8)(%rsi), %rbp 89 | mov (4*8)(%rsi), %r12 90 | mov (5*8)(%rsi), %r13 91 | mov (6*8)(%rsi), %r14 92 | mov (7*8)(%rsi), %r15 93 | " 94 | : 95 | : //"{rdi}"(out_regs), "{rsi}"(in_regs) 96 | : "memory" 97 | : "volatile" 98 | ); 99 | } 100 | 101 | _swap_reg() 102 | } 103 | } 104 | #[cfg(nightly)] 105 | pub use self::asm_impl::*; 106 | */ 107 | 108 | #[repr(C)] 109 | #[derive(Debug)] 110 | pub struct Registers { 111 | gpr: [usize; 8], 112 | } 113 | 114 | impl Registers { 115 | pub fn new() -> Registers { 116 | Registers { gpr: [0; 8] } 117 | } 118 | 119 | #[inline] 120 | pub fn prefetch(&self) { 121 | let ptr = self.gpr[1] as *const usize; 122 | unsafe { 123 | prefetch(ptr); // RSP 124 | prefetch(ptr.add(8)); // RSP + 8 125 | } 126 | } 127 | } 128 | 129 | pub fn initialize_call_frame( 130 | regs: &mut Registers, 131 | fptr: InitFn, 132 | arg: usize, 133 | arg2: *mut usize, 134 | stack: &Stack, 135 | ) { 136 | // Redefinitions from rt/arch/x86_64/regs.h 137 | const RUSTRT_RSP: usize = 1; 138 | const RUSTRT_RBP: usize = 2; 139 | const RUSTRT_R12: usize = 4; 140 | const RUSTRT_R13: usize = 5; 141 | const RUSTRT_R14: usize = 6; 142 | 143 | let sp = align_down(stack.end()); 144 | 145 | // These registers are frobbed by bootstrap_green_task into the right 146 | // location so 
we can invoke the "real init function", `fptr`. 147 | regs.gpr[RUSTRT_R12] = arg; 148 | regs.gpr[RUSTRT_R13] = arg2 as usize; 149 | regs.gpr[RUSTRT_R14] = fptr as usize; 150 | 151 | // Last base pointer on the stack should be 0 152 | regs.gpr[RUSTRT_RBP] = 0; 153 | 154 | // setup the init stack 155 | // this is prepared for the swap context 156 | regs.gpr[RUSTRT_RSP] = mut_offset(sp, -2) as usize; 157 | 158 | unsafe { 159 | // leave enough space for RET 160 | *mut_offset(sp, -2) = bootstrap_green_task as usize; 161 | *mut_offset(sp, -1) = 0; 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /src/detail/x86_64_windows.rs: -------------------------------------------------------------------------------- 1 | use crate::detail::{align_down, mut_offset}; 2 | use crate::stack::Stack; 3 | 4 | // first argument is task handle, second is thunk ptr 5 | pub type InitFn = extern "sysv64" fn(usize, *mut usize) -> !; 6 | 7 | pub extern "sysv64" fn gen_init(a1: usize, a2: *mut usize) -> ! 
{ 8 | super::gen::gen_init_impl(a1, a2) 9 | } 10 | 11 | std::arch::global_asm!(include_str!("asm/asm_x86_64_sysv_pe.S")); 12 | 13 | // #[cfg(not(nightly))] 14 | //#[link(name = "asm", kind = "static")] 15 | extern "sysv64" { 16 | pub fn bootstrap_green_task(); 17 | pub fn prefetch_asm(data: *const usize); 18 | pub fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers); 19 | } 20 | 21 | #[inline] 22 | pub fn prefetch(data: *const usize) { 23 | unsafe { prefetch_asm(data) } 24 | } 25 | 26 | /* 27 | #[cfg(nightly)] 28 | mod asm_impl { 29 | use super::Registers; 30 | /// prefetch data 31 | #[inline] 32 | pub unsafe extern "C" fn prefetch_asm(data: *const usize) { 33 | llvm_asm!( 34 | "prefetcht1 $0" 35 | : // no output 36 | : "m"(*data) 37 | : 38 | : "volatile" 39 | ); 40 | } 41 | 42 | #[naked] 43 | #[inline(never)] 44 | pub unsafe extern "C" fn bootstrap_green_task() { 45 | llvm_asm!( 46 | " 47 | mov %r12, %rcx // setup the function arg 48 | mov %r13, %rdx // setup the function arg 49 | and $$-16, %rsp // align the stack pointer 50 | mov %r14, (%rsp) // this is the new return address 51 | " 52 | : // no output 53 | : // no input 54 | : "memory" 55 | : "volatile" 56 | ); 57 | } 58 | 59 | #[naked] 60 | #[inline(never)] 61 | pub unsafe extern "C" fn swap_registers(out_regs: *mut Registers, in_regs: *const Registers) { 62 | // The first argument is in %rcx, and the second one is in %rdx 63 | llvm_asm!( 64 | "" 65 | : 66 | : "{rcx}"(out_regs), "{rdx}"(in_regs) 67 | : 68 | : 69 | ); 70 | 71 | // introduce this function to workaround rustc bug! 
(#6) 72 | #[naked] 73 | unsafe extern "C" fn _swap_reg() { 74 | // Save registers 75 | llvm_asm!( 76 | " 77 | mov %rbx, (0*8)(%rcx) 78 | mov %rsp, (1*8)(%rcx) 79 | mov %rbp, (2*8)(%rcx) 80 | mov %r12, (4*8)(%rcx) 81 | mov %r13, (5*8)(%rcx) 82 | mov %r14, (6*8)(%rcx) 83 | mov %r15, (7*8)(%rcx) 84 | mov %rdi, (9*8)(%rcx) 85 | mov %rsi, (10*8)(%rcx) 86 | 87 | // mov %rcx, %r10 88 | // and $$0xf0, %r10b 89 | 90 | // Save non-volatile XMM registers: 91 | movapd %xmm6, (16*8)(%rcx) 92 | movapd %xmm7, (18*8)(%rcx) 93 | movapd %xmm8, (20*8)(%rcx) 94 | movapd %xmm9, (22*8)(%rcx) 95 | movapd %xmm10, (24*8)(%rcx) 96 | movapd %xmm11, (26*8)(%rcx) 97 | movapd %xmm12, (28*8)(%rcx) 98 | movapd %xmm13, (30*8)(%rcx) 99 | movapd %xmm14, (32*8)(%rcx) 100 | movapd %xmm15, (34*8)(%rcx) 101 | 102 | /* load NT_TIB */ 103 | movq %gs:(0x30), %r10 104 | /* save current stack base */ 105 | movq 0x08(%r10), %rax 106 | mov %rax, (11*8)(%rcx) 107 | /* save current stack limit */ 108 | movq 0x10(%r10), %rax 109 | mov %rax, (12*8)(%rcx) 110 | /* save current deallocation stack */ 111 | movq 0x1478(%r10), %rax 112 | mov %rax, (13*8)(%rcx) 113 | /* save fiber local storage */ 114 | // movq 0x18(%r10), %rax 115 | // mov %rax, (14*8)(%rcx) 116 | 117 | // mov %rcx, (3*8)(%rcx) 118 | 119 | mov (0*8)(%rdx), %rbx 120 | mov (1*8)(%rdx), %rsp 121 | mov (2*8)(%rdx), %rbp 122 | mov (4*8)(%rdx), %r12 123 | mov (5*8)(%rdx), %r13 124 | mov (6*8)(%rdx), %r14 125 | mov (7*8)(%rdx), %r15 126 | mov (9*8)(%rdx), %rdi 127 | mov (10*8)(%rdx), %rsi 128 | 129 | // Restore non-volatile XMM registers: 130 | movapd (16*8)(%rdx), %xmm6 131 | movapd (18*8)(%rdx), %xmm7 132 | movapd (20*8)(%rdx), %xmm8 133 | movapd (22*8)(%rdx), %xmm9 134 | movapd (24*8)(%rdx), %xmm10 135 | movapd (26*8)(%rdx), %xmm11 136 | movapd (28*8)(%rdx), %xmm12 137 | movapd (30*8)(%rdx), %xmm13 138 | movapd (32*8)(%rdx), %xmm14 139 | movapd (34*8)(%rdx), %xmm15 140 | 141 | /* load NT_TIB */ 142 | movq %gs:(0x30), %r10 143 | /* restore fiber local 
storage */ 144 | // mov (14*8)(%rdx), %rax 145 | // movq %rax, 0x18(%r10) 146 | /* restore deallocation stack */ 147 | mov (13*8)(%rdx), %rax 148 | movq %rax, 0x1478(%r10) 149 | /* restore stack limit */ 150 | mov (12*8)(%rdx), %rax 151 | movq %rax, 0x10(%r10) 152 | /* restore stack base */ 153 | mov (11*8)(%rdx), %rax 154 | movq %rax, 0x8(%r10) 155 | 156 | // mov (3*8)(%rdx), %rcx 157 | " 158 | // why save the rcx and rdx in stack? this will overwrite something! 159 | // the naked function should only use the asm block, debug version breaks 160 | // since rustc 1.27.0-nightly, we have to use O2 level optimization (#6) 161 | : 162 | : //"{rcx}"(out_regs), "{rdx}"(in_regs) 163 | : "memory" 164 | : "volatile" 165 | ); 166 | } 167 | 168 | _swap_reg() 169 | } 170 | } 171 | #[cfg(nightly)] 172 | pub use self::asm_impl::*; 173 | */ 174 | 175 | // windows need to restore xmm6~xmm15, for most cases only use two xmm registers 176 | // so we use sysv64 177 | #[repr(C)] 178 | #[derive(Debug)] 179 | pub struct Registers { 180 | pub(crate) gpr: [usize; 16], 181 | } 182 | 183 | impl Registers { 184 | pub fn new() -> Registers { 185 | Registers { gpr: [0; 16] } 186 | } 187 | 188 | #[inline] 189 | pub fn prefetch(&self) { 190 | let ptr = self.gpr[1] as *const usize; 191 | unsafe { 192 | prefetch(ptr); // RSP 193 | prefetch(ptr.add(8)); // RSP + 8 194 | } 195 | } 196 | } 197 | 198 | pub fn initialize_call_frame( 199 | regs: &mut Registers, 200 | fptr: InitFn, 201 | arg: usize, 202 | arg2: *mut usize, 203 | stack: &Stack, 204 | ) { 205 | // Redefinitions from rt/arch/x86_64/regs.h 206 | const RUSTRT_RSP: usize = 1; 207 | const RUSTRT_RBP: usize = 2; 208 | const RUSTRT_R12: usize = 4; 209 | const RUSTRT_R13: usize = 5; 210 | const RUSTRT_R14: usize = 6; 211 | const RUSTRT_STACK_BASE: usize = 11; 212 | const RUSTRT_STACK_LIMIT: usize = 12; 213 | const RUSTRT_STACK_DEALLOC: usize = 13; 214 | 215 | let sp = align_down(stack.end()); 216 | 217 | // These registers are frobbed by 
bootstrap_green_task into the right 218 | // location so we can invoke the "real init function", `fptr`. 219 | regs.gpr[RUSTRT_R12] = arg; 220 | regs.gpr[RUSTRT_R13] = arg2 as usize; 221 | regs.gpr[RUSTRT_R14] = fptr as usize; 222 | 223 | // Last base pointer on the stack should be 0 224 | regs.gpr[RUSTRT_RBP] = 0; 225 | 226 | regs.gpr[RUSTRT_STACK_BASE] = stack.end() as usize; 227 | regs.gpr[RUSTRT_STACK_LIMIT] = stack.begin() as usize; 228 | regs.gpr[RUSTRT_STACK_DEALLOC] = 0; //mut_offset(sp, -8192) as usize; 229 | 230 | // setup the init stack 231 | // this is prepared for the swap context 232 | regs.gpr[RUSTRT_RSP] = mut_offset(sp, -2) as usize; 233 | 234 | unsafe { 235 | // leave enough space for RET 236 | *mut_offset(sp, -2) = bootstrap_green_task as usize; 237 | *mut_offset(sp, -1) = 0; 238 | } 239 | } 240 | -------------------------------------------------------------------------------- /src/gen_impl.rs: -------------------------------------------------------------------------------- 1 | //! # generator 2 | //! 3 | //! Rust generator implementation 4 | //! 5 | 6 | use crate::detail::gen_init; 7 | use crate::reg_context::RegContext; 8 | use crate::rt::{Context, ContextStack, Error}; 9 | use crate::scope::Scope; 10 | use crate::stack::{Func, Stack, StackBox}; 11 | 12 | use std::any::Any; 13 | use std::fmt; 14 | use std::marker::PhantomData; 15 | use std::panic; 16 | use std::thread; 17 | 18 | /// The default stack size for generators, in bytes. 19 | // windows has a minimal size as 0x4a8!!!! 
/// The default stack size for generators, in bytes.
// windows has a minimal size as 0x4a8!!!!
pub const DEFAULT_STACK_SIZE: usize = 0x1000;

// Empty `#[cold]` function: a call to it marks the enclosing code path as
// rarely taken, steering the optimizer's branch layout.
#[inline]
#[cold]
fn cold() {}

// #[inline]
// fn likely(b: bool) -> bool {
//     if !b { cold() }
//     b
// }

// Branch hint: returns `b` unchanged while telling the optimizer that
// `b == true` is the unexpected case.
#[inline]
pub(crate) fn unlikely(b: bool) -> bool {
    match b {
        true => {
            cold();
            true
        }
        false => false,
    }
}
89 | /// 90 | /// # Safety 91 | /// 92 | /// This function is unsafe because improper use may lead to 93 | /// memory problems. For example, a double-free may occur if the 94 | /// function is called twice on the same raw pointer. 95 | #[inline] 96 | pub unsafe fn from_raw(raw: *mut usize) -> Self { 97 | GeneratorObj { 98 | gen: StackBox::from_raw(raw as *mut GeneratorImpl<'a, A, T>), 99 | } 100 | } 101 | 102 | /// Consumes the `Generator`, returning a wrapped raw pointer. 103 | #[inline] 104 | pub fn into_raw(self) -> *mut usize { 105 | let ret = self.gen.as_ptr() as *mut usize; 106 | std::mem::forget(self); 107 | ret 108 | } 109 | 110 | /// prefetch the generator into cache 111 | #[inline] 112 | pub fn prefetch(&self) { 113 | self.gen.prefetch(); 114 | } 115 | 116 | /// prepare the para that passed into generator before send 117 | #[inline] 118 | pub fn set_para(&mut self, para: A) { 119 | self.gen.set_para(para); 120 | } 121 | 122 | /// set the generator local data 123 | #[inline] 124 | pub fn set_local_data(&mut self, data: *mut u8) { 125 | self.gen.set_local_data(data); 126 | } 127 | 128 | /// get the generator local data 129 | #[inline] 130 | pub fn get_local_data(&self) -> *mut u8 { 131 | self.gen.get_local_data() 132 | } 133 | 134 | /// get the generator panic data 135 | #[inline] 136 | pub fn get_panic_data(&mut self) -> Option> { 137 | self.gen.get_panic_data() 138 | } 139 | 140 | /// resume the generator without touch the para 141 | /// you should call `set_para` before this method 142 | #[inline] 143 | pub fn resume(&mut self) -> Option { 144 | self.gen.resume() 145 | } 146 | 147 | /// `raw_send` 148 | #[inline] 149 | pub fn raw_send(&mut self, para: Option) -> Option { 150 | self.gen.raw_send(para) 151 | } 152 | 153 | /// send interface 154 | pub fn send(&mut self, para: A) -> T { 155 | self.gen.send(para) 156 | } 157 | 158 | /// cancel the generator 159 | /// this will trigger a Cancel panic to unwind the stack and finish the generator 160 | pub fn 
cancel(&mut self) { 161 | self.gen.cancel() 162 | } 163 | 164 | /// is finished 165 | #[inline] 166 | pub fn is_done(&self) -> bool { 167 | self.gen.is_done() 168 | } 169 | 170 | /// get stack total size and used size in word 171 | pub fn stack_usage(&self) -> (usize, usize) { 172 | self.gen.stack_usage() 173 | } 174 | } 175 | 176 | impl Iterator for GeneratorObj<'_, (), T, LOCAL> { 177 | type Item = T; 178 | fn next(&mut self) -> Option { 179 | self.resume() 180 | } 181 | } 182 | 183 | impl fmt::Debug for GeneratorObj<'_, A, T, LOCAL> { 184 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 185 | write!( 186 | f, 187 | "Generator<{}, Output={}, Local={}> {{ ... }}", 188 | std::any::type_name::(), 189 | std::any::type_name::(), 190 | LOCAL 191 | ) 192 | } 193 | } 194 | 195 | /// Generator helper 196 | pub struct Gn { 197 | dummy: PhantomData, 198 | } 199 | 200 | impl Gn { 201 | /// create a scoped generator with default stack size 202 | pub fn new_scoped<'a, T, F>(f: F) -> Generator<'a, A, T> 203 | where 204 | for<'scope> F: FnOnce(Scope<'scope, 'a, A, T>) -> T + Send + 'a, 205 | T: Send + 'a, 206 | A: Send + 'a, 207 | { 208 | Self::new_scoped_opt(DEFAULT_STACK_SIZE, f) 209 | } 210 | 211 | /// create a scoped local generator with default stack size 212 | pub fn new_scoped_local<'a, T, F>(f: F) -> LocalGenerator<'a, A, T> 213 | where 214 | F: FnOnce(Scope) -> T + 'a, 215 | T: 'a, 216 | A: 'a, 217 | { 218 | Self::new_scoped_opt_local(DEFAULT_STACK_SIZE, f) 219 | } 220 | 221 | /// create a scoped generator with specified stack size 222 | pub fn new_scoped_opt<'a, T, F>(size: usize, f: F) -> Generator<'a, A, T> 223 | where 224 | for<'scope> F: FnOnce(Scope<'scope, 'a, A, T>) -> T + Send + 'a, 225 | T: Send + 'a, 226 | A: Send + 'a, 227 | { 228 | let mut gen = GeneratorImpl::::new(Stack::new(size)); 229 | gen.scoped_init(f); 230 | Generator { gen } 231 | } 232 | 233 | /// create a scoped local generator with specified stack size 234 | pub fn 
new_scoped_opt_local<'a, T, F>(size: usize, f: F) -> LocalGenerator<'a, A, T> 235 | where 236 | F: FnOnce(Scope) -> T + 'a, 237 | T: 'a, 238 | A: 'a, 239 | { 240 | let mut gen = GeneratorImpl::::new(Stack::new(size)); 241 | gen.scoped_init(f); 242 | LocalGenerator { gen } 243 | } 244 | } 245 | 246 | impl Gn { 247 | /// create a new generator with default stack size 248 | #[allow(clippy::new_ret_no_self)] 249 | #[deprecated(since = "0.6.18", note = "please use `scope` version instead")] 250 | pub fn new<'a, T: Any, F>(f: F) -> Generator<'a, A, T> 251 | where 252 | F: FnOnce() -> T + Send + 'a, 253 | { 254 | Self::new_opt(DEFAULT_STACK_SIZE, f) 255 | } 256 | 257 | /// create a new generator with specified stack size 258 | // the `may` library use this API so we can't deprecated it yet. 259 | pub fn new_opt<'a, T: Any, F>(size: usize, f: F) -> Generator<'a, A, T> 260 | where 261 | F: FnOnce() -> T + Send + 'a, 262 | { 263 | let mut gen = GeneratorImpl::::new(Stack::new(size)); 264 | gen.init_context(); 265 | gen.init_code(f); 266 | Generator { gen } 267 | } 268 | } 269 | 270 | /// `GeneratorImpl` 271 | #[repr(C)] 272 | struct GeneratorImpl<'a, A, T> { 273 | // run time context 274 | context: Context, 275 | // stack 276 | stack: Stack, 277 | // save the input 278 | para: Option, 279 | // save the output 280 | ret: Option, 281 | // boxed functor 282 | f: Option, 283 | // phantom lifetime 284 | phantom: PhantomData<&'a T>, 285 | } 286 | 287 | impl GeneratorImpl<'_, A, T> { 288 | /// create a new generator with default stack size 289 | fn init_context(&mut self) { 290 | unsafe { 291 | std::ptr::write( 292 | self.context.para.as_mut_ptr(), 293 | &mut self.para as &mut dyn Any, 294 | ); 295 | std::ptr::write(self.context.ret.as_mut_ptr(), &mut self.ret as &mut dyn Any); 296 | } 297 | } 298 | } 299 | 300 | impl<'a, A, T> GeneratorImpl<'a, A, T> { 301 | /// create a new generator with specified stack size 302 | fn new(mut stack: Stack) -> StackBox { 303 | // the stack box 
would finally dealloc the stack! 304 | unsafe { 305 | let mut stack_box = stack.alloc_uninit_box::>(); 306 | (*stack_box.as_mut_ptr()).init(GeneratorImpl { 307 | para: None, 308 | stack, 309 | ret: None, 310 | f: None, 311 | context: Context::new(), 312 | phantom: PhantomData, 313 | }); 314 | stack_box.assume_init() 315 | } 316 | } 317 | 318 | /// prefetch the generator into cache 319 | #[inline] 320 | pub fn prefetch(&self) { 321 | self.context.regs.prefetch(); 322 | } 323 | 324 | /// init a heap based generator with scoped closure 325 | fn scoped_init(&mut self, f: F) 326 | where 327 | for<'scope> F: FnOnce(Scope<'scope, 'a, A, T>) -> T + 'a, 328 | T: 'a, 329 | A: 'a, 330 | { 331 | use std::mem::transmute; 332 | let scope: Scope = unsafe { transmute(Scope::new(&mut self.para, &mut self.ret)) }; 333 | self.init_code(move || f(scope)); 334 | } 335 | 336 | /// init a heap based generator 337 | // it's can be used to re-init a 'done' generator before it's get dropped 338 | fn init_code T + 'a>(&mut self, f: F) 339 | where 340 | T: 'a, 341 | { 342 | // make sure the last one is finished 343 | if self.f.is_none() && self.context._ref == 0 { 344 | self.cancel(); 345 | } else { 346 | let _ = self.f.take(); 347 | } 348 | 349 | // init ctx parent to itself, this would be the new top 350 | self.context.parent = &mut self.context; 351 | 352 | // init the ref to 0 means that it's ready to start 353 | self.context._ref = 0; 354 | let ret = &mut self.ret as *mut _; 355 | // alloc the function on stack 356 | let func = StackBox::new_fn_once(&mut self.stack, move || { 357 | let r = f(); 358 | unsafe { *ret = Some(r) }; 359 | }); 360 | 361 | self.f = Some(func); 362 | 363 | let guard = (self.stack.begin() as usize, self.stack.end() as usize); 364 | self.context.stack_guard = guard; 365 | self.context.regs.init_with( 366 | gen_init, 367 | 0, 368 | &mut self.f as *mut _ as *mut usize, 369 | &self.stack, 370 | ); 371 | } 372 | 373 | /// resume the generator 374 | #[inline] 375 | fn 
resume_gen(&mut self) { 376 | let env = ContextStack::current(); 377 | // get the current regs 378 | let cur = &mut env.top().regs; 379 | 380 | // switch to new context, always use the top context's reg 381 | // for normal generator self.context.parent == self.context 382 | // for coroutine self.context.parent == top generator context 383 | debug_assert!(!self.context.parent.is_null()); 384 | let top = unsafe { &mut *self.context.parent }; 385 | 386 | // save current generator context on stack 387 | env.push_context(&mut self.context); 388 | 389 | // swap to the generator 390 | RegContext::swap(cur, &top.regs); 391 | 392 | // comes back, check the panic status 393 | // this would propagate the panic until root context 394 | // if it's a coroutine just stop propagate 395 | if !self.context.local_data.is_null() { 396 | return; 397 | } 398 | 399 | if let Some(err) = self.context.err.take() { 400 | // pass the error to the parent until root 401 | panic::resume_unwind(err); 402 | } 403 | } 404 | 405 | #[inline] 406 | fn is_started(&self) -> bool { 407 | // when the f is consumed we think it's running 408 | self.f.is_none() 409 | } 410 | 411 | /// prepare the para that passed into generator before send 412 | #[inline] 413 | fn set_para(&mut self, para: A) { 414 | self.para = Some(para); 415 | } 416 | 417 | /// set the generator local data 418 | #[inline] 419 | fn set_local_data(&mut self, data: *mut u8) { 420 | self.context.local_data = data; 421 | } 422 | 423 | /// get the generator local data 424 | #[inline] 425 | fn get_local_data(&self) -> *mut u8 { 426 | self.context.local_data 427 | } 428 | 429 | /// get the generator panic data 430 | #[inline] 431 | fn get_panic_data(&mut self) -> Option> { 432 | self.context.err.take() 433 | } 434 | 435 | /// resume the generator without touch the para 436 | /// you should call `set_para` before this method 437 | #[inline] 438 | fn resume(&mut self) -> Option { 439 | if unlikely(self.is_done()) { 440 | return None; 441 | } 442 | 
443 | // every time we call the function, increase the ref count 444 | // yield will decrease it and return will not 445 | self.context._ref += 1; 446 | self.resume_gen(); 447 | 448 | self.ret.take() 449 | } 450 | 451 | /// `raw_send` 452 | #[inline] 453 | fn raw_send(&mut self, para: Option) -> Option { 454 | if unlikely(self.is_done()) { 455 | return None; 456 | } 457 | 458 | // this is the passed in value of the send primitive 459 | // the yield part would read out this value in the next round 460 | self.para = para; 461 | 462 | // every time we call the function, increase the ref count 463 | // yield will decrease it and return will not 464 | self.context._ref += 1; 465 | self.resume_gen(); 466 | 467 | self.ret.take() 468 | } 469 | 470 | /// send interface 471 | fn send(&mut self, para: A) -> T { 472 | let ret = self.raw_send(Some(para)); 473 | ret.expect("send got None return") 474 | } 475 | 476 | /// cancel the generator without any check 477 | #[inline] 478 | fn raw_cancel(&mut self) { 479 | // tell the func to panic 480 | // so that we can stop the inner func 481 | self.context._ref = 2; 482 | // save the old panic hook, we don't want to print anything for the Cancel 483 | let old = panic::take_hook(); 484 | panic::set_hook(Box::new(|_| {})); 485 | self.resume_gen(); 486 | panic::set_hook(old); 487 | } 488 | 489 | /// cancel the generator 490 | /// this will trigger a Cancel panic to unwind the stack 491 | fn cancel(&mut self) { 492 | if self.is_done() { 493 | return; 494 | } 495 | 496 | // consume the fun if it's not started 497 | if !self.is_started() { 498 | self.f.take(); 499 | self.context._ref = 1; 500 | } else { 501 | self.raw_cancel(); 502 | } 503 | } 504 | 505 | /// is finished 506 | #[inline] 507 | fn is_done(&self) -> bool { 508 | self.is_started() && (self.context._ref & 0x3) != 0 509 | } 510 | 511 | /// get stack total size and used size in word 512 | fn stack_usage(&self) -> (usize, usize) { 513 | (self.stack.size(), 
self.stack.get_used_size()) 514 | } 515 | } 516 | 517 | impl Drop for GeneratorImpl<'_, A, T> { 518 | fn drop(&mut self) { 519 | // when the thread is already panic, do nothing 520 | if thread::panicking() { 521 | return; 522 | } 523 | 524 | if !self.is_started() { 525 | // not started yet, just drop the gen 526 | return; 527 | } 528 | 529 | if !self.is_done() { 530 | trace!("generator is not done while drop"); 531 | self.raw_cancel() 532 | } 533 | 534 | assert!(self.is_done()); 535 | 536 | let (total_stack, used_stack) = self.stack_usage(); 537 | if used_stack < total_stack { 538 | // here we should record the stack in the class 539 | // next time will just use 540 | // set_stack_size::(used_stack); 541 | } else { 542 | error!("stack overflow detected!"); 543 | panic::panic_any(Error::StackErr); 544 | } 545 | } 546 | } 547 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # generator 2 | //! 3 | //! Rust generator library 4 | //! 
5 | 6 | #![deny(warnings, missing_docs)] 7 | #![allow(deprecated)] 8 | 9 | #[macro_use] 10 | extern crate log; 11 | 12 | mod detail; 13 | mod gen_impl; 14 | mod reg_context; 15 | mod rt; 16 | mod scope; 17 | mod stack; 18 | mod yield_; 19 | 20 | pub use crate::gen_impl::{Generator, Gn, LocalGenerator, DEFAULT_STACK_SIZE}; 21 | pub use crate::rt::{get_local_data, is_generator, Error}; 22 | pub use crate::scope::Scope; 23 | pub use crate::yield_::{ 24 | co_get_yield, co_set_para, co_yield_with, done, get_yield, yield_, yield_from, yield_with, 25 | }; 26 | -------------------------------------------------------------------------------- /src/reg_context.rs: -------------------------------------------------------------------------------- 1 | use crate::detail::{initialize_call_frame, swap_registers, InitFn, Registers}; 2 | use crate::stack::Stack; 3 | 4 | #[derive(Debug)] 5 | pub struct RegContext { 6 | /// Hold the registers while the task or scheduler is suspended 7 | pub(crate) regs: Registers, 8 | } 9 | 10 | impl RegContext { 11 | pub fn empty() -> RegContext { 12 | RegContext { 13 | regs: Registers::new(), 14 | } 15 | } 16 | 17 | #[inline] 18 | pub fn prefetch(&self) { 19 | self.regs.prefetch(); 20 | } 21 | 22 | /// Create a new context, only used in tests 23 | #[cfg(test)] 24 | fn new(init: InitFn, arg: usize, start: *mut usize, stack: &Stack) -> RegContext { 25 | let mut ctx = RegContext::empty(); 26 | ctx.init_with(init, arg, start, stack); 27 | ctx 28 | } 29 | 30 | /// init the generator register 31 | #[inline] 32 | pub fn init_with(&mut self, init: InitFn, arg: usize, start: *mut usize, stack: &Stack) { 33 | // Save and then immediately load the current context, 34 | // we will modify it to call the given function when restored back 35 | initialize_call_frame(&mut self.regs, init, arg, start, stack); 36 | } 37 | 38 | /// Switch contexts 39 | /// 40 | /// Suspend the current execution context and resume another by 41 | /// saving the registers values of the 
executing thread to a Context 42 | /// then loading the registers from a previously saved Context. 43 | #[inline] 44 | pub fn swap(out_context: &mut RegContext, in_context: &RegContext) { 45 | // debug!("register raw swap"); 46 | unsafe { swap_registers(&mut out_context.regs, &in_context.regs) } 47 | } 48 | 49 | /// Load the context and switch. This function will never return. 50 | #[inline] 51 | #[cfg(test)] 52 | pub fn load(to_context: &RegContext) { 53 | let mut cur = Registers::new(); 54 | let regs: &Registers = &to_context.regs; 55 | 56 | unsafe { swap_registers(&mut cur, regs) } 57 | } 58 | } 59 | 60 | #[cfg(test)] 61 | mod test { 62 | use std::mem::transmute; 63 | 64 | use crate::reg_context::RegContext; 65 | use crate::stack::Stack; 66 | 67 | const MIN_STACK: usize = 1024; 68 | 69 | fn init_fn_impl(arg: usize, f: *mut usize) -> ! { 70 | let func: fn() = unsafe { transmute(f) }; 71 | func(); 72 | 73 | let ctx: &RegContext = unsafe { transmute(arg) }; 74 | RegContext::load(ctx); 75 | 76 | unreachable!("Should never comeback"); 77 | } 78 | 79 | #[cfg(target_arch = "x86_64")] 80 | extern "sysv64" fn init_fn(arg: usize, f: *mut usize) -> ! { 81 | init_fn_impl(arg, f) 82 | } 83 | 84 | #[cfg(target_arch = "aarch64")] 85 | extern "C" fn init_fn(arg: usize, f: *mut usize) -> ! { 86 | init_fn_impl(arg, f) 87 | } 88 | 89 | #[cfg(target_arch = "loongarch64")] 90 | extern "C" fn init_fn(arg: usize, f: *mut usize) -> ! { 91 | init_fn_impl(arg, f) 92 | } 93 | 94 | #[cfg(target_arch = "riscv64")] 95 | extern "C" fn init_fn(arg: usize, f: *mut usize) -> ! { 96 | init_fn_impl(arg, f) 97 | } 98 | 99 | #[cfg(target_arch = "powerpc64")] 100 | extern "C" fn init_fn(arg: usize, f: *mut usize) -> ! { 101 | init_fn_impl(arg, f) 102 | } 103 | 104 | #[cfg(target_arch = "arm")] 105 | extern "aapcs" fn init_fn(arg: usize, f: *mut usize) -> ! 
{ 106 | init_fn_impl(arg, f) 107 | } 108 | 109 | #[test] 110 | fn test_swap_context() { 111 | static mut VAL: bool = false; 112 | let mut cur = RegContext::empty(); 113 | 114 | fn callback() { 115 | unsafe { VAL = true }; 116 | } 117 | 118 | let stk = Stack::new(MIN_STACK); 119 | let ctx = RegContext::new( 120 | init_fn, 121 | &cur as *const _ as usize, 122 | callback as *mut usize, 123 | &stk, 124 | ); 125 | 126 | RegContext::swap(&mut cur, &ctx); 127 | unsafe { 128 | assert!(VAL); 129 | } 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /src/rt.rs: -------------------------------------------------------------------------------- 1 | //! # generator run time support 2 | //! 3 | //! generator run time context management 4 | //! 5 | use std::any::Any; 6 | use std::cell::Cell; 7 | use std::mem::MaybeUninit; 8 | use std::ptr; 9 | 10 | use crate::reg_context::RegContext; 11 | 12 | thread_local! { 13 | // each thread has it's own generator context stack 14 | static ROOT_CONTEXT_P: Cell<*mut Context> = const { Cell::new(ptr::null_mut()) }; 15 | } 16 | 17 | /// yield panic error types 18 | #[derive(Debug, Copy, Clone, Eq, PartialEq)] 19 | pub enum Error { 20 | /// Done panic 21 | Done, 22 | /// Cancel panic 23 | Cancel, 24 | /// Type mismatch panic 25 | TypeErr, 26 | /// Stack overflow panic 27 | StackErr, 28 | /// Wrong Context panic 29 | ContextErr, 30 | } 31 | 32 | /// generator context 33 | #[repr(C)] 34 | #[repr(align(128))] 35 | pub struct Context { 36 | /// generator regs context 37 | pub regs: RegContext, 38 | /// child context 39 | child: *mut Context, 40 | /// parent context 41 | pub parent: *mut Context, 42 | /// passed in para for send 43 | pub para: MaybeUninit<*mut dyn Any>, 44 | /// this is just a buffer for the return value 45 | pub ret: MaybeUninit<*mut dyn Any>, 46 | /// track generator ref, yield will -1, send will +1 47 | pub _ref: usize, 48 | /// context local storage 49 | pub local_data: *mut u8, 
50 | /// propagate panic 51 | pub err: Option>, 52 | /// cached stack guard for fast path 53 | pub stack_guard: (usize, usize), 54 | } 55 | 56 | impl Context { 57 | /// return a default generator context 58 | pub fn new() -> Context { 59 | Context { 60 | regs: RegContext::empty(), 61 | para: MaybeUninit::zeroed(), 62 | ret: MaybeUninit::zeroed(), 63 | _ref: 1, // none zero means it's not running 64 | err: None, 65 | child: ptr::null_mut(), 66 | parent: ptr::null_mut(), 67 | local_data: ptr::null_mut(), 68 | stack_guard: (0, 0), 69 | } 70 | } 71 | 72 | /// judge it's generator context 73 | #[inline] 74 | pub fn is_generator(&self) -> bool { 75 | !std::ptr::eq(self.parent, self) 76 | } 77 | 78 | /// get current generator send para 79 | #[inline] 80 | pub fn get_para(&mut self) -> Option 81 | where 82 | A: Any, 83 | { 84 | let para = unsafe { 85 | let para_ptr = *self.para.as_mut_ptr(); 86 | assert!(!para_ptr.is_null()); 87 | &mut *para_ptr 88 | }; 89 | match para.downcast_mut::>() { 90 | Some(v) => v.take(), 91 | None => type_error::("get yield type mismatch error detected"), 92 | } 93 | } 94 | 95 | /// get coroutine send para 96 | #[inline] 97 | pub fn co_get_para(&mut self) -> Option { 98 | let para = unsafe { 99 | let para_ptr = *self.para.as_mut_ptr(); 100 | debug_assert!(!para_ptr.is_null()); 101 | &mut *(para_ptr as *mut Option) 102 | }; 103 | para.take() 104 | } 105 | 106 | // /// set current generator send para 107 | // #[inline] 108 | // pub fn set_para(&self, data: A) 109 | // where 110 | // A: Any, 111 | // { 112 | // let para = unsafe { &mut *self.para }; 113 | // match para.downcast_mut::>() { 114 | // Some(v) => *v = Some(data), 115 | // None => type_error::("set yield type mismatch error detected"), 116 | // } 117 | // } 118 | 119 | /// set coroutine send para 120 | /// without check the data type for coroutine performance reason 121 | #[inline] 122 | pub fn co_set_para(&mut self, data: A) { 123 | let para = unsafe { 124 | let para_ptr = 
*self.para.as_mut_ptr(); 125 | debug_assert!(!para_ptr.is_null()); 126 | &mut *(para_ptr as *mut Option) 127 | }; 128 | *para = Some(data); 129 | } 130 | 131 | /// set current generator return value 132 | #[inline] 133 | pub fn set_ret(&mut self, v: T) 134 | where 135 | T: Any, 136 | { 137 | let ret = unsafe { 138 | let ret_ptr = *self.ret.as_mut_ptr(); 139 | assert!(!ret_ptr.is_null()); 140 | &mut *ret_ptr 141 | }; 142 | match ret.downcast_mut::>() { 143 | Some(r) => *r = Some(v), 144 | None => type_error::("yield type mismatch error detected"), 145 | } 146 | } 147 | 148 | /// set coroutine return value 149 | /// without check the data type for coroutine performance reason 150 | #[inline] 151 | pub fn co_set_ret(&mut self, v: T) { 152 | let ret = unsafe { 153 | let ret_ptr = *self.ret.as_mut_ptr(); 154 | debug_assert!(!ret_ptr.is_null()); 155 | &mut *(ret_ptr as *mut Option) 156 | }; 157 | *ret = Some(v); 158 | } 159 | } 160 | 161 | /// Coroutine managing environment 162 | pub struct ContextStack { 163 | pub(crate) root: *mut Context, 164 | } 165 | 166 | impl ContextStack { 167 | #[cold] 168 | fn init_root() -> *mut Context { 169 | let root = { 170 | let mut root = Box::new(Context::new()); 171 | let p = &mut *root as *mut _; 172 | root.parent = p; // init top to current 173 | Box::leak(root) 174 | }; 175 | ROOT_CONTEXT_P.set(root); 176 | root 177 | } 178 | 179 | /// get the current context stack 180 | pub fn current() -> ContextStack { 181 | let mut root = ROOT_CONTEXT_P.get(); 182 | 183 | if root.is_null() { 184 | root = Self::init_root(); 185 | } 186 | ContextStack { root } 187 | } 188 | 189 | /// get the top context 190 | #[inline] 191 | pub fn top(&self) -> &'static mut Context { 192 | let root = unsafe { &mut *self.root }; 193 | unsafe { &mut *root.parent } 194 | } 195 | 196 | /// get the coroutine context 197 | #[inline] 198 | pub fn co_ctx(&self) -> Option<&'static mut Context> { 199 | let root = unsafe { &mut *self.root }; 200 | 201 | // search from top 
202 |         let mut ctx = unsafe { &mut *root.parent }; 203 |         while !std::ptr::eq(ctx, root) { 204 |             if !ctx.local_data.is_null() { 205 |                 return Some(ctx); 206 |             } 207 |             ctx = unsafe { &mut *ctx.parent }; 208 |         } 209 |         // no coroutine found 210 |         None 211 |     } 212 | 213 |     /// push the context to the thread context list 214 |     #[inline] 215 |     pub fn push_context(&self, ctx: *mut Context) { 216 |         let root = unsafe { &mut *self.root }; 217 |         let ctx = unsafe { &mut *ctx }; 218 |         let top = unsafe { &mut *root.parent }; 219 |         let new_top = ctx.parent; 220 | 221 |         // link top and new ctx 222 |         top.child = ctx; 223 |         ctx.parent = top; 224 | 225 |         // save the new top 226 |         root.parent = new_top; 227 |     } 228 | 229 |     /// pop the context from the thread context list and return its parent context 230 |     #[inline] 231 |     pub fn pop_context(&self, ctx: *mut Context) -> &'static mut Context { 232 |         let root = unsafe { &mut *self.root }; 233 |         let ctx = unsafe { &mut *ctx }; 234 |         let parent = unsafe { &mut *ctx.parent }; 235 | 236 |         // save the old top in ctx's parent 237 |         ctx.parent = root.parent; 238 |         // unlink ctx and its parent 239 |         parent.child = ptr::null_mut(); 240 | 241 |         // save the new top 242 |         root.parent = parent; 243 | 244 |         parent 245 |     } 246 | } 247 | 248 | #[inline] 249 | #[cold] 250 | fn type_error(msg: &str) -> ! 
{ 251 | error!("{msg}, expected type: {}", std::any::type_name::()); 252 | std::panic::panic_any(Error::TypeErr) 253 | } 254 | 255 | /// check the current context if it's generator 256 | #[inline] 257 | pub fn is_generator() -> bool { 258 | let env = ContextStack::current(); 259 | let root = unsafe { &mut *env.root }; 260 | !root.child.is_null() 261 | } 262 | 263 | /// get the current context local data 264 | /// only coroutine support local data 265 | #[inline] 266 | pub fn get_local_data() -> *mut u8 { 267 | let env = ContextStack::current(); 268 | let root = unsafe { &mut *env.root }; 269 | 270 | // search from top 271 | let mut ctx = unsafe { &mut *root.parent }; 272 | while !std::ptr::eq(ctx, root) { 273 | if !ctx.local_data.is_null() { 274 | return ctx.local_data; 275 | } 276 | ctx = unsafe { &mut *ctx.parent }; 277 | } 278 | 279 | ptr::null_mut() 280 | } 281 | 282 | pub mod guard { 283 | use crate::is_generator; 284 | use crate::rt::ContextStack; 285 | use crate::stack::sys::page_size; 286 | use std::ops::Range; 287 | 288 | pub type Guard = Range; 289 | 290 | pub fn current() -> Guard { 291 | assert!(is_generator()); 292 | let guard = unsafe { (*(*ContextStack::current().root).child).stack_guard }; 293 | 294 | guard.0 - page_size()..guard.1 295 | } 296 | } 297 | 298 | #[cfg(test)] 299 | mod test { 300 | use super::is_generator; 301 | 302 | #[test] 303 | fn test_is_context() { 304 | // this is the root context 305 | assert!(!is_generator()); 306 | } 307 | 308 | #[test] 309 | fn test_overflow() { 310 | use crate::*; 311 | use std::panic::catch_unwind; 312 | 313 | // test signal mask 314 | for _ in 0..2 { 315 | let result = catch_unwind(|| { 316 | let mut g = Gn::new_scoped(move |_s: Scope<(), ()>| { 317 | let guard = super::guard::current(); 318 | 319 | // make sure the compiler does not apply any optimization on it 320 | std::hint::black_box(unsafe { *(guard.start as *const usize) }); 321 | 322 | eprintln!("entered unreachable code"); 323 | 
std::process::abort(); 324 | }); 325 | 326 | g.next(); 327 | }); 328 | 329 | assert!(matches!( 330 | result.map_err(|err| *err.downcast::().unwrap()), 331 | Err(Error::StackErr) 332 | )); 333 | } 334 | } 335 | } 336 | -------------------------------------------------------------------------------- /src/scope.rs: -------------------------------------------------------------------------------- 1 | //! # yield 2 | //! 3 | //! generator yield implementation 4 | //! 5 | 6 | use std::marker::PhantomData; 7 | use std::sync::atomic; 8 | 9 | use crate::gen_impl::Generator; 10 | use crate::rt::{Context, ContextStack, Error}; 11 | use crate::yield_::raw_yield_now; 12 | 13 | /// passed in scope type 14 | /// it not use the context to pass data, but keep it's own data ref 15 | /// this struct provide both compile type info and runtime data 16 | pub struct Scope<'scope, 'a, A, T> { 17 | para: &'a mut Option, 18 | ret: &'a mut Option, 19 | scope: PhantomData<&'scope mut &'scope ()>, 20 | } 21 | 22 | impl<'a, A, T> Scope<'_, 'a, A, T> { 23 | /// create a new scope object 24 | pub(crate) fn new(para: &'a mut Option, ret: &'a mut Option) -> Self { 25 | Scope { 26 | para, 27 | ret, 28 | scope: PhantomData, 29 | } 30 | } 31 | 32 | /// set current generator return value 33 | #[inline] 34 | fn set_ret(&mut self, v: T) { 35 | *self.ret = Some(v); 36 | } 37 | 38 | /// raw yield without catch passed in para 39 | #[inline] 40 | fn raw_yield(&mut self, env: &ContextStack, context: &mut Context, v: T) { 41 | // check the context 42 | if !context.is_generator() { 43 | panic!("yield from none generator context"); 44 | } 45 | 46 | self.set_ret(v); 47 | context._ref -= 1; 48 | raw_yield_now(env, context); 49 | 50 | // here we just panic to exit the func 51 | if context._ref != 1 { 52 | std::panic::panic_any(Error::Cancel); 53 | } 54 | } 55 | 56 | /// yield something without catch passed in para 57 | #[inline] 58 | pub fn yield_with(&mut self, v: T) { 59 | let env = ContextStack::current(); 60 | 
let context = env.top(); 61 | self.raw_yield(&env, context, v); 62 | } 63 | 64 | /// get current generator send para 65 | #[inline] 66 | pub fn get_yield(&mut self) -> Option { 67 | self.para.take() 68 | } 69 | 70 | /// yield and get the send para 71 | /// # Safety 72 | /// When yield out, the reference of the captured data must be still valid 73 | /// normally, you should always call the `drop` of the generator 74 | #[inline] 75 | pub unsafe fn yield_unsafe(&mut self, v: T) -> Option { 76 | self.yield_with(v); 77 | atomic::compiler_fence(atomic::Ordering::Acquire); 78 | self.get_yield() 79 | } 80 | 81 | /// `yield_from_unsafe` 82 | /// the from generator must has the same type as itself 83 | /// # Safety 84 | /// When yield out, the reference of the captured data must be still valid 85 | /// normally, you should always call the `drop` of the generator 86 | pub unsafe fn yield_from_unsafe(&mut self, mut g: Generator) -> Option { 87 | let env = ContextStack::current(); 88 | let context = env.top(); 89 | let mut p = self.get_yield(); 90 | while !g.is_done() { 91 | match g.raw_send(p) { 92 | None => return None, 93 | Some(r) => self.raw_yield(&env, context, r), 94 | } 95 | p = self.get_yield(); 96 | } 97 | drop(g); // explicitly consume g 98 | p 99 | } 100 | } 101 | 102 | impl Scope<'_, 'static, A, T> { 103 | /// yield and get the send para 104 | // it's totally safe that we can refer to the function block 105 | // since we will come back later 106 | #[inline] 107 | pub fn yield_(&mut self, v: T) -> Option { 108 | unsafe { self.yield_unsafe(v) } 109 | } 110 | 111 | /// `yield_from` 112 | /// the from generator must has the same type as itself 113 | pub fn yield_from(&mut self, g: Generator) -> Option { 114 | unsafe { self.yield_from_unsafe(g) } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/stack/mod.rs: -------------------------------------------------------------------------------- 1 | //! 
# generator stack 2 | //! 3 | //! 4 | 5 | use std::error::Error; 6 | use std::fmt::{self, Display}; 7 | use std::io; 8 | use std::mem::MaybeUninit; 9 | use std::os::raw::c_void; 10 | use std::ptr; 11 | 12 | #[cfg_attr(unix, path = "unix.rs")] 13 | #[cfg_attr(windows, path = "windows.rs")] 14 | pub mod sys; 15 | 16 | pub use sys::overflow; 17 | 18 | // must align with StackBoxHeader 19 | const ALIGN: usize = std::mem::size_of::(); 20 | const HEADER_SIZE: usize = std::mem::size_of::() / std::mem::size_of::(); 21 | 22 | struct StackBoxHeader { 23 | // track the stack 24 | stack: Stack, 25 | // track how big the data is (in usize) 26 | data_size: usize, 27 | // non zero dealloc the stack 28 | need_drop: usize, 29 | } 30 | 31 | /// A pointer type for stack allocation. 32 | pub struct StackBox { 33 | // the stack memory 34 | ptr: ptr::NonNull, 35 | } 36 | 37 | impl StackBox { 38 | /// create uninit stack box 39 | fn new_uninit(stack: &mut Stack, need_drop: usize) -> MaybeUninit { 40 | // cheat #[warn(clippy::needless_pass_by_ref_mut)] 41 | // we need mutable ref for ownership 42 | let _ = stack as *mut Stack; 43 | 44 | let offset = unsafe { &mut *stack.get_offset() }; 45 | // alloc the data 46 | let layout = std::alloc::Layout::new::(); 47 | let align = std::cmp::max(layout.align(), ALIGN); 48 | let size = ((layout.size() + align - 1) & !(align - 1)) / std::mem::size_of::(); 49 | let u_align = align / std::mem::size_of::(); 50 | let pad_size = u_align - (*offset + size) % u_align; 51 | let data_size = size + pad_size; 52 | *offset += data_size; 53 | let ptr = unsafe { ptr::NonNull::new_unchecked(stack.end() as *mut T) }; 54 | 55 | // init the header 56 | *offset += HEADER_SIZE; 57 | unsafe { 58 | let mut header = ptr::NonNull::new_unchecked(stack.end() as *mut StackBoxHeader); 59 | let header = header.as_mut(); 60 | header.data_size = data_size; 61 | header.need_drop = need_drop; 62 | header.stack = stack.shadow_clone(); 63 | MaybeUninit::new(StackBox { ptr }) 64 | } 65 
| } 66 | 67 | fn get_header(&self) -> &StackBoxHeader { 68 | unsafe { 69 | let header = (self.ptr.as_ptr() as *mut usize).offset(0 - HEADER_SIZE as isize); 70 | &*(header as *const StackBoxHeader) 71 | } 72 | } 73 | 74 | /// move data into the box 75 | pub(crate) unsafe fn init(&mut self, data: T) { 76 | ptr::write(self.ptr.as_ptr(), data); 77 | } 78 | 79 | // get the stack ptr 80 | pub(crate) fn as_ptr(&self) -> *mut T { 81 | self.ptr.as_ptr() 82 | } 83 | 84 | /// Constructs a StackBox from a raw pointer. 85 | /// 86 | /// # Safety 87 | /// 88 | /// This function is unsafe because improper use may lead to 89 | /// memory problems. For example, a double-free may occur if the 90 | /// function is called twice on the same raw pointer. 91 | #[inline] 92 | pub(crate) unsafe fn from_raw(raw: *mut T) -> Self { 93 | StackBox { 94 | ptr: ptr::NonNull::new_unchecked(raw), 95 | } 96 | } 97 | 98 | // Consumes the `StackBox`, returning a wrapped raw pointer. 99 | // #[inline] 100 | // pub(crate) fn into_raw(b: StackBox) -> *mut T { 101 | // let ret = b.ptr.as_ptr(); 102 | // std::mem::forget(b); 103 | // ret 104 | // } 105 | } 106 | 107 | pub struct Func { 108 | data: *mut (), 109 | size: usize, 110 | offset: *mut usize, 111 | func: fn(*mut ()), 112 | drop: fn(*mut ()), 113 | } 114 | 115 | impl Func { 116 | pub fn call_once(mut self) { 117 | let data = self.data; 118 | self.data = ptr::null_mut(); 119 | (self.func)(data); 120 | } 121 | } 122 | 123 | impl Drop for Func { 124 | fn drop(&mut self) { 125 | if !self.data.is_null() { 126 | (self.drop)(self.data); 127 | } 128 | unsafe { *self.offset -= self.size }; 129 | } 130 | } 131 | 132 | impl StackBox { 133 | fn call_once(data: *mut ()) { 134 | unsafe { 135 | let data = data as *mut F; 136 | let f = data.read(); 137 | f(); 138 | } 139 | } 140 | 141 | fn drop_inner(data: *mut ()) { 142 | unsafe { 143 | let data = data as *mut F; 144 | ptr::drop_in_place(data); 145 | } 146 | } 147 | 148 | /// create a functor on the stack 149 | 
pub(crate) fn new_fn_once(stack: &mut Stack, data: F) -> Func { 150 | unsafe { 151 | let mut d = Self::new_uninit(stack, 0); 152 | (*d.as_mut_ptr()).init(data); 153 | let d = d.assume_init(); 154 | let header = d.get_header(); 155 | let f = Func { 156 | data: d.ptr.as_ptr() as *mut (), 157 | size: header.data_size + HEADER_SIZE, 158 | offset: stack.get_offset(), 159 | func: Self::call_once, 160 | drop: Self::drop_inner, 161 | }; 162 | std::mem::forget(d); 163 | f 164 | } 165 | } 166 | } 167 | 168 | impl std::ops::Deref for StackBox { 169 | type Target = T; 170 | 171 | fn deref(&self) -> &T { 172 | unsafe { self.ptr.as_ref() } 173 | } 174 | } 175 | 176 | impl std::ops::DerefMut for StackBox { 177 | fn deref_mut(&mut self) -> &mut T { 178 | unsafe { &mut *self.ptr.as_mut() } 179 | } 180 | } 181 | 182 | impl Drop for StackBox { 183 | fn drop(&mut self) { 184 | let header = self.get_header(); 185 | unsafe { 186 | *header.stack.get_offset() -= header.data_size + HEADER_SIZE; 187 | ptr::drop_in_place(self.ptr.as_ptr()); 188 | if header.need_drop != 0 { 189 | header.stack.drop_stack(); 190 | } 191 | } 192 | } 193 | } 194 | 195 | /// Error type returned by stack allocation methods. 196 | #[derive(Debug)] 197 | pub enum StackError { 198 | /// Contains the maximum amount of memory allowed to be allocated as stack space. 199 | ExceedsMaximumSize(usize), 200 | 201 | /// Returned if some kind of I/O error happens during allocation. 
202 | IoError(io::Error), 203 | } 204 | 205 | impl Display for StackError { 206 | fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 207 | match *self { 208 | StackError::ExceedsMaximumSize(size) => write!( 209 | fmt, 210 | "Requested more than max size of {size} bytes for a stack" 211 | ), 212 | StackError::IoError(ref e) => e.fmt(fmt), 213 | } 214 | } 215 | } 216 | 217 | impl Error for StackError { 218 | fn source(&self) -> Option<&(dyn Error + 'static)> { 219 | match *self { 220 | StackError::ExceedsMaximumSize(_) => None, 221 | StackError::IoError(ref e) => Some(e), 222 | } 223 | } 224 | } 225 | 226 | /// Represents any kind of stack memory. 227 | /// 228 | /// `FixedSizeStack` as well as `ProtectedFixedSizeStack` 229 | /// can be used to allocate actual stack space. 230 | #[derive(Debug)] 231 | pub struct SysStack { 232 | top: *mut c_void, 233 | bottom: *mut c_void, 234 | } 235 | 236 | impl SysStack { 237 | /// Creates a (non-owning) representation of some stack memory. 238 | /// 239 | /// It is unsafe because it is your responsibility to make sure that `top` and `bottom` are valid 240 | /// addresses. 241 | #[inline] 242 | pub unsafe fn new(top: *mut c_void, bottom: *mut c_void) -> SysStack { 243 | debug_assert!(top >= bottom); 244 | 245 | SysStack { top, bottom } 246 | } 247 | 248 | /// Returns the top of the stack from which on it grows downwards towards bottom(). 249 | #[inline] 250 | pub fn top(&self) -> *mut c_void { 251 | self.top 252 | } 253 | 254 | /// Returns the bottom of the stack and thus it's end. 255 | #[inline] 256 | pub fn bottom(&self) -> *mut c_void { 257 | self.bottom 258 | } 259 | 260 | /// Returns the size of the stack between top() and bottom(). 261 | #[inline] 262 | pub fn len(&self) -> usize { 263 | self.top as usize - self.bottom as usize 264 | } 265 | 266 | /// Returns the minimal stack size allowed by the current platform. 
267 | #[inline] 268 | pub fn min_size() -> usize { 269 | sys::min_stack_size() 270 | } 271 | 272 | /// Allocates a new stack of `size`. 273 | fn allocate(mut size: usize, protected: bool) -> Result { 274 | let page_size = sys::page_size(); 275 | let min_stack_size = sys::min_stack_size(); 276 | let max_stack_size = sys::max_stack_size(); 277 | let add_shift = i32::from(protected); 278 | let add = page_size << add_shift; 279 | 280 | if size < min_stack_size { 281 | size = min_stack_size; 282 | } 283 | 284 | size = (size - 1) & !(page_size.overflowing_sub(1).0); 285 | 286 | if let Some(size) = size.checked_add(add) { 287 | if size <= max_stack_size { 288 | let mut ret = unsafe { sys::allocate_stack(size) }; 289 | 290 | if protected { 291 | if let Ok(stack) = ret { 292 | ret = unsafe { sys::protect_stack(&stack) }; 293 | } 294 | } 295 | 296 | return ret.map_err(StackError::IoError); 297 | } 298 | } 299 | 300 | Err(StackError::ExceedsMaximumSize(max_stack_size - add)) 301 | } 302 | } 303 | 304 | unsafe impl Send for SysStack {} 305 | 306 | /// generator stack 307 | /// this struct will not dealloc the memory 308 | /// instead StackBox<> would track it's usage and dealloc it 309 | pub struct Stack { 310 | buf: SysStack, 311 | } 312 | 313 | impl Stack { 314 | /// Allocate a new stack of `size`. 
If size = 0, this is a `dummy_stack` 315 | pub fn new(size: usize) -> Stack { 316 | let track = (size & 1) != 0; 317 | 318 | let bytes = usize::max(size * std::mem::size_of::(), SysStack::min_size()); 319 | 320 | let buf = SysStack::allocate(bytes, true).expect("failed to alloc sys stack"); 321 | 322 | let stk = Stack { buf }; 323 | 324 | // if size is not even we do the full foot print test 325 | let count = if track { 326 | stk.size() 327 | } else { 328 | // we only check the last few words 329 | 8 330 | }; 331 | 332 | unsafe { 333 | let buf = stk.buf.bottom as *mut usize; 334 | ptr::write_bytes(buf, 0xEE, count); 335 | } 336 | 337 | // init the stack box usage 338 | let offset = stk.get_offset(); 339 | unsafe { *offset = 1 }; 340 | 341 | stk 342 | } 343 | 344 | /// get used stack size 345 | pub fn get_used_size(&self) -> usize { 346 | let mut offset: usize = 0; 347 | unsafe { 348 | let mut magic: usize = 0xEE; 349 | ptr::write_bytes(&mut magic, 0xEE, 1); 350 | let mut ptr = self.buf.bottom as *mut usize; 351 | while *ptr == magic { 352 | offset += 1; 353 | ptr = ptr.offset(1); 354 | } 355 | } 356 | let cap = self.size(); 357 | cap - offset 358 | } 359 | 360 | /// get the stack cap 361 | #[inline] 362 | pub fn size(&self) -> usize { 363 | self.buf.len() / std::mem::size_of::() 364 | } 365 | 366 | /// Point to the high end of the allocated stack 367 | pub fn end(&self) -> *mut usize { 368 | let offset = self.get_offset(); 369 | unsafe { (self.buf.top as *mut usize).offset(0 - *offset as isize) } 370 | } 371 | 372 | /// Point to the low end of the allocated stack 373 | pub fn begin(&self) -> *mut usize { 374 | self.buf.bottom as *mut _ 375 | } 376 | 377 | /// alloc buffer on this stack 378 | pub fn alloc_uninit_box(&mut self) -> MaybeUninit> { 379 | // the first obj should set need drop to non zero 380 | StackBox::::new_uninit(self, 1) 381 | } 382 | 383 | // get offset 384 | fn get_offset(&self) -> *mut usize { 385 | unsafe { (self.buf.top as *mut usize).offset(-1) 
} 386 | } 387 | 388 | // dealloc the stack 389 | fn drop_stack(&self) { 390 | if self.buf.len() == 0 { 391 | return; 392 | } 393 | let page_size = sys::page_size(); 394 | let guard = (self.buf.bottom as usize - page_size) as *mut c_void; 395 | let size_with_guard = self.buf.len() + page_size; 396 | unsafe { 397 | sys::deallocate_stack(guard, size_with_guard); 398 | } 399 | } 400 | 401 | fn shadow_clone(&self) -> Self { 402 | Stack { 403 | buf: SysStack { 404 | top: self.buf.top, 405 | bottom: self.buf.bottom, 406 | }, 407 | } 408 | } 409 | } 410 | 411 | impl fmt::Debug for Stack { 412 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 413 | let offset = self.get_offset(); 414 | write!(f, "Stack<{:?}, Offset={}>", self.buf, unsafe { *offset }) 415 | } 416 | } 417 | -------------------------------------------------------------------------------- /src/stack/overflow_unix.rs: -------------------------------------------------------------------------------- 1 | use crate::rt::{guard, ContextStack}; 2 | 3 | use crate::yield_::yield_now; 4 | use libc::{sigaction, sighandler_t, SA_ONSTACK, SA_SIGINFO, SIGBUS, SIGSEGV}; 5 | use std::mem; 6 | use std::mem::MaybeUninit; 7 | use std::ptr::null_mut; 8 | use std::sync::{Mutex, Once}; 9 | 10 | static SIG_ACTION: Mutex> = Mutex::new(MaybeUninit::uninit()); 11 | 12 | // Signal handler for the SIGSEGV and SIGBUS handlers. We've got guard pages 13 | // (unmapped pages) at the end of every thread's stack, so if a thread ends 14 | // up running into the guard page it'll trigger this handler. We want to 15 | // detect these cases and print out a helpful error saying that the stack 16 | // has overflowed. All other signals, however, should go back to what they 17 | // were originally supposed to do. 18 | // 19 | // If this is not a stack overflow, the handler un-registers itself and 20 | // then returns (to allow the original signal to be delivered again). 
21 | // Returning from this kind of signal handler is technically not defined 22 | // to work when reading the POSIX spec strictly, but in practice it turns 23 | // out many large systems and all implementations allow returning from a 24 | // signal handler to work. For a more detailed explanation see the 25 | // comments on https://github.com/rust-lang/rust/issues/26458. 26 | // 27 | // A pointer to the exception context is passed as the third argument. This 28 | // context is usually compatible with libc::ucontext_t. However some architectures 29 | // (like powerpc64) do not provide the ucontext_t type in glibc. Since we do not 30 | // use the context information, it is represented as a generic pointer. 31 | // see: https://github.com/rust-lang/libc/pull/3986 / https://github.com/rust-lang/libc/pull/3986 32 | unsafe extern "C" fn signal_handler( 33 |     signum: libc::c_int, 34 |     info: *mut libc::siginfo_t, 35 |     ctx: *mut libc::c_void, // workaround for ppc64le missing ucontext_t in rust libc. 
See: https://github.com/rust-lang/libc/issues/3964 36 | ) { 37 | let _ctx = &mut *ctx; 38 | let addr = (*info).si_addr() as usize; 39 | let stack_guard = guard::current(); 40 | 41 | if !stack_guard.contains(&addr) { 42 | println!("{}", std::backtrace::Backtrace::force_capture()); 43 | // SIG_ACTION is available after we registered our handler 44 | let old_action = SIG_ACTION.lock().unwrap(); 45 | sigaction(signum, old_action.assume_init_ref(), null_mut()); 46 | 47 | // we are unable to handle this 48 | return; 49 | } 50 | 51 | eprintln!( 52 | "\ncoroutine in thread '{}' has overflowed its stack\n", 53 | std::thread::current().name().unwrap_or("") 54 | ); 55 | 56 | ContextStack::current().top().err = Some(Box::new(crate::Error::StackErr)); 57 | 58 | let mut sigset: libc::sigset_t = mem::zeroed(); 59 | libc::sigemptyset(&mut sigset); 60 | libc::sigaddset(&mut sigset, signum); 61 | libc::sigprocmask(libc::SIG_UNBLOCK, &sigset, null_mut()); 62 | 63 | yield_now(); 64 | 65 | std::process::abort(); 66 | } 67 | 68 | #[cold] 69 | unsafe fn init() { 70 | let mut action: sigaction = mem::zeroed(); 71 | 72 | action.sa_flags = SA_SIGINFO | SA_ONSTACK; 73 | action.sa_sigaction = signal_handler as sighandler_t; 74 | 75 | let mut old_action = SIG_ACTION.lock().unwrap(); 76 | 77 | for signal in [SIGSEGV, SIGBUS] { 78 | sigaction(signal, &action, old_action.assume_init_mut()); 79 | } 80 | } 81 | 82 | pub fn init_once() { 83 | static INIT_ONCE: Once = Once::new(); 84 | 85 | INIT_ONCE.call_once(|| unsafe { 86 | init(); 87 | }) 88 | } 89 | -------------------------------------------------------------------------------- /src/stack/overflow_windows.rs: -------------------------------------------------------------------------------- 1 | use crate::rt::{guard, Context, ContextStack}; 2 | use std::sync::Once; 3 | use windows::Win32::Foundation::EXCEPTION_STACK_OVERFLOW; 4 | use windows::Win32::System::Diagnostics::Debug::{ 5 | AddVectoredExceptionHandler, CONTEXT, EXCEPTION_POINTERS, 6 | }; 
/// Vectored exception handler that turns a coroutine stack overflow into a
/// recoverable generator error instead of crashing the process.
///
/// Returns `EXCEPTION_CONTINUE_EXECUTION` after redirecting the faulting
/// thread back into the parent context, or `EXCEPTION_CONTINUE_SEARCH` for
/// any exception it does not own.
unsafe extern "system" fn vectored_handler(exception_info: *mut EXCEPTION_POINTERS) -> i32 {
    // Standard SEH disposition values (not imported from the windows crate here).
    const EXCEPTION_CONTINUE_SEARCH: i32 = 0;
    const EXCEPTION_CONTINUE_EXECUTION: i32 = -1;

    let info = &*exception_info;
    let rec = &(*info.ExceptionRecord);
    let context = &mut (*info.ContextRecord);

    // Handle the fault only when it is a stack overflow AND the faulting
    // stack pointer lies inside the current coroutine's guard range —
    // overflows of ordinary thread stacks are left to the default search.
    if rec.ExceptionCode == EXCEPTION_STACK_OVERFLOW
        && guard::current().contains(&(context.Rsp as usize))
    {
        eprintln!(
            "\ncoroutine in thread '{}' has overflowed its stack\n",
            std::thread::current().name().unwrap_or("")
        );

        let env = ContextStack::current();
        let cur = env.top();
        // Record the failure so the caller observes Error::StackErr.
        cur.err = Some(Box::new(crate::Error::StackErr));

        // Rewrite the saved CPU context: when the kernel resumes this
        // thread it continues in the parent context, off the exhausted
        // coroutine stack.
        context_init(env.pop_context(cur as *mut _), context);

        //yield_now();

        EXCEPTION_CONTINUE_EXECUTION
    } else {
        EXCEPTION_CONTINUE_SEARCH
    }
}
// Extra mmap flag used when allocating coroutine stack memory.
//
// On the targets below `libc::MAP_STACK` is substituted with 0, which adds
// no flag bits to the mmap call.
// NOTE(review): presumably the libc crate does not expose (or the kernel
// does not accept) MAP_STACK on these OSes — re-check when bumping libc.
#[cfg(any(
    target_os = "openbsd",
    target_os = "macos",
    target_os = "ios",
    target_os = "android",
    target_os = "illumos",
    target_os = "solaris"
))]
const MAP_STACK: libc::c_int = 0;

// Everywhere else, forward the real MAP_STACK flag to mmap.
#[cfg(not(any(
    target_os = "openbsd",
    target_os = "macos",
    target_os = "ios",
    target_os = "android",
    target_os = "illumos",
    target_os = "solaris"
)))]
const MAP_STACK: libc::c_int = libc::MAP_STACK;
| (ptr as usize + size) as *mut c_void, 43 | ptr as *mut c_void, 44 | )) 45 | } 46 | } 47 | 48 | pub unsafe fn protect_stack(stack: &SysStack) -> io::Result { 49 | let page_size = page_size(); 50 | 51 | debug_assert!(stack.len() % page_size == 0 && stack.len() != 0); 52 | 53 | let ret = { 54 | let bottom = stack.bottom(); 55 | libc::mprotect(bottom, page_size, libc::PROT_NONE) 56 | }; 57 | 58 | if ret != 0 { 59 | Err(io::Error::last_os_error()) 60 | } else { 61 | let bottom = (stack.bottom() as usize + page_size) as *mut c_void; 62 | Ok(SysStack::new(stack.top(), bottom)) 63 | } 64 | } 65 | 66 | pub unsafe fn deallocate_stack(ptr: *mut c_void, size: usize) { 67 | libc::munmap(ptr, size); 68 | } 69 | 70 | pub fn page_size() -> usize { 71 | static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0); 72 | 73 | let mut ret = PAGE_SIZE.load(Ordering::Relaxed); 74 | 75 | if ret == 0 { 76 | unsafe { 77 | ret = libc::sysconf(libc::_SC_PAGESIZE) as usize; 78 | } 79 | 80 | PAGE_SIZE.store(ret, Ordering::Relaxed); 81 | } 82 | 83 | ret 84 | } 85 | 86 | pub fn min_stack_size() -> usize { 87 | // Previously libc::SIGSTKSZ has been used for this, but it proofed to be very unreliable, 88 | // because the resulting values varied greatly between platforms. 
89 | page_size() 90 | } 91 | 92 | #[cfg(not(target_os = "fuchsia"))] 93 | pub fn max_stack_size() -> usize { 94 | static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0); 95 | 96 | let mut ret = PAGE_SIZE.load(Ordering::Relaxed); 97 | 98 | if ret == 0 { 99 | let mut limit = mem::MaybeUninit::uninit(); 100 | let limitret = unsafe { libc::getrlimit(libc::RLIMIT_STACK, limit.as_mut_ptr()) }; 101 | let limit = unsafe { limit.assume_init() }; 102 | 103 | if limitret == 0 { 104 | ret = if limit.rlim_max == libc::RLIM_INFINITY 105 | || limit.rlim_max > (usize::MAX as libc::rlim_t) 106 | { 107 | usize::MAX 108 | } else { 109 | limit.rlim_max as usize 110 | }; 111 | 112 | PAGE_SIZE.store(ret, Ordering::Relaxed); 113 | } else { 114 | ret = 1024 * 1024 * 1024; 115 | } 116 | } 117 | 118 | ret 119 | } 120 | 121 | #[cfg(target_os = "fuchsia")] 122 | pub fn max_stack_size() -> usize { 123 | // Fuchsia doesn't have a platform defined hard cap. 124 | usize::MAX 125 | } 126 | -------------------------------------------------------------------------------- /src/stack/windows.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::mem; 3 | use std::os::raw::c_void; 4 | use std::ptr; 5 | use std::sync::atomic::{AtomicUsize, Ordering}; 6 | 7 | use windows::Win32::System::Memory::*; 8 | use windows::Win32::System::SystemInformation::*; 9 | 10 | use super::SysStack; 11 | 12 | #[path = "overflow_windows.rs"] 13 | pub mod overflow; 14 | 15 | pub unsafe fn allocate_stack(size: usize) -> io::Result { 16 | let ptr = VirtualAlloc( 17 | Some(ptr::null()), 18 | size, 19 | MEM_COMMIT | MEM_RESERVE, 20 | PAGE_READWRITE, 21 | ); 22 | 23 | if ptr.is_null() { 24 | Err(io::Error::last_os_error()) 25 | } else { 26 | Ok(SysStack::new( 27 | (ptr as usize + size) as *mut c_void, 28 | ptr as *mut c_void, 29 | )) 30 | } 31 | } 32 | 33 | pub unsafe fn protect_stack(stack: &SysStack) -> io::Result { 34 | let page_size = page_size(); 35 | let mut old_prot = 
mem::zeroed(); 36 | 37 | debug_assert!(stack.len() % page_size == 0 && stack.len() != 0); 38 | 39 | let ret = VirtualProtect( 40 | stack.bottom(), 41 | page_size, 42 | PAGE_READONLY | PAGE_GUARD, 43 | &mut old_prot, 44 | ); 45 | 46 | if ret.is_err() { 47 | Err(io::Error::last_os_error()) 48 | } else { 49 | let bottom = (stack.bottom() as usize + page_size) as *mut c_void; 50 | Ok(SysStack::new(stack.top(), bottom)) 51 | } 52 | } 53 | 54 | pub unsafe fn deallocate_stack(ptr: *mut c_void, _: usize) { 55 | let _ = VirtualFree(ptr, 0, MEM_RELEASE); 56 | } 57 | 58 | pub fn page_size() -> usize { 59 | static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0); 60 | 61 | let mut ret = PAGE_SIZE.load(Ordering::Relaxed); 62 | 63 | if ret == 0 { 64 | ret = unsafe { 65 | let mut info = mem::zeroed(); 66 | GetSystemInfo(&mut info); 67 | info.dwPageSize as usize 68 | }; 69 | 70 | PAGE_SIZE.store(ret, Ordering::Relaxed); 71 | } 72 | 73 | ret 74 | } 75 | 76 | // Windows does not seem to provide a stack limit API 77 | pub fn min_stack_size() -> usize { 78 | page_size() 79 | } 80 | 81 | // Windows does not seem to provide a stack limit API 82 | pub fn max_stack_size() -> usize { 83 | usize::MAX 84 | } 85 | -------------------------------------------------------------------------------- /src/yield_.rs: -------------------------------------------------------------------------------- 1 | //! # yield 2 | //! 3 | //! generator yield implementation 4 | //! 5 | use std::any::Any; 6 | use std::sync::atomic; 7 | 8 | use crate::gen_impl::{unlikely, Generator}; 9 | use crate::reg_context::RegContext; 10 | use crate::rt::{is_generator, Context, ContextStack, Error}; 11 | 12 | /// it's a special return instruction that yield nothing 13 | /// but only terminate the generator safely 14 | #[macro_export] 15 | macro_rules! 
/// raw yield without catch passed in para
///
/// Stores `v` as the generator's yielded value and switches execution back
/// to the parent context; execution resumes here on the next resume of the
/// generator. Panics with `Error::Cancel` when the generator was cancelled
/// while suspended, so the generator's stack unwinds cleanly.
#[inline]
fn raw_yield<T: Any>(env: &ContextStack, context: &mut Context, v: T) {
    // check the context — yielding outside a generator is a logic error
    if unlikely(!context.is_generator()) {
        panic!("yield from none generator context");
    }

    context.set_ret(v);
    // NOTE(review): `_ref` appears to encode the running/suspended state;
    // the decrement marks this context as suspended before the swap —
    // confirm against the Context definition in rt.rs.
    context._ref -= 1;
    raw_yield_now(env, context);

    // here we just panic to exit the func
    // (`_ref != 1` after resuming means we were cancelled, not resumed)
    if unlikely(context._ref != 1) {
        std::panic::panic_any(Error::Cancel);
    }
}
/// `yield_from`
///
/// Drives the sub-generator `g` to completion, re-yielding every value it
/// produces to this generator's own caller, then returns the last para
/// sent in by the caller (i.e. the value received after `g` finished).
#[deprecated(since = "0.6.18", note = "please use `scope` version instead")]
pub fn yield_from<A: Any, T: Any>(mut g: Generator<A, T>) -> Option<A> {
    let env = ContextStack::current();
    let context = env.top();
    let mut p = context.get_para();
    while unlikely(!g.is_done()) {
        match g.raw_send(p) {
            // presumably None means the sub-generator produced no value
            // (finished) — nothing left to forward
            None => return None,
            // forward the sub-generator's value to our own caller
            Some(r) => raw_yield(&env, context, r),
        }
        // pick up whatever was sent to us while suspended
        p = context.get_para();
    }
    drop(g); // explicitly consume g
    p
}
&parent.regs); 148 | } 149 | 150 | /// coroutine get passed in yield para 151 | pub fn co_get_yield() -> Option { 152 | ContextStack::current() 153 | .co_ctx() 154 | .and_then(|ctx| ctx.co_get_para()) 155 | } 156 | 157 | /// set current coroutine para in user space 158 | pub fn co_set_para(para: A) { 159 | if let Some(ctx) = ContextStack::current().co_ctx() { 160 | ctx.co_set_para(para) 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /tests/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(deprecated)] 2 | #![allow(unused_assignments)] 3 | 4 | extern crate generator; 5 | 6 | use generator::*; 7 | 8 | #[test] 9 | fn test_return() { 10 | let mut g = Gn::new_scoped(|_s| 42u32); 11 | assert_eq!(g.next(), Some(42)); 12 | assert!(g.is_done()); 13 | } 14 | 15 | #[test] 16 | fn generator_is_done() { 17 | let mut g = Gn::<()>::new(|| { 18 | yield_with(()); 19 | }); 20 | 21 | g.next(); 22 | assert!(!g.is_done()); 23 | g.next(); 24 | assert!(g.is_done()); 25 | } 26 | 27 | #[test] 28 | fn generator_is_done1() { 29 | let mut g = Gn::new_scoped(|mut s| { 30 | s.yield_(2); 31 | done!(); 32 | }); 33 | 34 | assert_eq!(g.next(), Some(2)); 35 | assert!(!g.is_done()); 36 | assert_eq!(g.next(), None); 37 | assert!(g.is_done()); 38 | } 39 | 40 | #[test] 41 | fn generator_is_done_with_drop() { 42 | let mut g = Gn::new_scoped(|mut s| { 43 | s.yield_(String::from("string")); 44 | done!(); 45 | }); 46 | 47 | assert_eq!(g.next(), Some(String::from("string"))); 48 | assert!(!g.is_done()); 49 | assert_eq!(g.next(), None); 50 | assert!(g.is_done()); 51 | } 52 | 53 | #[test] 54 | fn test_yield_a() { 55 | let mut g = Gn::::new(|| { 56 | let r: i32 = yield_(10).unwrap(); 57 | r * 2 58 | }); 59 | 60 | // first start the generator 61 | let i = g.raw_send(None).unwrap(); 62 | assert_eq!(i, 10); 63 | let i = g.send(3); 64 | assert_eq!(i, 6); 65 | assert!(g.is_done()); 66 | } 67 | 68 | #[test] 69 
// A scoped-local generator may capture non-'static data (here an Rc);
// each resume runs the body up to the following yield point.
#[test]
fn test_scoped() {
    use std::cell::RefCell;
    use std::rc::Rc;

    let x = Rc::new(RefCell::new(10));

    let x1 = x.clone();
    let mut g = Gn::<()>::new_scoped_local(move |mut s| {
        *x1.borrow_mut() = 20;
        s.yield_with(());
        *x1.borrow_mut() = 5;
    });

    // first resume: runs until yield_with
    g.next();
    assert!(*x.borrow() == 20);

    // second resume: runs to completion
    g.next();
    assert!(*x.borrow() == 5);

    assert!(g.is_done());
}
// Demonstrates handing out a (lifetime-transmuted) mutable reference to
// generator-local state: the coroutine stack stays alive while suspended,
// so the reference remains valid until the generator is dropped.
#[test]
fn test_inner_ref() {
    let mut g = Gn::<()>::new_scoped(|mut s| {
        // setup something
        let mut x: u32 = 10;

        // return internal ref not compiled because the
        // lifetime of internal ref is smaller than the generator
        // but the generator interface require the return type's
        // lifetime bigger than the generator

        // the x memory remains on heap even returned!
        // the life time of x is associated with the generator
        // however modify this internal value is really unsafe
        // but this is useful pattern for setup and teardown
        // which can be put in the same place
        unsafe {
            // SAFETY(test): transmute only erases the borrow lifetime;
            // x lives on the suspended generator stack until drop
            let mut_ref: &mut u32 = std::mem::transmute(&mut x);
            s.yield_unsafe(mut_ref);
        };

        // this was modified by the invoker
        assert!(x == 5);
        // teardown happened when the generator get dropped
        done!()
    });

    // use the resource setup from generator
    let a = g.next().unwrap();
    assert!(*a == 10);
    // write through the yielded reference; observed inside the generator
    *a = 5;
    // a keeps valid until the generator dropped
}
// A panic inside the generator body must propagate to the resuming caller
// (caught here with catch_unwind) while side effects before the panic stick.
#[test]
fn test_panic_inside() {
    use std::panic::{catch_unwind, AssertUnwindSafe};
    let mut x = 10;
    {
        // AssertUnwindSafe: we deliberately observe x after the unwind
        let mut wrapper = AssertUnwindSafe(&mut x);
        if let Err(panic) = catch_unwind(move || {
            let mut g = Gn::<()>::new(|| {
                **wrapper = 5;
                panic!("panic inside!");
            });
            g.resume();
        }) {
            match panic.downcast_ref::<&str>() {
                // why can't get the message here?? is it lost?
                Some(msg) => println!("get panic: {msg:?}"),
                None => println!("can't get panic message"),
            }
        }
        // wrapper dropped here
    }

    // the write before the panic must have landed
    assert!(x == 5);
}
// yield_from with bidirectional data: values sent to the outer generator
// are forwarded into the inner one, and the inner generator's final return
// becomes yield_from's result in the outer body.
#[test]
fn test_yield_from_send() {
    let mut g = Gn::<u32, u32>::new(|| {
        let g1 = Gn::<u32, u32>::new(|| {
            let mut i: u32 = yield_(1u32).unwrap();
            i = yield_(i * 2).unwrap();
            i * 2
        });

        // inner returns 10 * 2 / 2? no — the final send(10) below
        // arrives here as yield_from's result
        let i = yield_from(g1).unwrap();
        assert_eq!(i, 10);

        // here we need a unused return to indicate this function's return type
        0u32
    });

    // first start the generator
    let n = g.raw_send(None).unwrap();
    assert!(n == 1);

    let n = g.send(3);
    assert!(n == 6);
    let n = g.send(4);
    assert!(n == 8);
    let n = g.send(10);
    assert!(n == 0);
    assert!(g.is_done());
}
// scoped_init may re-arm a finished generator in place, reusing its stack:
// after completion, re-initializing replays the same sequence.
#[test]
fn test_re_init() {
    let clo = || {
        |mut s: Scope<'_, 'static, (), _>| {
            s.yield_(0);
            s.yield_(3);
            5
        }
    };

    // 0x800 is the stack size hint for this small generator
    let mut g = Gn::new_opt(0x800, || 0);
    g.scoped_init(clo());

    assert_eq!(g.next(), Some(0));
    assert_eq!(g.next(), Some(3));
    assert_eq!(g.next(), Some(5));
    assert!(g.is_done());

    // re-init generator
    g.scoped_init(clo());

    assert_eq!(g.next(), Some(0));
    assert_eq!(g.next(), Some(3));
    assert_eq!(g.next(), Some(5));
    assert!(g.is_done());
}
// Floating-point state must survive the context switches: x, y, z live
// across two yields and are combined with values sent from outside.
#[test]
fn test_yield_float() {
    let mut g = Gn::<f64>::new(|| {
        let r: f64 = yield_(10.0).unwrap();
        let x = r * 2.0; // 6
        let y = x * 9.0; // 54
        let z = y / 3.0; // 18
        let r: f64 = yield_(6.0).unwrap();
        // 6 * 4 * 54 * 18 = 23328
        x * r * y * z
    });

    // first start the generator
    let i = g.raw_send(None).unwrap();
    let x = i * 10.0;
    assert_eq!(i, 10.0);
    let i = g.send(3.0);
    assert_eq!(i, 6.0);
    // x / 25.0 == 4.0 is sent in as the second r
    let i = g.send(x / 25.0);
    assert_eq!(i, 23328.0);
    assert!(g.is_done());
}