├── .github └── workflows │ └── ci.yml ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── ci.sh ├── rust-toolchain.toml ├── rustfmt.toml ├── src ├── allocator.rs ├── cstr.rs ├── env.rs ├── error.rs ├── fmt.rs ├── fs │ ├── directory.rs │ └── mod.rs ├── io.rs ├── lib.rs ├── mem │ ├── impls.rs │ ├── macros.rs │ ├── mod.rs │ └── x86_64.rs ├── net.rs ├── prelude.rs ├── spinlock.rs └── syscalls.rs └── veneer-macros ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT └── src └── lib.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | pull_request: 6 | branches: 7 | - main 8 | 9 | name: CI 10 | 11 | jobs: 12 | suite_matrix: 13 | strategy: 14 | matrix: 15 | suite: [style, test] 16 | runs-on: ubuntu-latest 17 | name: ${{ matrix.suite }} 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Rustup 21 | run: | 22 | rustup self update 23 | rustup update 24 | - uses: actions/cache@v3 25 | with: 26 | path: | 27 | ~/.cargo/ 28 | target/ 29 | key: ${{ runner.os }}-${{ matrix.suite }}-${{ hashFiles('**/Cargo.lock') }} 30 | restore-keys: ${{ runner.os }}-$(rustc --version) 31 | - name: Run 32 | env: 33 | AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} 34 | AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} 35 | AWS_DEFAULT_REGION: us-east-1 36 | CARGO_INCREMENTAL: 0 37 | RUSTFLAGS: -Cdebuginfo=0 38 | run: | 39 | echo "::group::Install dependencies" 40 | set -o pipefail 41 | cargo install htmlpty --locked --git https://github.com/saethlin/miri-tools 42 | set +e 43 | echo "::endgroup" 44 | htmlpty bash ci.sh ${{ matrix.suite }} 2> output.html 45 | FAILED=$? 46 | aws s3 cp output.html s3://miri-bot-dev/${GITHUB_REPOSITORY}/${GITHUB_RUN_ID}/${{ matrix.suite }}.html 47 | LOG_URL=https://miri-bot-dev.s3.amazonaws.com/${GITHUB_REPOSITORY}/${GITHUB_RUN_ID}/${{ matrix.suite }}.html 48 | if [ $FAILED -ne 0 ] 49 | then 50 | curl -L \ 51 | -X POST \ 52 | -H "Accept: application/vnd.github+json" \ 53 | -H "Authorization: Bearer ${{ secrets.github_token }}" \ 54 | -H "X-GitHub-Api-Version: 2022-11-28" \ 55 | ${{ github.event.pull_request.comments_url }} \ 56 | -d "{\"body\":\"$LOG_URL\"}" 57 | fi 58 | exit $FAILED 59 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target/ 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "veneer" 3 | version = "0.2.3" 4 | authors = ["Ben Kimock "] 5 | license = "MIT OR Apache-2.0" 6 | description = "A very thin std-like library that doesn't depend on libc" 7 | documentation = "https://docs.rs/veneer" 8 | repository = "https://github.com/saethlin/veneer" 9 | edition = "2018" 10 | include = ["src/**/*.rs", "README.md", "LICENSE-MIT", "LICENSE-APACHE"] 11 | 12 | [dependencies] 13 | bitflags = "2" 14 | libc = { version = "0.2", default-features = false } 15 | veneer-macros = "0.1" 16 | 17 | [target.'cfg(unix)'.dependencies] 18 | sc = "0.2" 19 | 20 | [dev-dependencies] 21 | itoa = { version = "1", default-features = false } 22 | 23 | [features] 24 | rt = [] 25 | mem = [] 26 | default = ["mem"] 27 | -------------------------------------------------------------------------------- /LICENSE-APACHE: 
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Essentially, a replacement for the Rust standard library on Linux. 2 | 3 | The Rust standard library makes tradeoffs in both API and implementation which are generally good but are inappropriate for some uses. This library offers an alternative perspective. In particular, it aims for: 4 | 5 | * No linkage against a libc 6 | * A minimum of unsafe code outside of that required to write syscall wrappers 7 | * The lowest runtime overhead possible, even where that makes interfaces awkward 8 | 9 | These motivations primarily come from my experience trying to implement a POSIX ls that isn't significantly larger or slower than GNU's ls. For small programs, the accidental complexity of combining Rust's standard library with a libc implementation becomes the dominant contributor of both code size and execution speed. 
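A rough sketch of what a program built on veneer can look like (illustrative only, not taken from the crate's documentation): it assumes the `rt` feature is enabled so that veneer supplies `_start`, the panic handler, and the global allocator, and that the `#[veneer::main]` attribute re-exported from `veneer-macros` hooks an ordinary `main` up to that runtime; the exact signature the attribute accepts may differ.

```rust
#![no_std]
#![no_main]

use veneer::println;

#[veneer::main]
fn main() {
    // veneer::env::args() reads argc/argv directly off the initial stack,
    // yielding CStr values without ever touching libc.
    for arg in veneer::env::args() {
        println!("{}", arg);
    }
}
```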
10 | -------------------------------------------------------------------------------- /ci.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | exec 2>&1 5 | export TERM=xterm-256color 6 | 7 | function group { 8 | echo "::group::$@" 9 | $@ 10 | echo "::endgroup" 11 | } 12 | 13 | if [[ "$1" == "style" ]] 14 | then 15 | group cargo fmt --check 16 | else 17 | group cargo test 18 | group cargo test --features=rt 19 | fi 20 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "nightly" 3 | components = ["rustfmt", "clippy"] 4 | profile = "minimal" 5 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | reorder_imports = true 2 | imports_granularity = "Crate" 3 | -------------------------------------------------------------------------------- /src/allocator.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::missing_inline_in_public_items)] 2 | use crate::{spinlock::SpinLock, syscalls}; 3 | use core::{ 4 | alloc::{GlobalAlloc, Layout}, 5 | ptr, 6 | }; 7 | 8 | // Allocates in chunks of 64 bytes. The `usage_mask` is a bitmask that is 1 where something is 9 | // allocated. 10 | pub struct SmallAllocator { 11 | slab: Slab, 12 | usage_mask: u64, 13 | } 14 | 15 | #[repr(align(64))] 16 | struct Slab([u8; 4096]); 17 | 18 | impl core::ops::Deref for Slab { 19 | type Target = [u8]; 20 | fn deref(&self) -> &[u8] { 21 | &self.0[..] 22 | } 23 | } 24 | 25 | impl core::ops::DerefMut for Slab { 26 | fn deref_mut(&mut self) -> &mut [u8] { 27 | &mut self.0[..] 
28 | } 29 | } 30 | 31 | impl SmallAllocator { 32 | fn to_blocks(size: usize) -> usize { 33 | let remainder = size % 64; 34 | let size = if remainder == 0 { 35 | size 36 | } else { 37 | size + 64 - remainder 38 | }; 39 | size / 64 40 | } 41 | 42 | pub fn alloc(&mut self, layout: Layout) -> *mut u8 { 43 | if layout.align() > 64 || layout.size() == 0 || layout.size() > 4096 { 44 | return core::ptr::null_mut(); 45 | } 46 | 47 | let blocks = Self::to_blocks(layout.size()); 48 | 49 | let my_mask = u64::MAX << (64 - blocks); 50 | 51 | for i in 0..=(64 - blocks) { 52 | if ((my_mask >> i) & self.usage_mask) == 0 { 53 | self.usage_mask |= my_mask >> i; 54 | return self.slab[64 * i..].as_mut_ptr(); 55 | } 56 | } 57 | 58 | core::ptr::null_mut() 59 | } 60 | 61 | unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) -> bool { 62 | let offset = ptr.offset_from(self.slab.as_mut_ptr()); 63 | if !(0..4096).contains(&offset) { 64 | return false; 65 | } 66 | 67 | let offset_blocks = offset as usize / 64; 68 | let blocks = Self::to_blocks(layout.size()); 69 | 70 | let my_mask = (u64::MAX << (64 - blocks)) >> offset_blocks; 71 | 72 | assert!(my_mask & self.usage_mask == my_mask); 73 | self.usage_mask ^= my_mask; 74 | 75 | true 76 | } 77 | } 78 | 79 | pub struct Allocator { 80 | cache: SpinLock<[(bool, *mut u8, usize); 64]>, 81 | small: SpinLock, 82 | } 83 | 84 | impl Allocator { 85 | pub const fn new() -> Self { 86 | Self { 87 | cache: SpinLock::new([(false, ptr::null_mut(), 0); 64]), 88 | small: SpinLock::new(SmallAllocator { 89 | slab: Slab([0u8; 4096]), 90 | usage_mask: 0, 91 | }), 92 | } 93 | } 94 | } 95 | 96 | fn round_to_page(layout: Layout) -> Layout { 97 | let remainder = layout.size() % 4096; 98 | let size = if remainder == 0 { 99 | layout.size() 100 | } else { 101 | layout.size() + 4096 - remainder 102 | }; 103 | match Layout::from_size_align(size, layout.align()) { 104 | Ok(l) => l, 105 | Err(_) => alloc::alloc::handle_alloc_error(layout), 106 | } 107 | } 108 | 109 | unsafe impl GlobalAlloc for Allocator { 110 | unsafe fn alloc(&self, mut layout: Layout) -> *mut u8 { 111 | let small_ptr = self.small.lock().alloc(layout); 112 | if !small_ptr.is_null() { 113 | return small_ptr; 114 | } 115 | 116 | layout = round_to_page(layout); 117 | 118 | let mut cache = self.cache.lock(); 119 | 120 | // Find the closest fit 121 | if let Some((is_used, ptr, _)) = cache 122 | .iter_mut() 123 | .filter(|(is_used, _, len)| !*is_used && *len >= layout.size()) 124 | .min_by_key(|(_, _, len)| *len - layout.size()) 125 | { 126 | *is_used = true; 127 | return *ptr; 128 | } 129 | 130 | // We didn't find a mapping that's already big enough, resize the largest one. 
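// The resize path picks the largest unused, non-null cached mapping, marks it as
// used, and grows it with mremap(MREMAP_MAYMOVE), so the returned address may not
// match the one that was previously cached. If mremap fails, the cache slot is
// overwritten with a null pointer and null is returned to the caller.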
131 | if let Some((is_used, ptr, len)) = cache 132 | .iter_mut() 133 | .filter(|(is_used, ptr, _)| !*is_used && !ptr.is_null()) 134 | .max_by_key(|(_, _, len)| *len) 135 | { 136 | *is_used = true; 137 | *ptr = syscalls::mremap(*ptr, *len, layout.size(), libc::MREMAP_MAYMOVE) 138 | .unwrap_or(core::ptr::null_mut()); 139 | return *ptr; 140 | } 141 | 142 | syscalls::mmap( 143 | core::ptr::null_mut(), 144 | layout.size(), 145 | libc::PROT_READ | libc::PROT_WRITE, 146 | libc::MAP_ANON | libc::MAP_PRIVATE, 147 | -1, 148 | 0, 149 | ) 150 | .unwrap_or(core::ptr::null_mut()) 151 | } 152 | 153 | unsafe fn dealloc(&self, ptr: *mut u8, mut layout: Layout) { 154 | if self.small.lock().dealloc(ptr, layout) { 155 | return; 156 | } 157 | 158 | layout = round_to_page(layout); 159 | 160 | let mut cache = self.cache.lock(); 161 | 162 | // Look for this entry in the cache and mark it as unused 163 | for (is_used, cache_ptr, _len) in cache.iter_mut() { 164 | if ptr == *cache_ptr { 165 | *is_used = false; 166 | return; 167 | } 168 | } 169 | 170 | // We didn't find it in the cache, try to add it 171 | for (is_used, cache_ptr, len) in cache.iter_mut() { 172 | if !*is_used { 173 | *cache_ptr = ptr; 174 | *len = layout.size(); 175 | return; 176 | } 177 | } 178 | // This is technically fallible, but it seems like there isn't a way to indicate failure 179 | // when deallocating. 180 | let _ = syscalls::munmap(ptr, layout.size()); 181 | } 182 | 183 | unsafe fn realloc(&self, ptr: *mut u8, mut layout: Layout, mut new_size: usize) -> *mut u8 { 184 | let mut small = self.small.lock(); 185 | let offset = ptr.offset_from(small.slab.as_mut_ptr()); 186 | if (0..4096).contains(&offset) { 187 | drop(small); 188 | let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); 189 | let new_ptr = self.alloc(new_layout); 190 | core::ptr::copy_nonoverlapping(ptr, new_ptr, core::cmp::min(layout.size(), new_size)); 191 | self.dealloc(ptr, layout); 192 | return new_ptr; 193 | } 194 | 195 | layout = round_to_page(layout); 196 | let remainder = new_size % 4096; 197 | new_size = if remainder == 0 { 198 | new_size 199 | } else { 200 | new_size + 4096 - remainder 201 | }; 202 | 203 | if layout.size() >= new_size { 204 | return ptr; 205 | } 206 | 207 | let mut cache = self.cache.lock(); 208 | 209 | for (is_used, cache_ptr, len) in cache.iter_mut() { 210 | if *cache_ptr == ptr { 211 | *len = new_size; 212 | assert!(*is_used); 213 | *cache_ptr = syscalls::mremap(ptr, layout.size(), new_size, libc::MREMAP_MAYMOVE) 214 | .unwrap_or(core::ptr::null_mut()); 215 | return *cache_ptr; 216 | } 217 | } 218 | 219 | syscalls::mremap(ptr, layout.size(), new_size, libc::MREMAP_MAYMOVE) 220 | .unwrap_or(core::ptr::null_mut()) 221 | } 222 | } 223 | 224 | impl Drop for Allocator { 225 | fn drop(&mut self) { 226 | for (_, ptr, len) in self.cache.lock().iter() { 227 | unsafe { 228 | let _ = syscalls::munmap(*ptr, *len); 229 | } 230 | } 231 | } 232 | } 233 | 234 | #[cfg(test)] 235 | mod tests { 236 | use super::*; 237 | 238 | #[test] 239 | fn small() { 240 | let mut alloc = SmallAllocator { 241 | slab: Slab([0u8; 4096]), 242 | usage_mask: 0, 243 | }; 244 | 245 | for size in 1..4096 { 246 | let remainder = size % 64; 247 | let rounded = if remainder == 0 { 248 | size 249 | } else { 250 | size + 64 - remainder 251 | }; 252 | let blocks = rounded / 64; 253 | 254 | assert!(blocks * 64 >= size); 255 | 256 | let layout = Layout::from_size_align(size, 1).unwrap(); 257 | 258 | let ptr = alloc.alloc(layout); 259 | 260 | assert!(!ptr.is_null()); 261 | 262 
| assert_eq!(blocks, alloc.usage_mask.count_ones() as usize); 263 | 264 | unsafe { 265 | assert!(alloc.dealloc(ptr, layout)); 266 | } 267 | 268 | assert_eq!(0, alloc.usage_mask.count_ones()); 269 | } 270 | } 271 | } 272 | -------------------------------------------------------------------------------- /src/cstr.rs: -------------------------------------------------------------------------------- 1 | use core::{fmt, str}; 2 | 3 | #[derive(Clone, Copy, PartialEq)] 4 | pub struct CStr<'a> { 5 | bytes: &'a [u8], 6 | } 7 | 8 | impl Default for CStr<'static> { 9 | #[inline] 10 | fn default() -> Self { 11 | CStr::from_bytes(&[0]) 12 | } 13 | } 14 | 15 | impl<'a> CStr<'a> { 16 | /// # Safety 17 | /// 18 | /// This function must be called with a pointer to a null-terminated array of bytes 19 | #[inline] 20 | pub unsafe fn from_ptr<'b>(ptr: *const u8) -> CStr<'b> { 21 | let mut len = 0; 22 | while *ptr.add(len) != 0 { 23 | len += 1; 24 | } 25 | CStr { 26 | bytes: core::slice::from_raw_parts(ptr.cast::(), len + 1), 27 | } 28 | } 29 | 30 | #[inline] 31 | pub fn from_bytes(bytes: &'a [u8]) -> CStr<'a> { 32 | assert!( 33 | bytes.last() == Some(&0), 34 | "attempted to construct a CStr from a slice without a null terminator" 35 | ); 36 | CStr { bytes } 37 | } 38 | 39 | #[inline] 40 | pub fn as_bytes(&self) -> &'a [u8] { 41 | unsafe { self.bytes.get_unchecked(..self.bytes.len() - 1) } 42 | } 43 | 44 | #[inline] 45 | pub fn get(&self, i: usize) -> Option { 46 | self.bytes.get(i).copied() 47 | } 48 | 49 | #[inline] 50 | pub fn as_ptr(&self) -> *const u8 { 51 | self.bytes.as_ptr() 52 | } 53 | } 54 | 55 | impl core::ops::Deref for CStr<'_> { 56 | type Target = [u8]; 57 | #[inline] 58 | fn deref(&self) -> &Self::Target { 59 | self.as_bytes() 60 | } 61 | } 62 | 63 | impl PartialEq<&[u8]> for CStr<'_> { 64 | #[inline] 65 | fn eq(&self, bytes: &&[u8]) -> bool { 66 | if bytes.last() == Some(&0) { 67 | self.bytes == *bytes 68 | } else { 69 | &self.bytes[..self.bytes.len() - 1] == *bytes 70 | } 71 | } 72 | } 73 | 74 | impl PartialEq<&str> for CStr<'_> { 75 | #[inline] 76 | fn eq(&self, other: &&str) -> bool { 77 | &self.bytes[..self.bytes.len() - 1] == other.as_bytes() 78 | } 79 | } 80 | 81 | impl fmt::Debug for CStr<'_> { 82 | #[inline] 83 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 84 | match str::from_utf8(self.as_bytes()) { 85 | Ok(s) => s.fmt(f), 86 | Err(e) => str::from_utf8(&self.as_bytes()[..e.valid_up_to()]) 87 | .unwrap() 88 | .fmt(f), 89 | } 90 | } 91 | } 92 | 93 | impl fmt::Display for CStr<'_> { 94 | #[inline] 95 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 96 | match str::from_utf8(self.as_bytes()) { 97 | Ok(s) => s.fmt(f), 98 | Err(e) => str::from_utf8(&self.as_bytes()[..e.valid_up_to()]) 99 | .unwrap() 100 | .fmt(f), 101 | } 102 | } 103 | } 104 | 105 | #[cfg(test)] 106 | mod tests { 107 | use super::*; 108 | use alloc::vec::Vec; 109 | 110 | #[test] 111 | fn cstr_from_ptr() { 112 | for len in 0..255 { 113 | let mut buf: Vec = core::iter::repeat(123).take(len).collect(); 114 | buf.push(0); 115 | let the_str = unsafe { CStr::from_ptr(buf.as_ptr()) }; 116 | assert_eq!(the_str.len(), len); 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /src/env.rs: -------------------------------------------------------------------------------- 1 | use crate::CStr; 2 | use core::sync::atomic::{AtomicIsize, AtomicPtr, Ordering::SeqCst}; 3 | 4 | pub(crate) static ARGC: AtomicIsize = AtomicIsize::new(-1); 5 | pub(crate) static ARGV: 
AtomicPtr<*const u8> = AtomicPtr::new(core::ptr::null_mut()); 6 | 7 | #[inline] 8 | pub fn args() -> impl Iterator<Item = CStr<'static>> { 9 | unsafe { 10 | let argc = ARGC.load(SeqCst); 11 | let argv = ARGV.load(SeqCst); 12 | assert!(!argv.is_null() && argc != -1); 13 | (0..argc).map(move |i| CStr::from_ptr(*argv.offset(i))) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | #[derive(Clone, Copy)] 2 | pub struct Error(pub libc::c_int); 3 | 4 | impl PartialEq<i32> for Error { 5 | #[inline] 6 | fn eq(&self, other: &i32) -> bool { 7 | self.0 == *other 8 | } 9 | } 10 | 11 | impl core::fmt::Debug for Error { 12 | #[inline] 13 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 14 | <Self as core::fmt::Display>::fmt(self, f) 15 | } 16 | } 17 | 18 | impl core::fmt::Display for Error { 19 | #[inline] 20 | fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { 21 | write!(f, "OS Error {}", self.0) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/fmt.rs: -------------------------------------------------------------------------------- 1 | // This formatting code is adapted from the itoa crate; it favors code size and compromises 2 | // performance when formatting large integers. 3 | 4 | const U64_MAX_LEN: usize = 20; 5 | 6 | const DEC_DIGITS_LUT: &[u8] = b"\ 7 | 0001020304050607080910111213141516171819\ 8 | 2021222324252627282930313233343536373839\ 9 | 4041424344454647484950515253545556575859\ 10 | 6061626364656667686970717273747576777879\ 11 | 8081828384858687888990919293949596979899"; 12 | 13 | #[derive(Default)] 14 | pub struct Buffer { 15 | buf: [u8; U64_MAX_LEN], 16 | } 17 | 18 | impl Buffer { 19 | #[inline] 20 | pub fn new() -> Self { 21 | Self::default() 22 | } 23 | 24 | #[inline] 25 | pub fn format(&mut self, mut n: u64) -> &[u8] { 26 | let mut curr = self.buf.len(); 27 | let buf = &mut self.buf; 28 | 29 | if n == 0 { 30 | buf[buf.len() - 1] = b'0'; 31 | return &buf[buf.len() - 1..]; 32 | } 33 | 34 | while n >= 10 { 35 | let d1 = ((n % 100) << 1) as usize; 36 | n /= 100; 37 | curr -= 2; 38 | buf[curr..curr + 2].copy_from_slice(&DEC_DIGITS_LUT[d1..d1 + 2]); 39 | } 40 | 41 | if n > 0 { 42 | curr -= 1; 43 | buf[curr] = (n as u8) + b'0'; 44 | } 45 | 46 | &buf[curr..]
47 | } 48 | } 49 | 50 | #[cfg(test)] 51 | mod tests { 52 | use super::*; 53 | 54 | #[test] 55 | fn all() { 56 | for i in 0..(u16::MAX as u64) { 57 | assert_eq!( 58 | itoa::Buffer::new().format(i).as_bytes(), 59 | Buffer::new().format(i) 60 | ); 61 | } 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /src/fs/directory.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | syscalls, 3 | syscalls::{OpenFlags, OpenMode}, 4 | CStr, Error, 5 | }; 6 | use alloc::{vec, vec::Vec}; 7 | use core::convert::TryInto; 8 | use libc::c_int; 9 | 10 | pub struct Directory { 11 | fd: c_int, 12 | } 13 | 14 | impl Directory { 15 | #[inline] 16 | pub fn open(path: CStr) -> Result { 17 | Ok(Self { 18 | fd: syscalls::openat( 19 | libc::AT_FDCWD, 20 | path, 21 | OpenFlags::RDONLY | OpenFlags::DIRECTORY | OpenFlags::CLOEXEC, 22 | OpenMode::empty(), 23 | )?, 24 | }) 25 | } 26 | 27 | #[inline] 28 | pub fn raw_fd(&self) -> c_int { 29 | self.fd 30 | } 31 | 32 | #[inline] 33 | pub fn read(&self) -> Result { 34 | let mut contents = vec![0u8; 4096]; 35 | 36 | // First, read using the first half of the allocation 37 | let mut previous_bytes_used = syscalls::getdents64(self.fd, &mut contents[..2048])?; 38 | let mut bytes_used = previous_bytes_used; 39 | 40 | // If we read something, try using the rest of the allocation 41 | if previous_bytes_used > 0 { 42 | bytes_used += syscalls::getdents64(self.fd, &mut contents[previous_bytes_used..])?; 43 | } 44 | // Then, if we read something on the second time, start reallocating. 45 | 46 | // Must run this loop until getdents64 returns no new entries 47 | // Yes, even if there is plenty of unused space. Some filesystems (at least sshfs) rely on this behavior 48 | while bytes_used != previous_bytes_used { 49 | previous_bytes_used = bytes_used; 50 | contents.extend(core::iter::repeat(0).take(contents.capacity())); 51 | bytes_used += syscalls::getdents64(self.fd, &mut contents[previous_bytes_used..])?; 52 | } 53 | 54 | contents.truncate(bytes_used); 55 | 56 | Ok(DirectoryContents { contents }) 57 | } 58 | } 59 | 60 | impl Drop for Directory { 61 | #[inline] 62 | fn drop(&mut self) { 63 | let _ = syscalls::close(self.fd); 64 | } 65 | } 66 | 67 | pub struct DirectoryContents { 68 | contents: Vec, 69 | } 70 | 71 | impl DirectoryContents { 72 | #[inline] 73 | pub fn iter(&self) -> IterDir { 74 | IterDir { 75 | remaining: &self.contents[..], 76 | } 77 | } 78 | } 79 | 80 | pub struct IterDir<'a> { 81 | remaining: &'a [u8], 82 | } 83 | 84 | impl<'a> Iterator for IterDir<'a> { 85 | type Item = DirEntry<'a>; 86 | 87 | #[inline] 88 | fn next(&mut self) -> Option { 89 | if self.remaining.is_empty() { 90 | return None; 91 | } 92 | 93 | let inode = u64::from_ne_bytes(self.remaining[..8].try_into().unwrap()); 94 | // We don't need to read the offset member 95 | let reclen = u16::from_ne_bytes(self.remaining[16..18].try_into().unwrap()); 96 | let d_type = self.remaining[18]; 97 | 98 | let mut end = 19; 99 | while self.remaining[end] != 0 { 100 | end += 1; 101 | } 102 | let name = CStr::from_bytes(&self.remaining[19..end + 1]); 103 | 104 | self.remaining = &self.remaining[reclen as usize..]; 105 | 106 | Some(DirEntry { 107 | inode, 108 | name, 109 | d_type: match d_type { 110 | 0 => DType::UNKNOWN, 111 | 1 => DType::FIFO, 112 | 2 => DType::CHR, 113 | 4 => DType::DIR, 114 | 6 => DType::BLK, 115 | 8 => DType::REG, 116 | 10 => DType::LNK, 117 | 12 => DType::SOCK, 118 | _ => DType::UNKNOWN, 119 | }, 120 | 
}) 121 | } 122 | 123 | #[inline] 124 | fn size_hint(&self) -> (usize, Option) { 125 | ( 126 | self.remaining.len() / core::mem::size_of::(), 127 | Some(self.remaining.len() / (core::mem::size_of::() - 256)), 128 | ) 129 | } 130 | } 131 | 132 | #[derive(Clone)] 133 | pub struct DirEntry<'a> { 134 | inode: libc::c_ulong, 135 | name: CStr<'a>, 136 | d_type: DType, 137 | } 138 | 139 | impl<'a> DirEntry<'a> { 140 | #[inline] 141 | pub fn name(&self) -> CStr<'a> { 142 | self.name 143 | } 144 | 145 | #[inline] 146 | pub fn inode(&self) -> libc::c_ulong { 147 | self.inode 148 | } 149 | 150 | #[inline] 151 | pub fn d_type(&self) -> DType { 152 | self.d_type 153 | } 154 | } 155 | 156 | #[derive(Clone, Copy, Debug, PartialEq, Eq)] 157 | #[allow(clippy::upper_case_acronyms)] 158 | pub enum DType { 159 | UNKNOWN = 0, 160 | FIFO = 1, 161 | CHR = 2, 162 | DIR = 4, 163 | BLK = 6, 164 | REG = 8, 165 | LNK = 10, 166 | SOCK = 12, 167 | } 168 | 169 | #[cfg(test)] 170 | mod tests { 171 | use super::*; 172 | use alloc::vec::Vec; 173 | 174 | // TODO: Get a directory with more file types 175 | 176 | #[test] 177 | fn read_cwd() { 178 | let dir = Directory::open(CStr::from_bytes(b"/dev\0")).unwrap(); 179 | let contents = dir.read().unwrap(); 180 | 181 | let mut libc_dirents = Vec::new(); 182 | unsafe { 183 | let dirp = libc::opendir(b"/dev\0".as_ptr() as *const libc::c_char); 184 | let mut entry = libc::readdir64(dirp); 185 | while !entry.is_null() { 186 | libc_dirents.push(*entry); 187 | entry = libc::readdir64(dirp); 188 | } 189 | libc::closedir(dirp); 190 | } 191 | 192 | for (libc, ven) in libc_dirents.iter().zip(contents.iter()) { 193 | unsafe { 194 | assert_eq!(CStr::from_ptr(libc.d_name.as_ptr().cast()), ven.name); 195 | } 196 | assert_eq!(libc.d_ino, ven.inode); 197 | assert_eq!(libc.d_type, ven.d_type as u8); 198 | } 199 | } 200 | } 201 | -------------------------------------------------------------------------------- /src/fs/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | io::{Read, Write}, 3 | syscalls, 4 | syscalls::{OpenFlags, OpenMode}, 5 | CStr, Error, 6 | }; 7 | use alloc::{vec, vec::Vec}; 8 | use libc::c_int; 9 | 10 | mod directory; 11 | pub use directory::*; 12 | 13 | pub struct File(c_int); 14 | 15 | impl File { 16 | #[inline] 17 | pub fn open(path: &[u8]) -> Result { 18 | Ok(Self(syscalls::openat( 19 | libc::AT_FDCWD, 20 | CStr::from_bytes(path), 21 | OpenFlags::RDONLY | OpenFlags::CLOEXEC, 22 | OpenMode::empty(), 23 | )?)) 24 | } 25 | 26 | #[inline] 27 | pub fn create(path: &[u8]) -> Result { 28 | Ok(Self(syscalls::openat( 29 | libc::AT_FDCWD, 30 | CStr::from_bytes(path), 31 | OpenFlags::RDWR | OpenFlags::CREAT | OpenFlags::CLOEXEC, 32 | OpenMode::RUSR 33 | | OpenMode::WUSR 34 | | OpenMode::RGRP 35 | | OpenMode::WGRP 36 | | OpenMode::ROTH 37 | | OpenMode::WOTH, 38 | )?)) 39 | } 40 | } 41 | 42 | impl Drop for File { 43 | #[inline] 44 | fn drop(&mut self) { 45 | let _ = syscalls::close(self.0); 46 | } 47 | } 48 | 49 | impl Read for File { 50 | #[inline] 51 | fn read(&mut self, buf: &mut [u8]) -> Result { 52 | syscalls::read(self.0, buf) 53 | } 54 | } 55 | 56 | impl Write for File { 57 | #[inline] 58 | fn write(&mut self, buf: &[u8]) -> Result { 59 | syscalls::write(self.0, buf) 60 | } 61 | } 62 | 63 | #[inline] 64 | pub fn read(path: &[u8]) -> Result, Error> { 65 | let file_len = 66 | syscalls::fstatat(libc::AT_FDCWD, CStr::from_bytes(path)).map(|stat| stat.st_size)?; 67 | let mut file = File::open(path)?; 68 | let mut bytes = vec![0; 
file_len as usize]; 69 | let mut buf = &mut bytes[..]; 70 | while !buf.is_empty() { 71 | match file.read(buf) { 72 | Ok(0) => break, 73 | Ok(n) => buf = &mut buf[n..], 74 | Err(Error(libc::EAGAIN)) => {} 75 | Err(e) => return Err(e), 76 | } 77 | } 78 | Ok(bytes) 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | use super::*; 84 | 85 | #[test] 86 | fn files() { 87 | let expected_contents = &b"test contents\n"[..]; 88 | 89 | let mut file = File::create(b"/tmp/test.foo\0").unwrap(); 90 | file.write(expected_contents).unwrap(); 91 | 92 | let mut contents = [0; 64]; 93 | let mut file = File::open(b"/tmp/test.foo\0").unwrap(); 94 | let bytes_read = file.read(&mut contents).unwrap(); 95 | 96 | assert_eq!(&contents[..bytes_read], expected_contents); 97 | 98 | let contents = read(b"/tmp/test.foo\0").unwrap(); 99 | assert_eq!(contents, expected_contents); 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/io.rs: -------------------------------------------------------------------------------- 1 | use crate::Error; 2 | 3 | pub type Result = core::result::Result; 4 | 5 | pub trait Read { 6 | fn read(&mut self, buf: &mut [u8]) -> Result; 7 | } 8 | 9 | pub trait Write { 10 | fn write(&mut self, buf: &[u8]) -> Result; 11 | 12 | #[inline] 13 | fn write_all(&mut self, mut buf: &[u8]) -> Result<()> { 14 | while !buf.is_empty() { 15 | match self.write(buf) { 16 | Ok(0) => { 17 | return Err(Error(libc::EBADF)); 18 | } 19 | Ok(n) => buf = buf.get(n..).unwrap_or_default(), 20 | Err(Error(libc::EAGAIN)) => {} 21 | Err(e) => return Err(e), 22 | } 23 | } 24 | Ok(()) 25 | } 26 | } 27 | 28 | pub struct Stdout; 29 | 30 | impl Write for Stdout { 31 | #[inline] 32 | fn write(&mut self, buf: &[u8]) -> Result { 33 | crate::syscalls::write(libc::STDOUT_FILENO, buf) 34 | } 35 | } 36 | 37 | impl core::fmt::Write for Stdout { 38 | #[inline] 39 | fn write_str(&mut self, s: &str) -> core::fmt::Result { 40 | match self.write_all(s.as_bytes()) { 41 | Ok(_) => Ok(()), 42 | Err(_) => panic!("Unable to write to stdout"), 43 | } 44 | } 45 | } 46 | 47 | pub struct Stderr; 48 | 49 | impl Write for Stderr { 50 | #[inline] 51 | fn write(&mut self, buf: &[u8]) -> Result { 52 | crate::syscalls::write(libc::STDERR_FILENO, buf) 53 | } 54 | } 55 | 56 | impl core::fmt::Write for Stderr { 57 | #[inline] 58 | fn write_str(&mut self, s: &str) -> core::fmt::Result { 59 | match self.write_all(s.as_bytes()) { 60 | Ok(_) => Ok(()), 61 | Err(_) => panic!("Unable to write to stderr"), 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(test), no_std)] 2 | #![feature(naked_functions, alloc_error_handler, lang_items)] 3 | #![warn(clippy::missing_inline_in_public_items)] 4 | #![feature(cfg_target_has_atomic, core_intrinsics, linkage)] 5 | #![allow(internal_features)] // Must use lang_items to implement a Rust runtime 6 | 7 | #[cfg(not(target_os = "linux"))] 8 | core::compile_error!( 9 | "This library is only implemented for Linux, \ 10 | because the primary goal of this library is to bypass the \ 11 | system libc and make syscalls directly. Making syscalls without \ 12 | going through libc is not supported on Windows and MacOS. \ 13 | Support for any OS with a stable syscall ABI may be considered, \ 14 | open an issue on https://github.com/saethlin/veneer." 
15 | ); 16 | 17 | #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] 18 | compile_error!("This crate is only implemented for x86_64 and aarch64"); 19 | 20 | extern crate alloc; 21 | 22 | #[cfg(target_os = "linux")] 23 | mod allocator; 24 | #[cfg(target_os = "linux")] 25 | mod cstr; 26 | #[cfg(target_os = "linux")] 27 | pub mod env; 28 | #[cfg(target_os = "linux")] 29 | mod error; 30 | #[cfg(target_os = "linux")] 31 | pub mod fmt; 32 | #[cfg(target_os = "linux")] 33 | pub mod fs; 34 | #[cfg(target_os = "linux")] 35 | pub mod io; 36 | #[cfg(target_os = "linux")] 37 | mod mem; 38 | #[cfg(target_os = "linux")] 39 | pub mod net; 40 | #[cfg(target_os = "linux")] 41 | pub mod prelude; 42 | #[cfg(target_os = "linux")] 43 | mod spinlock; 44 | #[cfg(target_os = "linux")] 45 | pub mod syscalls; 46 | 47 | #[cfg(target_os = "linux")] 48 | pub use allocator::Allocator; 49 | #[cfg(target_os = "linux")] 50 | pub use cstr::CStr; 51 | #[cfg(target_os = "linux")] 52 | pub use error::Error; 53 | #[cfg(target_os = "linux")] 54 | pub use veneer_macros::main; 55 | 56 | #[cfg(all(feature = "rt", not(test)))] 57 | #[lang = "eh_personality"] 58 | #[no_mangle] 59 | pub extern "C" fn eh_personality() {} 60 | 61 | #[cfg(all(feature = "rt", not(test)))] 62 | #[alloc_error_handler] 63 | fn alloc_error(layout: core::alloc::Layout) -> ! { 64 | panic!("memory allocation of {} bytes failed", layout.size()); 65 | } 66 | 67 | #[cfg(all(target_os = "linux", feature = "rt", not(test)))] 68 | #[panic_handler] 69 | fn panic(info: &core::panic::PanicInfo) -> ! { 70 | crate::eprintln!("{}", info); 71 | let _ = crate::syscalls::kill(0, 6); 72 | crate::syscalls::exit(-1) 73 | } 74 | 75 | #[cfg(all(target_os = "linux", feature = "rt", not(test), target_arch = "x86_64"))] 76 | #[no_mangle] 77 | #[unsafe(naked)] 78 | unsafe extern "C" fn _start() { 79 | // Just move argc and argv into the right registers and call main 80 | core::arch::naked_asm!( 81 | "mov rdi, [rsp]", // The value of rsp is actually a pointer to argc 82 | "mov rsi, rsp", 83 | "add rsi, 8", // But for argv we just increment the rsp pointer by 1 (offset by 8) 84 | "call __veneer_init", 85 | "call __veneer_main", 86 | ) 87 | } 88 | 89 | #[cfg(all( 90 | target_os = "linux", 91 | feature = "rt", 92 | not(test), 93 | target_arch = "aarch64" 94 | ))] 95 | #[no_mangle] 96 | #[naked] 97 | unsafe extern "C" fn _start() { 98 | core::arch::naked_asm!( 99 | "ldr x0, [sp]", 100 | "mov x1, sp", 101 | "add x1, x1, 0x8", 102 | "bl __veneer_init", 103 | "bl __veneer_main", 104 | ) 105 | } 106 | 107 | #[cfg(all(target_os = "linux", target_os = "linux", feature = "rt", not(test)))] 108 | #[no_mangle] 109 | unsafe extern "C" fn __veneer_init(argc: isize, argv: *mut *const u8) { 110 | crate::env::ARGC.store(argc, core::sync::atomic::Ordering::SeqCst); 111 | crate::env::ARGV.store(argv.cast(), core::sync::atomic::Ordering::SeqCst); 112 | } 113 | 114 | #[cfg(all(target_os = "linux", feature = "rt", not(test)))] 115 | #[global_allocator] 116 | static ALLOC: crate::Allocator = crate::Allocator::new(); 117 | 118 | #[macro_export] 119 | macro_rules! print { 120 | ($($args:tt)*) => { 121 | core::fmt::write(&mut $crate::io::Stdout, format_args!($($args)*)).unwrap(); 122 | }; 123 | } 124 | 125 | #[macro_export] 126 | macro_rules! 
println { 127 | () => { 128 | <$crate::io::Stdout as core::fmt::Write>::write_str(&mut $crate::io::Stdout, "\n").unwrap(); 129 | }; 130 | ($format:expr) => { 131 | <$crate::io::Stdout as core::fmt::Write>::write_str(&mut $crate::io::Stdout, concat!($format, "\n")).unwrap(); 132 | }; 133 | ($format:expr, $($args:tt)*) => { 134 | core::fmt::write(&mut $crate::io::Stdout, format_args!(concat!($format, "\n"), $($args)*)).unwrap(); 135 | }; 136 | } 137 | 138 | #[macro_export] 139 | macro_rules! eprint { 140 | ($($args:tt)*) => { 141 | core::fmt::write(&mut $crate::io::Stderr, format_args!($($args)*)).unwrap(); 142 | }; 143 | } 144 | 145 | #[macro_export] 146 | macro_rules! eprintln { 147 | ($format:expr, $($args:tt)*) => { 148 | core::fmt::write(&mut $crate::io::Stderr, format_args!(concat!($format, "\n"), $($args)*)).unwrap(); 149 | }; 150 | } 151 | -------------------------------------------------------------------------------- /src/mem/impls.rs: -------------------------------------------------------------------------------- 1 | use core::intrinsics::likely; 2 | 3 | const WORD_SIZE: usize = core::mem::size_of::(); 4 | const WORD_MASK: usize = WORD_SIZE - 1; 5 | 6 | // If the number of bytes involved exceed this threshold we will opt in word-wise copy. 7 | // The value here selected is max(2 * WORD_SIZE, 16): 8 | // * We need at least 2 * WORD_SIZE bytes to guarantee that at least 1 word will be copied through 9 | // word-wise copy. 10 | // * The word-wise copy logic needs to perform some checks so it has some small overhead. 11 | // ensures that even on 32-bit platforms we have copied at least 8 bytes through 12 | // word-wise copy so the saving of word-wise copy outweights the fixed overhead. 13 | const WORD_COPY_THRESHOLD: usize = if 2 * WORD_SIZE > 16 { 14 | 2 * WORD_SIZE 15 | } else { 16 | 16 17 | }; 18 | 19 | #[inline(always)] 20 | pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize) { 21 | #[inline(always)] 22 | unsafe fn copy_forward_bytes(mut dest: *mut u8, mut src: *const u8, n: usize) { 23 | let dest_end = dest.add(n); 24 | while dest < dest_end { 25 | *dest = *src; 26 | dest = dest.add(1); 27 | src = src.add(1); 28 | } 29 | } 30 | 31 | #[inline(always)] 32 | unsafe fn copy_forward_aligned_words(dest: *mut u8, src: *const u8, n: usize) { 33 | let mut dest_usize = dest as *mut usize; 34 | let mut src_usize = src as *mut usize; 35 | let dest_end = dest.add(n) as *mut usize; 36 | 37 | while dest_usize < dest_end { 38 | *dest_usize = *src_usize; 39 | dest_usize = dest_usize.add(1); 40 | src_usize = src_usize.add(1); 41 | } 42 | } 43 | 44 | if n >= WORD_COPY_THRESHOLD { 45 | // Align dest 46 | // Because of n >= 2 * WORD_SIZE, dst_misalignment < n 47 | let dest_misalignment = (dest as usize).wrapping_neg() & WORD_MASK; 48 | copy_forward_bytes(dest, src, dest_misalignment); 49 | dest = dest.add(dest_misalignment); 50 | src = src.add(dest_misalignment); 51 | n -= dest_misalignment; 52 | 53 | let n_words = n & !WORD_MASK; 54 | let src_misalignment = src as usize & WORD_MASK; 55 | if likely(src_misalignment == 0) { 56 | copy_forward_aligned_words(dest, src, n_words); 57 | } else { 58 | copy_forward_misaligned_words(dest, src, n_words); 59 | } 60 | dest = dest.add(n_words); 61 | src = src.add(n_words); 62 | n -= n_words; 63 | } 64 | copy_forward_bytes(dest, src, n); 65 | } 66 | 67 | #[inline(always)] 68 | pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) { 69 | // The following backward copy helper functions uses the pointers past the end 70 
| // as their inputs instead of pointers to the start! 71 | #[inline(always)] 72 | unsafe fn copy_backward_bytes(mut dest: *mut u8, mut src: *const u8, n: usize) { 73 | let dest_start = dest.sub(n); 74 | while dest_start < dest { 75 | dest = dest.sub(1); 76 | src = src.sub(1); 77 | *dest = *src; 78 | } 79 | } 80 | 81 | #[inline(always)] 82 | unsafe fn copy_backward_aligned_words(dest: *mut u8, src: *const u8, n: usize) { 83 | let mut dest_usize = dest as *mut usize; 84 | let mut src_usize = src as *mut usize; 85 | let dest_start = dest.sub(n) as *mut usize; 86 | 87 | while dest_start < dest_usize { 88 | dest_usize = dest_usize.sub(1); 89 | src_usize = src_usize.sub(1); 90 | *dest_usize = *src_usize; 91 | } 92 | } 93 | 94 | let mut dest = dest.add(n); 95 | let mut src = src.add(n); 96 | 97 | if n >= WORD_COPY_THRESHOLD { 98 | // Align dest 99 | // Because of n >= 2 * WORD_SIZE, dst_misalignment < n 100 | let dest_misalignment = dest as usize & WORD_MASK; 101 | copy_backward_bytes(dest, src, dest_misalignment); 102 | dest = dest.sub(dest_misalignment); 103 | src = src.sub(dest_misalignment); 104 | n -= dest_misalignment; 105 | 106 | let n_words = n & !WORD_MASK; 107 | let src_misalignment = src as usize & WORD_MASK; 108 | if likely(src_misalignment == 0) { 109 | copy_backward_aligned_words(dest, src, n_words); 110 | } else { 111 | copy_backward_misaligned_words(dest, src, n_words); 112 | } 113 | dest = dest.sub(n_words); 114 | src = src.sub(n_words); 115 | n -= n_words; 116 | } 117 | copy_backward_bytes(dest, src, n); 118 | } 119 | 120 | #[inline(always)] 121 | pub unsafe fn set_bytes(mut s: *mut u8, c: u8, mut n: usize) { 122 | #[inline(always)] 123 | pub unsafe fn set_bytes_bytes(mut s: *mut u8, c: u8, n: usize) { 124 | let end = s.add(n); 125 | while s < end { 126 | *s = c; 127 | s = s.add(1); 128 | } 129 | } 130 | 131 | #[inline(always)] 132 | pub unsafe fn set_bytes_words(s: *mut u8, c: u8, n: usize) { 133 | let mut broadcast = c as usize; 134 | let mut bits = 8; 135 | while bits < WORD_SIZE * 8 { 136 | broadcast |= broadcast << bits; 137 | bits *= 2; 138 | } 139 | 140 | let mut s_usize = s as *mut usize; 141 | let end = s.add(n) as *mut usize; 142 | 143 | while s_usize < end { 144 | *s_usize = broadcast; 145 | s_usize = s_usize.add(1); 146 | } 147 | } 148 | 149 | if likely(n >= WORD_COPY_THRESHOLD) { 150 | // Align s 151 | // Because of n >= 2 * WORD_SIZE, dst_misalignment < n 152 | let misalignment = (s as usize).wrapping_neg() & WORD_MASK; 153 | set_bytes_bytes(s, c, misalignment); 154 | s = s.add(misalignment); 155 | n -= misalignment; 156 | 157 | let n_words = n & !WORD_MASK; 158 | set_bytes_words(s, c, n_words); 159 | s = s.add(n_words); 160 | n -= n_words; 161 | } 162 | set_bytes_bytes(s, c, n); 163 | } 164 | 165 | #[inline(always)] 166 | pub unsafe fn compare_bytes(s1: *const u8, s2: *const u8, n: usize) -> i32 { 167 | let mut i = 0; 168 | while i < n { 169 | let a = *s1.add(i); 170 | let b = *s2.add(i); 171 | if a != b { 172 | return a as i32 - b as i32; 173 | } 174 | i += 1; 175 | } 176 | 0 177 | } 178 | -------------------------------------------------------------------------------- /src/mem/macros.rs: -------------------------------------------------------------------------------- 1 | //! Macros shared throughout the compiler-builtins implementation 2 | 3 | /// The "main macro" used for defining intrinsics. 4 | /// 5 | /// The compiler-builtins library is super platform-specific with tons of crazy 6 | /// little tweaks for various platforms. 
As a result it *could* involve a lot of 7 | /// #[cfg] and macro soup, but the intention is that this macro alleviates a lot 8 | /// of that complexity. Ideally this macro has all the weird ABI things 9 | /// platforms need and elsewhere in this library it just looks like normal Rust 10 | /// code. 11 | /// 12 | /// This macro is structured to be invoked with a bunch of functions that looks 13 | /// like: 14 | /// ```ignore 15 | /// intrinsics! { 16 | /// pub extern "C" fn foo(a: i32) -> u32 { 17 | /// // ... 18 | /// } 19 | /// 20 | /// #[nonstandard_attribute] 21 | /// pub extern "C" fn bar(a: i32) -> u32 { 22 | /// // ... 23 | /// } 24 | /// } 25 | /// ``` 26 | /// 27 | /// Each function is defined in a manner that looks like a normal Rust function. 28 | /// The macro then accepts a few nonstandard attributes that can decorate 29 | /// various functions. Each of the attributes is documented below with what it 30 | /// can do, and each of them slightly tweaks how further expansion happens. 31 | /// 32 | /// A quick overview of attributes supported right now are: 33 | /// 34 | /// * `maybe_use_optimized_c_shim` - indicates that the Rust implementation is 35 | /// ignored if an optimized C version was compiled. 36 | /// * `aapcs_on_arm` - forces the ABI of the function to be `"aapcs"` on ARM and 37 | /// the specified ABI everywhere else. 38 | /// * `unadjusted_on_win64` - like `aapcs_on_arm` this switches to the 39 | /// `"unadjusted"` abi on Win64 and the specified abi elsewhere. 40 | /// * `win64_128bit_abi_hack` - this attribute is used for 128-bit integer 41 | /// intrinsics where the ABI is slightly tweaked on Windows platforms, but 42 | /// it's a normal ABI elsewhere for returning a 128 bit integer. 43 | /// * `arm_aeabi_alias` - handles the "aliasing" of various intrinsics on ARM 44 | /// their otherwise typical names to other prefixed ones. 45 | /// 46 | macro_rules! intrinsics { 47 | () => (); 48 | 49 | // Support cfg_attr: 50 | ( 51 | #[cfg_attr($e:meta, $($attr:tt)*)] 52 | $(#[$($attrs:tt)*])* 53 | pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 54 | $($body:tt)* 55 | } 56 | $($rest:tt)* 57 | ) => ( 58 | #[cfg($e)] 59 | intrinsics! { 60 | #[$($attr)*] 61 | $(#[$($attrs)*])* 62 | pub extern $abi fn $name($($argname: $ty),*) $(-> $ret)? { 63 | $($body)* 64 | } 65 | } 66 | 67 | #[cfg(not($e))] 68 | intrinsics! { 69 | $(#[$($attrs)*])* 70 | pub extern $abi fn $name($($argname: $ty),*) $(-> $ret)? { 71 | $($body)* 72 | } 73 | } 74 | 75 | intrinsics!($($rest)*); 76 | ); 77 | 78 | // Right now there's a bunch of architecture-optimized intrinsics in the 79 | // stock compiler-rt implementation. Not all of these have been ported over 80 | // to Rust yet so when the `c` feature of this crate is enabled we fall back 81 | // to the architecture-specific versions which should be more optimized. The 82 | // purpose of this macro is to easily allow specifying this. 83 | // 84 | // The `#[maybe_use_optimized_c_shim]` attribute indicates that this 85 | // intrinsic may have an optimized C version. In these situations the build 86 | // script, if the C code is enabled and compiled, will emit a cfg directive 87 | // to get passed to rustc for our compilation. If that cfg is set we skip 88 | // the Rust implementation, but if the attribute is not enabled then we 89 | // compile in the Rust implementation. 90 | ( 91 | #[maybe_use_optimized_c_shim] 92 | $(#[$($attr:tt)*])* 93 | pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? 
{ 94 | $($body:tt)* 95 | } 96 | 97 | $($rest:tt)* 98 | ) => ( 99 | #[cfg($name = "optimized-c")] 100 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 101 | extern $abi { 102 | fn $name($($argname: $ty),*) $(-> $ret)?; 103 | } 104 | unsafe { 105 | $name($($argname),*) 106 | } 107 | } 108 | 109 | #[cfg(not($name = "optimized-c"))] 110 | intrinsics! { 111 | $(#[$($attr)*])* 112 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 113 | $($body)* 114 | } 115 | } 116 | 117 | intrinsics!($($rest)*); 118 | ); 119 | 120 | // We recognize the `#[aapcs_on_arm]` attribute here and generate the 121 | // same intrinsic but force it to have the `"aapcs"` calling convention on 122 | // ARM and `"C"` elsewhere. 123 | ( 124 | #[aapcs_on_arm] 125 | $(#[$($attr:tt)*])* 126 | pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 127 | $($body:tt)* 128 | } 129 | 130 | $($rest:tt)* 131 | ) => ( 132 | #[cfg(target_arch = "arm")] 133 | intrinsics! { 134 | $(#[$($attr)*])* 135 | pub extern "aapcs" fn $name( $($argname: $ty),* ) $(-> $ret)? { 136 | $($body)* 137 | } 138 | } 139 | 140 | #[cfg(not(target_arch = "arm"))] 141 | intrinsics! { 142 | $(#[$($attr)*])* 143 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 144 | $($body)* 145 | } 146 | } 147 | 148 | intrinsics!($($rest)*); 149 | ); 150 | 151 | // Like aapcs above we recognize an attribute for the "unadjusted" abi on 152 | // win64 for some methods. 153 | ( 154 | #[unadjusted_on_win64] 155 | $(#[$($attr:tt)*])* 156 | pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 157 | $($body:tt)* 158 | } 159 | 160 | $($rest:tt)* 161 | ) => ( 162 | #[cfg(all(any(windows, all(target_os = "uefi", target_arch = "x86_64")), target_pointer_width = "64"))] 163 | intrinsics! { 164 | $(#[$($attr)*])* 165 | pub extern "unadjusted" fn $name( $($argname: $ty),* ) $(-> $ret)? { 166 | $($body)* 167 | } 168 | } 169 | 170 | #[cfg(not(all(any(windows, all(target_os = "uefi", target_arch = "x86_64")), target_pointer_width = "64")))] 171 | intrinsics! { 172 | $(#[$($attr)*])* 173 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 174 | $($body)* 175 | } 176 | } 177 | 178 | intrinsics!($($rest)*); 179 | ); 180 | 181 | // Some intrinsics on win64 which return a 128-bit integer have an.. unusual 182 | // calling convention. That's managed here with this "abi hack" which alters 183 | // the generated symbol's ABI. 184 | // 185 | // This will still define a function in this crate with the given name and 186 | // signature, but the actual symbol for the intrinsic may have a slightly 187 | // different ABI on win64. 188 | ( 189 | #[win64_128bit_abi_hack] 190 | $(#[$($attr:tt)*])* 191 | pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 192 | $($body:tt)* 193 | } 194 | 195 | $($rest:tt)* 196 | ) => ( 197 | #[cfg(all(any(windows, target_os = "uefi"), target_arch = "x86_64"))] 198 | $(#[$($attr)*])* 199 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 200 | $($body)* 201 | } 202 | 203 | #[cfg(all(any(windows, target_os = "uefi"), target_arch = "x86_64"))] 204 | pub mod $name { 205 | #[cfg_attr(not(feature = "mangled-names"), no_mangle)] 206 | pub extern $abi fn $name( $($argname: $ty),* ) 207 | -> ::macros::win64_128bit_abi_hack::U64x2 208 | { 209 | let e: $($ret)? 
= super::$name($($argname),*); 210 | ::macros::win64_128bit_abi_hack::U64x2::from(e) 211 | } 212 | } 213 | 214 | #[cfg(not(all(any(windows, target_os = "uefi"), target_arch = "x86_64")))] 215 | intrinsics! { 216 | $(#[$($attr)*])* 217 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 218 | $($body)* 219 | } 220 | } 221 | 222 | intrinsics!($($rest)*); 223 | ); 224 | 225 | // A bunch of intrinsics on ARM are aliased in the standard compiler-rt 226 | // build under `__aeabi_*` aliases, and LLVM will call these instead of the 227 | // original function. The aliasing here is used to generate these symbols in 228 | // the object file. 229 | ( 230 | #[arm_aeabi_alias = $alias:ident] 231 | $(#[$($attr:tt)*])* 232 | pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 233 | $($body:tt)* 234 | } 235 | 236 | $($rest:tt)* 237 | ) => ( 238 | #[cfg(target_arch = "arm")] 239 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 240 | $($body)* 241 | } 242 | 243 | #[cfg(target_arch = "arm")] 244 | pub mod $name { 245 | #[cfg_attr(not(feature = "mangled-names"), no_mangle)] 246 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 247 | super::$name($($argname),*) 248 | } 249 | } 250 | 251 | #[cfg(target_arch = "arm")] 252 | pub mod $alias { 253 | #[cfg_attr(not(feature = "mangled-names"), no_mangle)] 254 | #[cfg_attr(all(not(windows), not(target_vendor="apple")), linkage = "weak")] 255 | pub extern "aapcs" fn $alias( $($argname: $ty),* ) $(-> $ret)? { 256 | super::$name($($argname),*) 257 | } 258 | } 259 | 260 | #[cfg(not(target_arch = "arm"))] 261 | intrinsics! { 262 | $(#[$($attr)*])* 263 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 264 | $($body)* 265 | } 266 | } 267 | 268 | intrinsics!($($rest)*); 269 | ); 270 | 271 | // C mem* functions are only generated when the "mem" feature is enabled. 272 | ( 273 | #[mem_builtin] 274 | $(#[$($attr:tt)*])* 275 | pub unsafe extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 276 | $($body:tt)* 277 | } 278 | 279 | $($rest:tt)* 280 | ) => ( 281 | $(#[$($attr)*])* 282 | pub unsafe extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 283 | $($body)* 284 | } 285 | 286 | #[cfg(feature = "mem")] 287 | pub mod $name { 288 | $(#[$($attr)*])* 289 | #[cfg_attr(not(feature = "mangled-names"), no_mangle)] 290 | pub unsafe extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 291 | super::$name($($argname),*) 292 | } 293 | } 294 | 295 | intrinsics!($($rest)*); 296 | ); 297 | 298 | // Naked functions are special: we can't generate wrappers for them since 299 | // they use a custom calling convention. 300 | ( 301 | #[naked] 302 | $(#[$($attr:tt)*])* 303 | pub unsafe extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 304 | $($body:tt)* 305 | } 306 | 307 | $($rest:tt)* 308 | ) => ( 309 | pub mod $name { 310 | #[naked] 311 | $(#[$($attr)*])* 312 | #[cfg_attr(not(feature = "mangled-names"), no_mangle)] 313 | pub unsafe extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 314 | $($body)* 315 | } 316 | } 317 | 318 | intrinsics!($($rest)*); 319 | ); 320 | 321 | // For division and modulo, AVR uses a custom calling convention¹ that does 322 | // not match our definitions here. 
Ideally we would just use hand-written 323 | // naked functions, but that's quite a lot of code to port² - so for the 324 | // time being we are just ignoring the problematic functions, letting 325 | // avr-gcc (which is required to compile to AVR anyway) link them from 326 | // libgcc. 327 | // 328 | // ¹ https://gcc.gnu.org/wiki/avr-gcc (see "Exceptions to the Calling 329 | // Convention") 330 | // ² https://github.com/gcc-mirror/gcc/blob/31048012db98f5ec9c2ba537bfd850374bdd771f/libgcc/config/avr/lib1funcs.S 331 | ( 332 | #[avr_skip] 333 | $(#[$($attr:tt)*])* 334 | pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 335 | $($body:tt)* 336 | } 337 | 338 | $($rest:tt)* 339 | ) => ( 340 | #[cfg(not(target_arch = "avr"))] 341 | intrinsics! { 342 | $(#[$($attr)*])* 343 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 344 | $($body)* 345 | } 346 | } 347 | 348 | intrinsics!($($rest)*); 349 | ); 350 | 351 | // This is the final catch-all rule. At this point we generate an 352 | // intrinsic with a conditional `#[no_mangle]` directive to avoid 353 | // interfering with duplicate symbols and whatnot during testing. 354 | // 355 | // The implementation is placed in a separate module, to take advantage 356 | // of the fact that rustc partitions functions into code generation 357 | // units based on module they are defined in. As a result we will have 358 | // a separate object file for each intrinsic. For further details see 359 | // corresponding PR in rustc https://github.com/rust-lang/rust/pull/70846 360 | // 361 | // After the intrinsic is defined we just continue with the rest of the 362 | // input we were given. 363 | ( 364 | $(#[$($attr:tt)*])* 365 | pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 366 | $($body:tt)* 367 | } 368 | 369 | $($rest:tt)* 370 | ) => ( 371 | $(#[$($attr)*])* 372 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 373 | $($body)* 374 | } 375 | 376 | pub mod $name { 377 | $(#[$($attr)*])* 378 | #[cfg_attr(not(feature = "mangled-names"), no_mangle)] 379 | pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 380 | super::$name($($argname),*) 381 | } 382 | } 383 | 384 | intrinsics!($($rest)*); 385 | ); 386 | 387 | // Same as the above for unsafe functions. 388 | ( 389 | $(#[$($attr:tt)*])* 390 | pub unsafe extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { 391 | $($body:tt)* 392 | } 393 | 394 | $($rest:tt)* 395 | ) => ( 396 | $(#[$($attr)*])* 397 | pub unsafe extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 398 | $($body)* 399 | } 400 | 401 | pub mod $name { 402 | $(#[$($attr)*])* 403 | #[cfg_attr(not(feature = "mangled-names"), no_mangle)] 404 | pub unsafe extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { 405 | super::$name($($argname),*) 406 | } 407 | } 408 | 409 | intrinsics!($($rest)*); 410 | ); 411 | } 412 | pub(crate) use intrinsics; 413 | 414 | // Hack for LLVM expectations for ABI on windows. 
This is used by the
415 | // `#[win64_128bit_abi_hack]` attribute recognized above
416 | #[cfg(all(any(windows, target_os = "uefi"), target_pointer_width = "64"))]
417 | pub mod win64_128bit_abi_hack {
418 |     #[repr(simd)]
419 |     pub struct U64x2(u64, u64);
420 | 
421 |     impl From<i128> for U64x2 {
422 |         fn from(i: i128) -> U64x2 {
423 |             use int::DInt;
424 |             let j = i as u128;
425 |             U64x2(j.lo(), j.hi())
426 |         }
427 |     }
428 | 
429 |     impl From<u128> for U64x2 {
430 |         fn from(i: u128) -> U64x2 {
431 |             use int::DInt;
432 |             U64x2(i.lo(), i.hi())
433 |         }
434 |     }
435 | }
436 | 
--------------------------------------------------------------------------------
/src/mem/mod.rs:
--------------------------------------------------------------------------------
1 | // Trying to satisfy clippy here is hopeless
2 | #![allow(clippy::style)]
3 | 
4 | #[allow(warnings)]
5 | #[cfg(target_pointer_width = "16")]
6 | type c_int = i16;
7 | #[allow(warnings)]
8 | #[cfg(not(target_pointer_width = "16"))]
9 | type c_int = i32;
10 | 
11 | use core::{
12 |     intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div},
13 |     mem,
14 |     ops::{BitOr, Shl},
15 | };
16 | 
17 | mod macros;
18 | use macros::intrinsics;
19 | 
20 | // memcpy/memmove/memset have optimized implementations on some architectures
21 | #[cfg_attr(
22 |     all(not(feature = "no-asm"), target_arch = "x86_64"),
23 |     path = "x86_64.rs"
24 | )]
25 | mod impls;
26 | 
27 | intrinsics! {
28 |     #[mem_builtin]
29 |     #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")]
30 |     pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
31 |         impls::copy_forward(dest, src, n);
32 |         dest
33 |     }
34 | 
35 |     #[mem_builtin]
36 |     #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")]
37 |     pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
38 |         let delta = (dest as usize).wrapping_sub(src as usize);
39 |         if delta >= n {
40 |             // We can copy forwards because either dest is far enough ahead of src,
41 |             // or src is ahead of dest (and delta overflowed).
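            // Worked example (illustrative numbers, not from the original source):
            // with dest = 0x2000, src = 0x1000 and n = 0x800, delta is 0x1000 >= n,
            // so the regions do not overlap and a forward copy is safe. With
            // dest = 0x1000 and src = 0x2000, delta wraps to an enormous value that
            // is also >= n, so the forward path is taken whenever src is ahead of
            // dest. Only when dest is ahead of src by fewer than n bytes do we fall
            // through to copy_backward.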
42 |             impls::copy_forward(dest, src, n);
43 |         } else {
44 |             impls::copy_backward(dest, src, n);
45 |         }
46 |         dest
47 |     }
48 | 
49 |     #[mem_builtin]
50 |     #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")]
51 |     pub unsafe extern "C" fn memset(s: *mut u8, c: crate::mem::c_int, n: usize) -> *mut u8 {
52 |         impls::set_bytes(s, c as u8, n);
53 |         s
54 |     }
55 | 
56 |     #[mem_builtin]
57 |     #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")]
58 |     pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
59 |         impls::compare_bytes(s1, s2, n)
60 |     }
61 | 
62 |     #[mem_builtin]
63 |     #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")]
64 |     pub unsafe extern "C" fn bcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
65 |         memcmp(s1, s2, n)
66 |     }
67 | 
68 |     #[mem_builtin]
69 |     #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")]
70 |     pub unsafe extern "C" fn strlen(s: *const core::ffi::c_char) -> usize {
71 |         let mut n = 0;
72 |         let mut s = s;
73 |         while *s != 0 {
74 |             n += 1;
75 |             s = s.offset(1);
76 |         }
77 |         n
78 |     }
79 | }
80 | 
81 | // `bytes` must be a multiple of `mem::size_of::<T>()`
82 | #[cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
83 | fn memcpy_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
84 |     unsafe {
85 |         let n = exact_div(bytes, mem::size_of::<T>());
86 |         let mut i = 0;
87 |         while i < n {
88 |             atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
89 |             i += 1;
90 |         }
91 |     }
92 | }
93 | 
94 | // `bytes` must be a multiple of `mem::size_of::<T>()`
95 | #[cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
96 | fn memmove_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) {
97 |     unsafe {
98 |         let n = exact_div(bytes, mem::size_of::<T>());
99 |         if src < dest as *const T {
100 |             // copy from end
101 |             let mut i = n;
102 |             while i != 0 {
103 |                 i -= 1;
104 |                 atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
105 |             }
106 |         } else {
107 |             // copy from beginning
108 |             let mut i = 0;
109 |             while i < n {
110 |                 atomic_store_unordered(dest.add(i), atomic_load_unordered(src.add(i)));
111 |                 i += 1;
112 |             }
113 |         }
114 |     }
115 | }
116 | 
117 | // `T` must be a primitive integer type, and `bytes` must be a multiple of `mem::size_of::<T>()`
118 | #[cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
119 | fn memset_element_unordered_atomic<T>(s: *mut T, c: u8, bytes: usize)
120 | where
121 |     T: Copy + From<u8> + Shl<u32, Output = T> + BitOr<T, Output = T>,
122 | {
123 |     unsafe {
124 |         let n = exact_div(bytes, mem::size_of::<T>());
125 | 
126 |         // Construct a value of type `T` consisting of repeated `c`
127 |         // bytes, to let us ensure we write each `T` atomically.
128 |         let mut x = T::from(c);
129 |         let mut i = 1;
130 |         while i < mem::size_of::<T>() {
131 |             x = x << 8 | T::from(c);
132 |             i += 1;
133 |         }
134 | 
135 |         // Write it to `s`
136 |         let mut i = 0;
137 |         while i < n {
138 |             atomic_store_unordered(s.add(i), x);
139 |             i += 1;
140 |         }
141 |     }
142 | }
143 | 
144 | intrinsics!
{ 145 | #[cfg(target_has_atomic_load_store = "8")] 146 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 147 | pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () { 148 | memcpy_element_unordered_atomic(dest, src, bytes); 149 | } 150 | #[cfg(target_has_atomic_load_store = "16")] 151 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 152 | pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () { 153 | memcpy_element_unordered_atomic(dest, src, bytes); 154 | } 155 | #[cfg(target_has_atomic_load_store = "32")] 156 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 157 | pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () { 158 | memcpy_element_unordered_atomic(dest, src, bytes); 159 | } 160 | #[cfg(target_has_atomic_load_store = "64")] 161 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 162 | pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () { 163 | memcpy_element_unordered_atomic(dest, src, bytes); 164 | } 165 | #[cfg(target_has_atomic_load_store = "128")] 166 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 167 | pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () { 168 | memcpy_element_unordered_atomic(dest, src, bytes); 169 | } 170 | 171 | #[cfg(target_has_atomic_load_store = "8")] 172 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 173 | pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () { 174 | memmove_element_unordered_atomic(dest, src, bytes); 175 | } 176 | #[cfg(target_has_atomic_load_store = "16")] 177 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 178 | pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () { 179 | memmove_element_unordered_atomic(dest, src, bytes); 180 | } 181 | #[cfg(target_has_atomic_load_store = "32")] 182 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 183 | pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () { 184 | memmove_element_unordered_atomic(dest, src, bytes); 185 | } 186 | #[cfg(target_has_atomic_load_store = "64")] 187 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 188 | pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () { 189 | memmove_element_unordered_atomic(dest, src, bytes); 190 | } 191 | #[cfg(target_has_atomic_load_store = "128")] 192 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 193 | pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () { 194 | memmove_element_unordered_atomic(dest, src, bytes); 195 | } 196 | 197 | #[cfg(target_has_atomic_load_store = "8")] 198 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 199 | pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut 
u8, c: u8, bytes: usize) -> () { 200 | memset_element_unordered_atomic(s, c, bytes); 201 | } 202 | #[cfg(target_has_atomic_load_store = "16")] 203 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 204 | pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_2(s: *mut u16, c: u8, bytes: usize) -> () { 205 | memset_element_unordered_atomic(s, c, bytes); 206 | } 207 | #[cfg(target_has_atomic_load_store = "32")] 208 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 209 | pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_4(s: *mut u32, c: u8, bytes: usize) -> () { 210 | memset_element_unordered_atomic(s, c, bytes); 211 | } 212 | #[cfg(target_has_atomic_load_store = "64")] 213 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 214 | pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () { 215 | memset_element_unordered_atomic(s, c, bytes); 216 | } 217 | #[cfg(target_has_atomic_load_store = "128")] 218 | #[cfg_attr(not(all(target_os = "windows", target_env = "gnu")), linkage = "weak")] 219 | pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () { 220 | memset_element_unordered_atomic(s, c, bytes); 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /src/mem/x86_64.rs: -------------------------------------------------------------------------------- 1 | // On most modern Intel and AMD processors, "rep movsq" and "rep stosq" have 2 | // been enhanced to perform better than an simple qword loop, making them ideal 3 | // for implementing memcpy/memset. Note that "rep cmps" has received no such 4 | // enhancement, so it is not used to implement memcmp. 5 | // 6 | // On certain recent Intel processors, "rep movsb" and "rep stosb" have been 7 | // further enhanced to automatically select the best microarchitectural 8 | // implementation based on length and alignment. See the following features from 9 | // the "Intel® 64 and IA-32 Architectures Optimization Reference Manual": 10 | // - ERMSB - Enhanced REP MOVSB and STOSB (Ivy Bridge and later) 11 | // - FSRM - Fast Short REP MOV (Ice Lake and later) 12 | // - Fast Zero-Length MOVSB (On no current hardware) 13 | // - Fast Short STOSB (On no current hardware) 14 | // 15 | // To simplify things, we switch to using the byte-based variants if the "ermsb" 16 | // feature is present at compile-time. We don't bother detecting other features. 17 | // Note that ERMSB does not enhance the backwards (DF=1) "rep movsb". 18 | 19 | use core::{arch::asm, intrinsics, mem}; 20 | 21 | #[inline(always)] 22 | #[cfg(target_feature = "ermsb")] 23 | pub unsafe fn copy_forward(dest: *mut u8, src: *const u8, count: usize) { 24 | // FIXME: Use the Intel syntax once we drop LLVM 9 support on rust-lang/rust. 25 | core::arch::asm!( 26 | "repe movsb (%rsi), (%rdi)", 27 | inout("rcx") count => _, 28 | inout("rdi") dest => _, 29 | inout("rsi") src => _, 30 | options(att_syntax, nostack, preserves_flags) 31 | ); 32 | } 33 | 34 | #[inline(always)] 35 | #[cfg(not(target_feature = "ermsb"))] 36 | pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, count: usize) { 37 | let (pre_byte_count, qword_count, byte_count) = rep_param(dest, count); 38 | // Separating the blocks gives the compiler more freedom to reorder instructions. 
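    // The three asm! blocks below implement that split: a byte copy to align
    // the destination, a qword copy for the bulk, and a byte copy for the tail.
    // Worked example (illustrative numbers, not from the original source):
    // copying 100 bytes to a destination address ending in 0b101 gives
    // rep_param = (3, 12, 1), i.e. 3 alignment bytes, 12 qwords (96 bytes) and
    // 1 trailing byte.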
39 |     asm!(
40 |         "rep movsb",
41 |         inout("ecx") pre_byte_count => _,
42 |         inout("rdi") dest => dest,
43 |         inout("rsi") src => src,
44 |         options(att_syntax, nostack, preserves_flags)
45 |     );
46 |     asm!(
47 |         "rep movsq",
48 |         inout("rcx") qword_count => _,
49 |         inout("rdi") dest => dest,
50 |         inout("rsi") src => src,
51 |         options(att_syntax, nostack, preserves_flags)
52 |     );
53 |     asm!(
54 |         "rep movsb",
55 |         inout("ecx") byte_count => _,
56 |         inout("rdi") dest => _,
57 |         inout("rsi") src => _,
58 |         options(att_syntax, nostack, preserves_flags)
59 |     );
60 | }
61 | 
62 | #[inline(always)]
63 | pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, count: usize) {
64 |     let (pre_byte_count, qword_count, byte_count) = rep_param(dest, count);
65 |     // We can't separate this block due to std/cld
66 |     asm!(
67 |         "std",
68 |         "rep movsb",
69 |         "sub $7, %rsi",
70 |         "sub $7, %rdi",
71 |         "mov {qword_count}, %rcx",
72 |         "rep movsq",
73 |         "test {pre_byte_count:e}, {pre_byte_count:e}",
74 |         "add $7, %rsi",
75 |         "add $7, %rdi",
76 |         "mov {pre_byte_count:e}, %ecx",
77 |         "rep movsb",
78 |         "cld",
79 |         pre_byte_count = in(reg) pre_byte_count,
80 |         qword_count = in(reg) qword_count,
81 |         inout("ecx") byte_count => _,
82 |         inout("rdi") dest.add(count - 1) => _,
83 |         inout("rsi") src.add(count - 1) => _,
84 |         // We modify flags, but we restore them afterwards
85 |         options(att_syntax, nostack, preserves_flags)
86 |     );
87 | }
88 | 
89 | #[inline(always)]
90 | #[cfg(target_feature = "ermsb")]
91 | pub unsafe fn set_bytes(dest: *mut u8, c: u8, count: usize) {
92 |     // FIXME: Use the Intel syntax once we drop LLVM 9 support on rust-lang/rust.
93 |     core::arch::asm!(
94 |         "repe stosb %al, (%rdi)",
95 |         inout("rcx") count => _,
96 |         inout("rdi") dest => _,
97 |         inout("al") c => _,
98 |         options(att_syntax, nostack, preserves_flags)
99 |     )
100 | }
101 | 
102 | #[inline(always)]
103 | #[cfg(not(target_feature = "ermsb"))]
104 | pub unsafe fn set_bytes(mut dest: *mut u8, c: u8, count: usize) {
105 |     let c = c as u64 * 0x0101_0101_0101_0101;
106 |     let (pre_byte_count, qword_count, byte_count) = rep_param(dest, count);
107 |     // Separating the blocks gives the compiler more freedom to reorder instructions.
108 |     asm!(
109 |         "rep stosb",
110 |         inout("ecx") pre_byte_count => _,
111 |         inout("rdi") dest => dest,
112 |         in("rax") c,
113 |         options(att_syntax, nostack, preserves_flags)
114 |     );
115 |     asm!(
116 |         "rep stosq",
117 |         inout("rcx") qword_count => _,
118 |         inout("rdi") dest => dest,
119 |         in("rax") c,
120 |         options(att_syntax, nostack, preserves_flags)
121 |     );
122 |     asm!(
123 |         "rep stosb",
124 |         inout("ecx") byte_count => _,
125 |         inout("rdi") dest => _,
126 |         in("rax") c,
127 |         options(att_syntax, nostack, preserves_flags)
128 |     );
129 | }
130 | 
131 | #[inline(always)]
132 | pub unsafe fn compare_bytes(a: *const u8, b: *const u8, n: usize) -> i32 {
133 |     #[inline(always)]
134 |     unsafe fn cmp<T, U, F>(mut a: *const T, mut b: *const T, n: usize, f: F) -> i32
135 |     where
136 |         T: Clone + Copy + Eq,
137 |         U: Clone + Copy + Eq,
138 |         F: FnOnce(*const U, *const U, usize) -> i32,
139 |     {
140 |         // Ensure T is not a ZST.
141 |         const { assert!(mem::size_of::<T>() != 0) };
142 | 
143 |         let end = a.add(intrinsics::unchecked_div(n, mem::size_of::<T>()));
144 |         while a != end {
145 |             if a.read_unaligned() != b.read_unaligned() {
146 |                 return f(a.cast(), b.cast(), mem::size_of::<T>());
147 |             }
148 |             a = a.add(1);
149 |             b = b.add(1);
150 |         }
151 |         f(
152 |             a.cast(),
153 |             b.cast(),
154 |             intrinsics::unchecked_rem(n, mem::size_of::<T>()),
155 |         )
156 |     }
157 |     let c1 = |mut a: *const u8, mut b: *const u8, n| {
158 |         for _ in 0..n {
159 |             if a.read() != b.read() {
160 |                 return i32::from(a.read()) - i32::from(b.read());
161 |             }
162 |             a = a.add(1);
163 |             b = b.add(1);
164 |         }
165 |         0
166 |     };
167 |     let c2 = |a: *const u16, b, n| cmp(a, b, n, c1);
168 |     let c4 = |a: *const u32, b, n| cmp(a, b, n, c2);
169 |     let c8 = |a: *const u64, b, n| cmp(a, b, n, c4);
170 |     let c16 = |a: *const u128, b, n| cmp(a, b, n, c8);
171 |     c16(a.cast(), b.cast(), n)
172 | }
173 | 
174 | /// Determine optimal parameters for a `rep` instruction.
175 | fn rep_param(dest: *mut u8, mut count: usize) -> (usize, usize, usize) {
176 |     // Unaligned writes are still slow on modern processors, so align the destination address.
177 |     let pre_byte_count = ((8 - (dest as usize & 0b111)) & 0b111).min(count);
178 |     count -= pre_byte_count;
179 |     let qword_count = count >> 3;
180 |     let byte_count = count & 0b111;
181 |     (pre_byte_count, qword_count, byte_count)
182 | }
183 | 
--------------------------------------------------------------------------------
/src/net.rs:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/src/prelude.rs:
--------------------------------------------------------------------------------
1 | pub use crate::{eprint, eprintln, print, println};
2 | 
--------------------------------------------------------------------------------
/src/spinlock.rs:
--------------------------------------------------------------------------------
1 | use core::{
2 |     cell::UnsafeCell,
3 |     ops::{Deref, DerefMut},
4 |     sync::atomic::{
5 |         AtomicBool,
6 |         Ordering::{Acquire, Release},
7 |     },
8 | };
9 | 
10 | pub struct SpinLock<T> {
11 |     locked: AtomicBool,
12 |     contents: UnsafeCell<T>,
13 | }
14 | 
15 | unsafe impl<T> Sync for SpinLock<T> {}
16 | 
17 | pub struct SpinLockGuard<'a, T> {
18 |     lock: &'a SpinLock<T>,
19 | }
20 | 
21 | impl<T> SpinLock<T> {
22 |     pub const fn new(inner: T) -> Self {
23 |         Self {
24 |             locked: AtomicBool::new(false),
25 |             contents: UnsafeCell::new(inner),
26 |         }
27 |     }
28 | 
29 |     pub fn lock(&self) -> SpinLockGuard<T> {
30 |         while self
31 |             .locked
32 |             .compare_exchange(false, true, Acquire, Acquire)
33 |             .is_err()
34 |         {
35 |             core::hint::spin_loop();
36 |         }
37 |         SpinLockGuard { lock: self }
38 |     }
39 | }
40 | 
41 | impl<T> Drop for SpinLockGuard<'_, T> {
42 |     fn drop(&mut self) {
43 |         self.lock.locked.store(false, Release);
44 |     }
45 | }
46 | 
47 | impl<T> Deref for SpinLockGuard<'_, T> {
48 |     type Target = T;
49 | 
50 |     fn deref(&self) -> &T {
51 |         unsafe { &*self.lock.contents.get() }
52 |     }
53 | }
54 | 
55 | impl<T> DerefMut for SpinLockGuard<'_, T> {
56 |     fn deref_mut(&mut self) -> &mut T {
57 |         unsafe { &mut *self.lock.contents.get() }
58 |     }
59 | }
60 | 
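// A minimal usage sketch for the SpinLock above (illustrative; not part of the
// repository sources). It relies only on the `new`, `lock`, and Deref/DerefMut
// APIs shown in spinlock.rs.
static EXAMPLE_COUNTER: SpinLock<u64> = SpinLock::new(0);

fn bump_example_counter() -> u64 {
    let mut guard = EXAMPLE_COUNTER.lock(); // spins until the flag is acquired
    *guard += 1;                            // DerefMut hands out a &mut u64
    *guard                                  // the lock is released when `guard` drops
}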
--------------------------------------------------------------------------------
/src/syscalls.rs:
--------------------------------------------------------------------------------
1 | use crate::{CStr, Error};
2 | use core::{marker::PhantomData, mem};
3 | use libc::c_int;
4 | use sc::syscall;
5 | 
6 | #[inline]
7 | pub fn read(fd: c_int, bytes: &mut [u8]) -> Result<usize, Error> {
8 |     unsafe { syscall!(READ, fd, bytes.as_mut_ptr(), bytes.len()) }.usize_result()
9 | }
10 | 
11 | #[inline]
12 | pub fn write(fd: c_int, bytes: &[u8]) -> Result<usize, Error> {
13 |     unsafe { syscall!(WRITE, fd, bytes.as_ptr(), bytes.len()) }.usize_result()
14 | }
15 | 
16 | // For directories RDONLY | DIRECTORY | CLOEXEC
17 | bitflags::bitflags! {
18 |     pub struct OpenFlags: c_int {
19 |         const RDONLY = libc::O_RDONLY;
20 |         const WRONLY = libc::O_WRONLY;
21 |         const RDWR = libc::O_RDWR;
22 |         const APPEND = libc::O_APPEND;
23 |         const ASYNC = libc::O_ASYNC;
24 |         const CLOEXEC = libc::O_CLOEXEC;
25 |         const CREAT = libc::O_CREAT;
26 |         const DIRECT = libc::O_DIRECT;
27 |         const DIRECTORY = libc::O_DIRECTORY;
28 |         const DSYNC = libc::O_DSYNC;
29 |         const EXCL = libc::O_EXCL;
30 |         const LARGEFILE = libc::O_LARGEFILE;
31 |         const NOATIME = libc::O_NOATIME;
32 |         const NOCTTY = libc::O_NOCTTY;
33 |         const NOFOLLOW = libc::O_NOFOLLOW;
34 |         const NONBLOCK = libc::O_NONBLOCK;
35 |         const PATH = libc::O_PATH;
36 |         const SYNC = libc::O_SYNC;
37 |         const TMPFILE = libc::O_TMPFILE;
38 |         const TRUNC = libc::O_TRUNC;
39 |     }
40 | }
41 | 
42 | bitflags::bitflags! {
43 |     pub struct OpenMode: libc::c_uint {
44 |         const RWXU = libc::S_IRWXU;
45 |         const RUSR = libc::S_IRUSR;
46 |         const WUSR = libc::S_IWUSR;
47 |         const XUSR = libc::S_IXUSR;
48 |         const RWXG = libc::S_IRWXG;
49 |         const RGRP = libc::S_IRGRP;
50 |         const WGRP = libc::S_IWGRP;
51 |         const XGRP = libc::S_IXGRP;
52 |         const RWXO = libc::S_IRWXO;
53 |         const ROTH = libc::S_IROTH;
54 |         const WOTH = libc::S_IWOTH;
55 |         const XOTH = libc::S_IXOTH;
56 |         const SUID = libc::S_ISUID;
57 |         const SGID = libc::S_ISGID;
58 |         const SVTX = libc::S_ISVTX;
59 |     }
60 | }
61 | 
62 | #[inline]
63 | pub fn openat(at_fd: c_int, path: CStr, flags: OpenFlags, mode: OpenMode) -> Result<c_int, Error> {
64 |     unsafe { syscall!(OPENAT, at_fd, path.as_ptr(), flags.bits(), mode.bits()) }
65 |         .to_result_and(|n| n as c_int)
66 | }
67 | 
68 | #[inline]
69 | pub fn close(fd: c_int) -> Result<(), Error> {
70 |     unsafe { syscall!(CLOSE, fd) }.null_result()
71 | }
72 | 
73 | #[inline]
74 | pub fn fstat(fd: c_int) -> Result<libc::stat, Error> {
75 |     unsafe {
76 |         let mut status: libc::stat = mem::zeroed();
77 |         syscall!(FSTAT, fd, &mut status as *mut libc::stat).to_result_with(status)
78 |     }
79 | }
80 | 
81 | #[inline]
82 | pub fn lstat(path: CStr) -> Result<libc::stat, Error> {
83 |     unsafe {
84 |         let mut status: libc::stat = mem::zeroed();
85 |         syscall!(LSTAT, path.as_ptr(), &mut status as *mut libc::stat).to_result_with(status)
86 |     }
87 | }
88 | 
89 | #[inline]
90 | pub fn ppoll(
91 |     fds: &mut [libc::pollfd],
92 |     timeout: &libc::timespec,
93 |     sigmask: &libc::sigset_t,
94 | ) -> Result<usize, Error> {
95 |     unsafe {
96 |         syscall!(
97 |             PPOLL,
98 |             fds.as_mut_ptr(),
99 |             fds.len(),
100 |             timeout as *const libc::timespec,
101 |             sigmask as *const libc::sigset_t
102 |         )
103 |     }
104 |     .usize_result()
105 | }
106 | 
107 | #[derive(Clone, Copy)]
108 | pub enum SeekFrom {
109 |     Start,
110 |     End,
111 |     Current,
112 | }
113 | 
114 | #[inline]
115 | pub fn lseek(fd: c_int, seek_mode: SeekFrom, offset: usize) -> Result<usize, Error> {
116 |     let seek_mode = match seek_mode {
117 |         SeekFrom::Start => libc::SEEK_SET,
118 |         SeekFrom::End => libc::SEEK_END,
119 |         SeekFrom::Current => libc::SEEK_CUR,
120 |     };
121 |     unsafe { syscall!(LSEEK, fd, seek_mode, offset) }.usize_result()
122 | }
123 | 
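// Illustrative sketch (not part of this file): calling the wrappers above from
// crate code. On Linux, fd 1 is stdout and fd 0 is stdin.
fn _write_then_read_demo() -> Result<(), Error> {
    let greeting = b"hello from raw syscalls\n";
    let mut written = 0;
    // write() may perform a short write, so loop until everything is out.
    while written < greeting.len() {
        written += write(1, &greeting[written..])?;
    }
    let mut buf = [0u8; 64];
    let n = read(0, &mut buf)?; // number of bytes actually read
    let _input = &buf[..n];
    Ok(())
}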
124 | #[inline]
125 | pub fn mmap(
126 |     addr: *mut u8,
127 |     len: usize,
128 |     prot: i32,
129 |     flags: i32,
130 |     fd: i32,
131 |     offset: isize,
132 | ) -> Result<*mut u8, Error> {
133 |     unsafe { syscall!(MMAP, addr, len, prot, flags, fd, offset) }.to_result_and(|n| n as *mut u8)
134 | }
135 | 
136 | #[inline]
137 | pub fn mprotect(memory: &[u8], protection: c_int) -> Result<(), Error> {
138 |     unsafe { syscall!(MPROTECT, memory.as_ptr(), memory.len(), protection) }.null_result()
139 | }
140 | 
141 | /// munmap
142 | ///
143 | /// # Safety
144 | ///
145 | /// The specified memory region must not be used after this function is called
146 | #[inline]
147 | pub unsafe fn munmap(addr: *mut u8, len: usize) -> Result<(), Error> {
148 |     syscall!(MUNMAP, addr, len).null_result()
149 | }
150 | 
151 | #[inline]
152 | pub fn brk(addr: *mut u8) -> Result<*mut u8, Error> {
153 |     unsafe { syscall!(BRK, addr) }.to_result_and(|n| n as *mut u8)
154 | }
155 | 
156 | // Wraps the rt_sigaction call in the same way that glibc does
157 | // So I guess there's no way to use normal signals, only realtime signals?
158 | #[inline]
159 | pub fn sigaction(
160 |     signal: c_int,
161 |     action: &libc::sigaction,
162 |     old_action: &mut libc::sigaction,
163 | ) -> Result<(), Error> {
164 |     unsafe {
165 |         syscall!(
166 |             RT_SIGACTION,
167 |             signal,
168 |             action as *const libc::sigaction,
169 |             old_action as *mut libc::sigaction,
170 |             mem::size_of::<libc::sigset_t>()
171 |         )
172 |     }
173 |     .to_result_with(())
174 | }
175 | 
176 | // sigprocmask
177 | 
178 | // sigreturn
179 | 
180 | #[macro_export]
181 | macro_rules! ioctl {
182 |     ($fd:expr, $request:expr, $($arg:expr),*) => {
183 |         unsafe { syscall!(IOCTL, $fd, $request, $($arg),*) }.usize_result()
184 |     };
185 |     ($fd:expr, $request:expr, $($arg:expr),*,) => {
186 |         ioctl!($fd, $request, $($arg),*)
187 |     };
188 | }
189 | 
190 | #[inline]
191 | pub fn pread64(fd: c_int, buf: &mut [u8], offset: usize) -> Result<usize, Error> {
192 |     unsafe { syscall!(PREAD64, fd, buf.as_mut_ptr(), buf.len(), offset) }.usize_result()
193 | }
194 | 
195 | #[inline]
196 | pub fn pwrite64(fd: c_int, buf: &[u8], offset: usize) -> Result<usize, Error> {
197 |     unsafe { syscall!(PWRITE64, fd, buf.as_ptr(), buf.len(), offset) }.usize_result()
198 | }
199 | 
200 | pub struct IoVec<'a> {
201 |     #[allow(dead_code)]
202 |     iov_base: *mut u8,
203 |     #[allow(dead_code)]
204 |     iov_len: usize,
205 |     _danny: PhantomData<&'a mut u8>,
206 | }
207 | 
208 | #[inline]
209 | pub fn readv(fd: c_int, iovec: &'_ mut [IoVec<'_>]) -> Result<usize, Error> {
210 |     unsafe { syscall!(READV, fd, iovec.as_mut_ptr(), iovec.len()) }.usize_result()
211 | }
212 | 
213 | #[inline]
214 | pub fn writev(fd: c_int, iovec: &'_ [IoVec<'_>]) -> Result<usize, Error> {
215 |     unsafe { syscall!(WRITEV, fd, iovec.as_ptr(), iovec.len()) }.usize_result()
216 | }
217 | 
218 | bitflags::bitflags! {
219 |     pub struct Mode: c_int {
220 |         const F_OK = 0;
221 |         const R_OK = 4;
222 |         const W_OK = 2;
223 |         const X_OK = 1;
224 |     }
225 | }
226 | 
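// Illustrative sketch (not part of this file): shuttling a few bytes through a
// pipe using the Pipe2Flags/pipe2 wrappers defined just below together with
// read/write/close from above.
fn _pipe_roundtrip_demo() -> Result<(), Error> {
    let [read_end, write_end] = pipe2(Pipe2Flags::CLOEXEC)?;
    write(write_end, b"ping")?;
    let mut buf = [0u8; 4];
    read(read_end, &mut buf)?;
    close(write_end)?;
    close(read_end)?;
    Ok(())
}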
227 | bitflags::bitflags! {
228 |     pub struct Pipe2Flags: c_int {
229 |         const CLOEXEC = libc::O_CLOEXEC;
230 |         const DIRECT = libc::O_DIRECT;
231 |         const NONBLOCK = libc::O_NONBLOCK;
232 |     }
233 | }
234 | 
235 | #[inline]
236 | pub fn pipe2(flags: Pipe2Flags) -> Result<[c_int; 2], Error> {
237 |     let mut pipefd: [c_int; 2] = [0, 0];
238 |     unsafe { syscall!(PIPE2, pipefd.as_mut_ptr(), flags.bits()) }.to_result_with(pipefd)
239 | }
240 | 
241 | #[inline]
242 | pub fn sched_yield() -> Result<(), Error> {
243 |     unsafe { syscall!(SCHED_YIELD) }.null_result()
244 | }
245 | 
246 | #[inline]
247 | pub fn mremap(
248 |     old_address: *mut u8,
249 |     old_size: usize,
250 |     new_size: usize,
251 |     flags: c_int,
252 | ) -> Result<*mut u8, Error> {
253 |     unsafe { syscall!(MREMAP, old_address, old_size, new_size, flags) }
254 |         .to_result_and(|n| n as *mut u8)
255 | }
256 | 
257 | bitflags::bitflags! {
258 |     pub struct MSync: c_int {
259 |         const ASYNC = libc::MS_ASYNC;
260 |         const SYNC = libc::MS_SYNC;
261 |         const INVALIDATE = libc::MS_INVALIDATE;
262 |     }
263 | }
264 | 
265 | #[inline]
266 | pub fn msync(memory: &[u8], flags: MSync) -> Result<(), Error> {
267 |     unsafe { syscall!(MSYNC, memory.as_ptr(), memory.len(), flags.bits()) }.null_result()
268 | }
269 | 
270 | #[inline]
271 | pub fn mincore(memory: &[u8], status: &mut [u8]) -> Result<(), Error> {
272 |     if status.len() < memory.len().div_ceil(4096) {
273 |         return Err(Error(libc::EINVAL));
274 |     }
275 |     unsafe { syscall!(MINCORE, memory.as_ptr(), memory.len(), status.as_mut_ptr()) }.null_result()
276 | }
277 | 
278 | bitflags::bitflags! {
279 |     pub struct Advice: c_int {
280 |         const NORMAL = libc::MADV_NORMAL;
281 |         const RANDOM = libc::MADV_RANDOM;
282 |         const SEQUENTIAL = libc::MADV_SEQUENTIAL;
283 |         const WILLNEED = libc::MADV_WILLNEED;
284 |         const DONTNEED = libc::MADV_DONTNEED;
285 |         const REMOVE = libc::MADV_REMOVE;
286 |         const DONTFORK = libc::MADV_DONTFORK;
287 |         const DOFORK = libc::MADV_DOFORK;
288 |         const HWPOISON = libc::MADV_HWPOISON;
289 |         const MERGEABLE = libc::MADV_MERGEABLE;
290 |         const UNMERGEABLE = libc::MADV_UNMERGEABLE;
291 |         const SOFT_OFFLINE = libc::MADV_SOFT_OFFLINE;
292 |         const HUGEPAGE = libc::MADV_HUGEPAGE;
293 |         const NOHUGEPAGE = libc::MADV_NOHUGEPAGE;
294 |         const DONTDUMP = libc::MADV_DONTDUMP;
295 |         const DODUMP = libc::MADV_DODUMP;
296 |         const FREE = libc::MADV_FREE;
297 |         //const WIPEONFORK = libc::MADV_WIPEONFORK;
298 |         //const KEEPONFORK = libc::MADV_KEEPONFORK;
299 |     }
300 | }
301 | 
302 | #[inline]
303 | pub fn madvise(memory: &[u8], advice: Advice) -> Result<(), Error> {
304 |     unsafe { syscall!(MADVISE, memory.as_ptr(), memory.len(), advice.bits()) }.null_result()
305 | }
306 | 
307 | bitflags::bitflags! {
308 |     pub struct ShmFlags: c_int {
309 |         const CREAT = libc::IPC_CREAT;
310 |         const EXCL = libc::IPC_EXCL;
311 |         const HUGETLB = libc::SHM_HUGETLB;
312 |         const HUGE_2MB = libc::MAP_HUGE_2MB;
313 |         const HUGE_1GB = libc::MAP_HUGE_1GB;
314 |         const NORESERVE = libc::SHM_NORESERVE;
315 |     }
316 | }
317 | 
318 | #[inline]
319 | pub fn shmget(key: libc::key_t, size: usize, shmflg: ShmFlags) -> Result<libc::key_t, Error> {
320 |     unsafe { syscall!(SHMGET, key, size, shmflg.bits()) }.to_result_and(|key| key as libc::key_t)
321 | }
322 | 
323 | // shmctl
324 | //
325 | // dup
326 | //
327 | // dup2
328 | //
329 | // pause
330 | //
331 | // nanosleep
332 | //
333 | // getitimer
334 | //
335 | // alarm
336 | //
337 | // setitimer
338 | 
339 | #[inline]
340 | pub fn getpid() -> libc::pid_t {
341 |     unsafe { syscall!(GETPID) as libc::pid_t }
342 | }
343 | 
344 | // sendfile
345 | //
346 | // socket
347 | //
348 | // connect
349 | //
350 | // accept
351 | //
352 | // sendto
353 | //
354 | // recvfrom
355 | //
356 | // sendmsg
357 | //
358 | // recvmsg
359 | //
360 | // shutdown
361 | //
362 | // bind
363 | //
364 | // listen
365 | //
366 | // getsockname
367 | //
368 | // getpeername
369 | //
370 | // socketpair
371 | //
372 | // setsockopt
373 | //
374 | // getsockopt
375 | 
376 | bitflags::bitflags! {
377 |     pub struct CloneFlags: i32 {
378 |         const VM = libc::CLONE_VM;
379 |         const FS = libc::CLONE_FS;
380 |         const FILES = libc::CLONE_FILES;
381 |         const SIGHAND = libc::CLONE_SIGHAND;
382 |         const THREAD = libc::CLONE_THREAD;
383 |         const SYSVSEM = libc::CLONE_SYSVSEM;
384 |         const SETTLS = libc::CLONE_SETTLS;
385 |         const PARENT_SETTID = libc::CLONE_PARENT_SETTID;
386 |         const CHILD_CLEARTID = libc::CLONE_CHILD_CLEARTID;
387 |     }
388 | }
389 | 
390 | /// # Safety
391 | ///
392 | /// The memory pointed to by stack is given up to the thread and should be part of its own mapping
393 | #[inline]
394 | pub unsafe fn clone(
395 |     flags: CloneFlags,
396 |     stack: *mut u8,
397 |     parent_tid: &mut libc::pid_t,
398 |     child_tid: &mut libc::pid_t,
399 |     tls: &mut u8,
400 | ) -> Result<libc::pid_t, Error> {
401 |     syscall!(
402 |         CLONE,
403 |         flags.bits(),
404 |         stack,
405 |         parent_tid as *mut libc::pid_t,
406 |         tls as *mut u8,
407 |         child_tid as *mut libc::pid_t
408 |     )
409 |     .to_result_and(|v| v as libc::pid_t)
410 | }
411 | 
412 | //
413 | // clone
414 | //
415 | // fork
416 | //
417 | // execve
418 | 
419 | #[inline]
420 | pub fn exit(error_code: c_int) -> ! {
421 |     unsafe {
422 |         syscall!(EXIT, error_code);
423 |         core::hint::unreachable_unchecked();
424 |     }
425 | }
426 | 
427 | // wait4
428 | 
429 | // Require that it is non-negative
430 | pub struct Pid(pub libc::pid_t);
431 | 
432 | pub enum SignalWhere {
433 |     Exactly(usize),
434 |     CurrentGroup,
435 |     AllValid,
436 |     Group(usize),
437 | }
438 | #[inline]
439 | pub fn kill(pid: usize, signal: i32) -> Result<(), Error> {
440 |     unsafe { syscall!(KILL, pid, signal) }.null_result()
441 | }
442 | 
443 | // uname
444 | 
445 | pub enum FutexOp<'a> {
446 |     Wait {
447 |         expected: c_int,
448 |         timeout: Option<libc::timespec>,
449 |     },
450 |     Wake {
451 |         wake_at_most: c_int,
452 |     },
453 |     Requeue {
454 |         wake_at_most: c_int,
455 |         requeue_onto: &'a mut c_int,
456 |         max_to_requeue: c_int,
457 |     },
458 | }
459 | 
460 | #[inline]
461 | pub fn futex(lock: &mut c_int, op: FutexOp<'_>, private: bool) -> Result<(), Error> {
462 |     let lock = lock as *mut c_int;
463 |     let private = if private { libc::FUTEX_PRIVATE_FLAG } else { 0 };
464 |     unsafe {
465 |         match op {
466 |             FutexOp::Wait { expected, timeout } => {
467 |                 let timeout = timeout
468 |                     .map(|mut t| &mut t as *mut libc::timespec)
469 |                     .unwrap_or(core::ptr::null_mut());
470 |                 syscall!(FUTEX, lock, libc::FUTEX_WAIT | private, expected, timeout)
471 |             }
472 |             FutexOp::Wake { wake_at_most } => {
473 |                 syscall!(FUTEX, lock, libc::FUTEX_WAKE | private, wake_at_most)
474 |             }
475 |             FutexOp::Requeue {
476 |                 wake_at_most,
477 |                 requeue_onto,
478 |                 max_to_requeue,
479 |             } => syscall!(
480 |                 FUTEX,
481 |                 lock,
482 |                 libc::FUTEX_REQUEUE | private,
483 |                 wake_at_most,
484 |                 max_to_requeue,
485 |                 requeue_onto as *mut c_int
486 |             ),
487 |         }
488 |     }
489 |     .null_result()
490 | }
491 | 
492 | #[inline]
493 | pub fn fstatat(fd: c_int, name: CStr) -> Result<libc::stat64, Error> {
494 |     unsafe {
495 |         let mut stats = mem::zeroed();
496 |         syscall!(
497 |             NEWFSTATAT,
498 |             fd,
499 |             name.as_ptr(),
500 |             &mut stats as *mut libc::stat64,
501 |             0
502 |         )
503 |         .to_result_with(stats)
504 |     }
505 | }
506 | 
507 | #[inline]
508 | pub fn lstatat(fd: c_int, name: CStr) -> Result<libc::stat64, Error> {
509 |     unsafe {
510 |         let mut stats = mem::zeroed();
511 |         syscall!(
512 |             NEWFSTATAT,
513 |             fd,
514 |             name.as_ptr(),
515 |             &mut stats as *mut libc::stat64,
516 |             libc::AT_SYMLINK_NOFOLLOW
517 |         )
518 |         .to_result_with(stats)
519 |     }
520 | }
521 | 
522 | #[inline]
523 | pub fn getdents64(fd: c_int, buf: &mut [u8]) -> Result<usize, Error> {
524 |     unsafe { syscall!(GETDENTS64, fd, buf.as_mut_ptr(), buf.len()) }.to_result_and(|n| n)
525 | }
526 | 
527 | #[inline]
528 | pub fn faccessat(fd: c_int, name: CStr, mode: c_int) -> Result<(), Error> {
529 |     unsafe { syscall!(FACCESSAT, fd, name.as_ptr(), mode) }.null_result()
530 | }
531 | 
532 | #[inline]
533 | pub fn readlinkat<'a>(fd: c_int, name: CStr, buf: &'a mut [u8]) -> Result<&'a [u8], Error> {
534 |     match unsafe { syscall!(READLINKAT, fd, name.as_ptr(), buf.as_mut_ptr(), buf.len()) }
535 |         .to_result_and(|n| n)
536 |     {
537 |         Ok(n) => Ok(buf.get(..n).unwrap_or_default()),
538 |         Err(e) => Err(e),
539 |     }
540 | }
541 | 
542 | #[inline]
543 | pub fn gettimeofday() -> Result<libc::timeval, Error> {
544 |     let mut tv = libc::timeval {
545 |         tv_sec: 0,
546 |         tv_usec: 0,
547 |     };
548 |     unsafe { syscall!(GETTIMEOFDAY, &mut tv as *mut libc::timeval, 0) }.to_result_with(tv)
549 | }
550 | 
551 | #[inline]
552 | pub fn winsize() -> Result<libc::winsize, Error> {
553 |     unsafe {
554 |         let mut winsize: libc::winsize = mem::zeroed();
555 |         syscall!(
556 |             IOCTL,
557 |             libc::STDOUT_FILENO,
558 |             libc::TIOCGWINSZ,
559 |             &mut winsize as *mut libc::winsize
560 |         )
561 |         .to_result_with(winsize)
562 |     }
563 | }
564 | 
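// Worked example of the errno convention handled by the SyscallRet helpers
// below (illustrative comment, not part of the source): on Linux a failing
// syscall returns the negated errno, so a raw value of -2 reinterpreted as a
// usize becomes Err(Error(2)), i.e. ENOENT, while non-negative raw values are
// passed through as the Ok result.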
565 | trait SyscallRet: Sized {
566 |     fn to_result_with<T>(self, t: T) -> Result<T, Error>;
567 |     fn to_result_and<T, F>(self, f: F) -> Result<T, Error>
568 |     where
569 |         F: FnOnce(Self) -> T,
570 |         Self: Sized;
571 | 
572 |     fn usize_result(self) -> Result<usize, Error>;
573 | 
574 |     #[inline]
575 |     fn null_result(self) -> Result<(), Error> {
576 |         self.to_result_with(())
577 |     }
578 | }
579 | 
580 | impl SyscallRet for usize {
581 |     #[inline]
582 |     fn to_result_with<T>(self, t: T) -> Result<T, Error> {
583 |         let ret = self as isize;
584 |         if ret < 0 {
585 |             Err(Error(-ret as c_int))
586 |         } else {
587 |             Ok(t)
588 |         }
589 |     }
590 | 
591 |     #[inline]
592 |     fn to_result_and<T, F>(self, f: F) -> Result<T, Error>
593 |     where
594 |         F: FnOnce(Self) -> T,
595 |         Self: Sized,
596 |     {
597 |         let ret = self as isize;
598 |         if ret < 0 {
599 |             Err(Error(-ret as c_int))
600 |         } else {
601 |             Ok(f(self))
602 |         }
603 |     }
604 | 
605 |     #[inline]
606 |     fn usize_result(self) -> Result<usize, Error> {
607 |         self.to_result_and(|n| n)
608 |     }
609 | }
610 | 
--------------------------------------------------------------------------------
/veneer-macros/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "veneer-macros"
3 | version = "0.1.0"
4 | authors = ["Ben Kimock "]
5 | license = "MIT OR Apache-2.0"
6 | description = "veneer's proc macros"
7 | documentation = "https://docs.rs/veneer-macros"
8 | repository = "https://github.com/saethlin/veneer"
9 | edition = "2018"
10 | include = ["src/**/*.rs", "README.md", "LICENSE-MIT", "LICENSE-APACHE"]
11 | 
12 | [lib]
13 | proc-macro = true
14 | 
--------------------------------------------------------------------------------
/veneer-macros/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 | 
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 | 
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/veneer-macros/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | Permission is hereby granted, free of charge, to any
2 | person obtaining a copy of this software and associated
3 | documentation files (the "Software"), to deal in the
4 | Software without restriction, including without
5 | limitation the rights to use, copy, modify, merge,
6 | publish, distribute, sublicense, and/or sell copies of
7 | the Software, and to permit persons to whom the Software
8 | is furnished to do so, subject to the following
9 | conditions:
10 | 
11 | The above copyright notice and this permission notice
12 | shall be included in all copies or substantial portions
13 | of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 | DEALINGS IN THE SOFTWARE.
24 | 
--------------------------------------------------------------------------------
/veneer-macros/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![feature(proc_macro_diagnostic, proc_macro_span, proc_macro_quote)]
2 | extern crate proc_macro;
3 | 
4 | use proc_macro::{quote, Delimiter, Diagnostic, Level, TokenStream, TokenTree};
5 | 
6 | #[proc_macro_attribute]
7 | pub fn main(args: TokenStream, item: TokenStream) -> TokenStream {
8 |     if !args.is_empty() {
9 |         let start = args.clone().into_iter().next().unwrap().span();
10 |         let end = args.into_iter().last().unwrap().span();
11 |         let span = start.join(end).unwrap();
12 |         Diagnostic::spanned(
13 |             vec![span],
14 |             Level::Error,
15 |             "Attribute macro veneer_macros::main does not accept any arguments",
16 |         )
17 |         .emit();
18 |     }
19 | 
20 |     let signature = item
21 |         .clone()
22 |         .into_iter()
23 |         .take_while(|t| {
24 |             if let TokenTree::Group(group) = t {
25 |                 group.delimiter() != Delimiter::Brace
26 |             } else {
27 |                 true
28 |             }
29 |         })
30 |         .collect::<Vec<_>>();
31 | 
32 |     let start = item.clone().into_iter().next().unwrap().span();
33 |     let end = item.clone().into_iter().last().unwrap().span();
34 |     let span = start.join(end).unwrap();
35 |     let not_a_fn = Diagnostic::spanned(
36 |         vec![span],
37 |         Level::Error,
38 |         "Attribute macro veneer_macros::main may only be applied to functions which take no arguments",
39 |     );
40 | 
41 |     let name = if let (
42 |         Some(TokenTree::Ident(f)),
43 |         Some(TokenTree::Ident(name)),
44 |         Some(TokenTree::Group(args)),
45 |     ) = (signature.get(0), signature.get(1), signature.get(2))
46 |     {
47 |         if f.to_string() == "fn" && args.delimiter() == Delimiter::Parenthesis {
48 |             name
49 |         } else {
50 |             not_a_fn.emit();
51 |             return item;
52 |         }
53 |     } else {
54 |         not_a_fn.emit();
55 |         return item;
56 |     };
57 | 
58 |     let name = TokenTree::from(name.clone());
59 | 
60 |     let header = if signature.len() == 3 {
61 |         quote! {
62 |             #[no_mangle]
63 |             unsafe extern "C" fn __veneer_main() {
64 |                 $name();
65 |                 veneer::syscalls::exit(0);
66 |             }
67 |         }
68 |     } else {
69 |         quote!
{ 70 | #[no_mangle] 71 | unsafe extern "C" fn __veneer_main() { 72 | let exit_code = match $name() { 73 | Ok(()) => 0, 74 | Err(_) => 1, 75 | }; 76 | veneer::syscalls::exit(exit_code); 77 | } 78 | } 79 | }; 80 | header.into_iter().chain(item.into_iter()).collect() 81 | } 82 | --------------------------------------------------------------------------------
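A minimal downstream usage sketch for the attribute defined above (illustrative; the `Error` return type and the start-up code that eventually calls `__veneer_main` are assumptions, not shown in this listing):

    // Hypothetical no_std binary built against veneer.
    #[veneer_macros::main]
    fn main() -> Result<(), veneer::Error> {
        // fd 1 is stdout on Linux.
        veneer::syscalls::write(1, b"hello\n")?;
        Ok(())
    }

Because this `main` has a return type, its token signature is longer than three tokens, so the second `quote!` branch applies: the attribute keeps `main` as written and additionally emits a `#[no_mangle] unsafe extern "C" fn __veneer_main()` that calls `main()` and then passes 0 or 1 to `veneer::syscalls::exit` depending on the `Result`.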