├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── examples └── payload │ ├── .gitignore │ ├── add.rs │ ├── big_structs.rs │ ├── btreemap.rs │ ├── ext_call.rs │ ├── fib.rs │ └── tests │ └── src │ ├── alloc.rs │ ├── call_indirect.rs │ ├── match.rs │ └── mem_rw.rs ├── hetrans ├── .gitignore ├── Cargo.toml └── src │ └── main.rs ├── src ├── cfgraph.rs ├── executor.rs ├── fp_ops.rs ├── hetrans.rs ├── int_ops.rs ├── jit │ ├── compiler.rs │ ├── compiler_intrinsics.rs │ ├── llvm.rs │ ├── mod.rs │ ├── ondemand.rs │ ├── runtime.rs │ └── vm.rs ├── lib.rs ├── module.rs ├── opcode.rs ├── optimizers │ └── mod.rs ├── platform │ ├── generic.rs │ ├── mod.rs │ ├── other.rs │ └── x86_64_linux │ │ ├── mod.rs │ │ └── sigsegv.rs ├── prelude.rs ├── prelude_no_std.rs ├── resolver.rs ├── ssa.rs ├── trans │ ├── config.rs │ ├── debug_print.rs │ ├── mod.rs │ └── optrans.rs └── value.rs ├── tests ├── emscripten │ ├── config.json │ └── src │ │ └── hello_world.cc └── runner │ └── run.py └── wa ├── .gitignore ├── Cargo.toml └── src ├── jit.rs ├── main.rs ├── resolver.rs ├── stream.rs ├── syscall.rs └── utils.rs /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | /target 3 | **/*.rs.bk 4 | Cargo.lock 5 | /tests/ 6 | /examples/ 7 | /bridge/ 8 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "wasm-core" 3 | version = "0.2.15" 4 | authors = ["losfair "] 5 | description = "Portable WebAssembly implementation" 6 | license = "LGPL-3.0" 7 | keywords = ["wasm", "webassembly"] 8 | repository = "https://github.com/losfair/wasm-core" 9 | readme = "README.md" 10 | 11 | [dependencies] 12 | serde = { version = "1", default-features = false } 13 | serde_derive = { version = "1", default-features = false } 14 | bincode-no-std = { version = "1.0.0", optional = true } 15 | bincode = { version = "1.0.0", optional = true } 16 | llvm-sys = { version = "60.0", optional = true } 17 | smallvec = { version = "0.6", optional = true } 18 | lazy_static = "1.0" 19 | parity-wasm = { version = "0.27.2", optional = true } 20 | libc = { version = "0.2", optional = true } 21 | byteorder = { version = "1", default-features = false } 22 | 23 | [features] 24 | default = ["std"] 25 | std = ["bincode"] 26 | no_std = ["bincode-no-std"] 27 | jit = ["std", "llvm-sys", "smallvec", "libc"] 28 | trans = ["std", "parity-wasm"] 29 | debug = [] 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 
21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 
93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. 
If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 | 
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # wasm-core
2 | 
3 | [![Crates.io](https://img.shields.io/crates/v/wasm-core.svg)](https://crates.io/crates/wasm-core)
4 | 
5 | **NOTE: This repository is now deprecated due to multiple design issues. LLVM-generated WebAssembly code is still expected to run without problems (as of July 28, 2018), but users are advised to move to other implementations for future usage.**
6 | 
7 | Portable WebAssembly implementation intended to run everywhere.
8 | 
9 | -----
10 | 
11 | # Features
12 | 
13 | ### Efficient
14 | 
15 | wasm-core includes two execution engines: an interpreter and a JIT based on LLVM. The former is mainly for platforms that have constrained resources or are not supported by LLVM MCJIT; the latter is designed for high performance and should be used whenever possible.
16 | 
17 | With LLVM optimizations and on-demand compilation, the JIT engine of wasm-core is able to achieve near-native performance on x86-64.
18 | 
19 | ### Portable
20 | 
21 | wasm-core supports `no_std`. This means that it can run on any platform with `libcore` and `liballoc` available, including many embedded devices and even OS kernels.
22 | 
23 | ### Secure
24 | 
25 | The default execution environment is fully sandboxed, which means that user code cannot access the outside environment without explicit native imports.
26 | 
27 | ### Easy integration
28 | 
29 | External functions can be easily imported by implementing the `NativeResolver` trait, and exported WebAssembly functions can be easily called from outside. See [Ice Core](https://github.com/losfair/IceCore/tree/lssa) as an example.
30 | 
31 | # How to use
32 | 
33 | Instead of loading WebAssembly files directly, wasm-core takes code in an IR format generated from the raw WebAssembly code by the translator in `src/trans/` (available with the `trans` feature). A minimal sketch of this step is shown at the end of this README.
34 | 
35 | See [Ice Core](https://github.com/losfair/IceCore/tree/lssa) as an example for executing pure-wasm code and `wa/` as an example for executing wasm generated by Emscripten. (Note: support for Emscripten-generated code may be removed in the future once the WebAssembly backend of LLVM becomes stable and fully usable from clang.)
36 | 
37 | # Bugs
38 | 
39 | See issues with the `bug` tag.
40 | 
41 | # Contribute
42 | 
43 | Contributions to this project are always welcome! Open a pull request directly if you've fixed a bug, and open an issue for discussion first if you want to add new features.
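Below is a minimal sketch of the translation step, adapted from the `hetrans` tool in this repository (it requires the `trans` feature). The file name `payload.wasm` and the entry function name `main` are placeholders; the execution half (interpreter or JIT) is omitted because the executor API depends on the enabled features.

```rust
extern crate wasm_core;

use std::fs::File;
use std::io::Read;

use wasm_core::trans::config::ModuleConfig;

fn main() {
    // Read a raw .wasm file (e.g. produced by LLVM's WebAssembly backend).
    let mut code: Vec<u8> = Vec::new();
    File::open("payload.wasm").unwrap().read_to_end(&mut code).unwrap();

    // Translate the raw WebAssembly into wasm-core's internal IR module.
    let cfg: ModuleConfig = ModuleConfig::default();
    let module = wasm_core::trans::translate_module_raw(code.as_slice(), cfg);

    // Exported functions are looked up by name; the result is an index
    // into `module.functions`.
    let entry_fn = module.lookup_exported_func("main").expect("Entry function not found");
    println!("entry function index: {}", entry_fn);
}
```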
44 | 
-------------------------------------------------------------------------------- /examples/payload/.gitignore: --------------------------------------------------------------------------------
1 | *.wasm
2 | 
-------------------------------------------------------------------------------- /examples/payload/add.rs: --------------------------------------------------------------------------------
1 | #[no_mangle]
2 | pub extern "C" fn add(a: i32, b: i32) -> i32 {
3 |     a + b
4 | }
5 | 
-------------------------------------------------------------------------------- /examples/payload/big_structs.rs: --------------------------------------------------------------------------------
1 | pub struct Foo {
2 |     a: i32,
3 |     b: i64,
4 |     c: u8,
5 |     d: i64
6 | }
7 | 
8 | impl Foo {
9 |     pub fn new() -> Foo {
10 |         Foo {
11 |             a: 1,
12 |             b: 2,
13 |             c: 3,
14 |             d: 4
15 |         }
16 |     }
17 | }
18 | 
19 | #[no_mangle]
20 | pub extern "C" fn new_foo() -> *mut Foo {
21 |     let foo = Box::new(Foo::new());
22 |     Box::into_raw(foo)
23 | }
24 | 
-------------------------------------------------------------------------------- /examples/payload/btreemap.rs: --------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | 
3 | #[no_mangle]
4 | pub extern "C" fn insert_lookup(v: i32) -> i32 {
5 |     let mut m: BTreeMap<String, i32> = BTreeMap::new();
6 |     m.insert(format!("{}", v), v * 2);
7 |     *m.get(format!("{}", v).as_str()).unwrap()
8 | }
9 | 
10 | fn main() {
11 |     println!("{}", insert_lookup(21));
12 | }
13 | 
-------------------------------------------------------------------------------- /examples/payload/ext_call.rs: --------------------------------------------------------------------------------
1 | extern "C" {
2 |     fn __wcore_ping(v: i32) -> i32;
3 | }
4 | 
5 | #[no_mangle]
6 | pub extern "C" fn call() -> i32 {
7 |     unsafe {
8 |         __wcore_ping(42)
9 |     }
10 | }
11 | 
-------------------------------------------------------------------------------- /examples/payload/fib.rs: --------------------------------------------------------------------------------
1 | #[no_mangle]
2 | pub extern "C" fn fib(n: i32) -> i32 {
3 |     if n == 1 || n == 2 {
4 |         1
5 |     } else {
6 |         fib(n - 1) + fib(n - 2)
7 |     }
8 | }
9 | 
-------------------------------------------------------------------------------- /examples/payload/tests/src/alloc.rs: --------------------------------------------------------------------------------
1 | #[no_mangle]
2 | pub extern "C" fn alloc_blocks(n: i32) -> *mut *mut [u8] {
3 |     let mut blocks: Vec<*mut [u8]> = Vec::new();
4 |     for _ in 0..n {
5 |         let v = vec!
[0; 4096];
6 |         blocks.push(Box::into_raw(v.into_boxed_slice()));
7 |     }
8 | 
9 |     let ret: *mut *mut [u8] = &mut blocks[0];
10 |     ::std::mem::forget(blocks);
11 |     ret
12 | }
13 | 
-------------------------------------------------------------------------------- /examples/payload/tests/src/call_indirect.rs: --------------------------------------------------------------------------------
1 | type Callable = extern "C" fn () -> i32;
2 | 
3 | static mut CLOSURE: Option<*mut Fn(i64) -> i32> = None;
4 | 
5 | #[no_mangle]
6 | pub extern "C" fn call(target: Callable) -> i32 {
7 |     target()
8 | }
9 | 
10 | #[no_mangle]
11 | pub extern "C" fn produce_value() -> i32 {
12 |     42
13 | }
14 | 
15 | #[no_mangle]
16 | pub extern "C" fn get_addr() -> Callable {
17 |     produce_value
18 | }
19 | 
20 | #[inline(never)]
21 | fn produce_closure() -> Box<Fn(i64) -> i32> {
22 |     let v: i32 = 0;
23 |     Box::new(|a| {
24 |         //panic!();
25 |         produce_value() + a as i32
26 |     })
27 | }
28 | 
29 | #[no_mangle]
30 | pub extern "C" fn run() {
31 |     let f: &Fn(i64) -> i32 = unsafe {
32 |         if CLOSURE.is_none() {
33 |             CLOSURE = Some(Box::into_raw(
34 |                 produce_closure()
35 |             ));
36 |         }
37 |         &*CLOSURE.unwrap()
38 |     };
39 |     let result = f(99);
40 |     assert_eq!(result, 42 + 99);
41 | }
42 | 
-------------------------------------------------------------------------------- /examples/payload/tests/src/match.rs: --------------------------------------------------------------------------------
1 | #[no_mangle]
2 | //#[inline(never)]
3 | pub extern "C" fn do_match(v: i32) -> i32 {
4 |     match v {
5 |         3 => do_match(5),
6 |         6 => do_match(10),
7 |         10 => 42,
8 |         5 => 99,
9 |         _ => unreachable!()
10 |     }
11 | }
12 | 
13 | #[no_mangle]
14 | pub extern "C" fn run() {
15 |     // do_match(111) would hit `unreachable!()` and trap, so it is not asserted here.
16 |     assert_eq!(do_match(6), 42);
17 |     assert_eq!(do_match(3), 99);
18 |     assert_eq!(do_match(10), 42);
19 |     assert_eq!(do_match(5), 99);
20 | }
21 | 
-------------------------------------------------------------------------------- /examples/payload/tests/src/mem_rw.rs: --------------------------------------------------------------------------------
1 | #[no_mangle]
2 | pub extern "C" fn run() {
3 |     let vec: Vec<i32> = vec! [42; 10000];
4 |     let mut result: i32 = 0;
5 |     for v in vec {
6 |         result += v;
7 |     }
8 |     assert_eq!(result, 420000);
9 | }
10 | 
11 | fn main() {
12 |     run();
13 | }
14 | 
-------------------------------------------------------------------------------- /hetrans/.gitignore: --------------------------------------------------------------------------------
1 | /target/
2 | 
-------------------------------------------------------------------------------- /hetrans/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "hetrans"
3 | version = "0.1.0"
4 | authors = ["losfair "]
5 | 
6 | [dependencies]
7 | wasm-core = { path = ".."
, features = ["trans"]}
8 | 
-------------------------------------------------------------------------------- /hetrans/src/main.rs: --------------------------------------------------------------------------------
1 | extern crate wasm_core;
2 | 
3 | use std::fs::File;
4 | use std::env;
5 | use std::io::Read;
6 | use std::io::Write;
7 | 
8 | use wasm_core::value::Value;
9 | use wasm_core::module::*;
10 | use wasm_core::trans::config::ModuleConfig;
11 | use wasm_core::hetrans::translate_module;
12 | use wasm_core::hetrans::NullMapNativeInvoke;
13 | 
14 | fn main() {
15 |     let mut args = env::args();
16 |     args.next().unwrap();
17 | 
18 |     let path = args.next().expect("Path required");
19 |     let entry_fn_name = args.next().expect("Entry function required");
20 |     let mut f = File::open(&path).unwrap();
21 |     let mut code: Vec<u8> = Vec::new();
22 | 
23 |     let cfg: ModuleConfig = ModuleConfig::default();
24 | 
25 |     f.read_to_end(&mut code).unwrap();
26 |     let module = wasm_core::trans::translate_module_raw(code.as_slice(), cfg);
27 |     let entry_fn = module.lookup_exported_func(&entry_fn_name).expect("Entry function not found");
28 | 
29 |     let result = translate_module(&module, entry_fn, &mut NullMapNativeInvoke);
30 |     eprintln!("{:?}", module.functions[entry_fn].body.opcodes);
31 |     ::std::io::stdout().write_all(&result).unwrap();
32 | }
33 | 
-------------------------------------------------------------------------------- /src/cfgraph.rs: --------------------------------------------------------------------------------
1 | use opcode::Opcode;
2 | use prelude::{BTreeMap, BTreeSet};
3 | 
4 | #[derive(Clone, Debug)]
5 | pub struct CFGraph {
6 |     pub blocks: Vec<BasicBlock>
7 | }
8 | 
9 | #[derive(Clone, Debug)]
10 | pub struct BasicBlock {
11 |     pub opcodes: Vec<Opcode>,
12 |     pub br: Option<Branch> // must be Some in a valid control graph
13 | }
14 | 
15 | #[derive(Clone, Debug, Eq, PartialEq)]
16 | pub enum Branch {
17 |     Jmp(BlockId),
18 |     JmpEither(BlockId, BlockId), // (if_true, if_false)
19 |     JmpTable(Vec<BlockId>, BlockId),
20 |     Return
21 | }
22 | 
23 | #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
24 | pub struct BlockId(pub usize);
25 | 
26 | pub type OptimizeResult<T> = Result<T, OptimizeError>;
27 | 
28 | #[derive(Clone, Debug)]
29 | pub enum OptimizeError {
30 |     InvalidBranchTarget,
31 |     Custom(String)
32 | }
33 | 
34 | pub trait Optimizer {
35 |     type Return;
36 | 
37 |     fn optimize(&self, cfg: &mut CFGraph) -> OptimizeResult<Self::Return>;
38 | }
39 | 
40 | fn _assert_optimizer_trait_object_safe() {
41 |     struct Opt {}
42 |     impl Optimizer for Opt {
43 |         type Return = ();
44 |         fn optimize(&self, _: &mut CFGraph) -> OptimizeResult<()> { Ok(()) }
45 |     }
46 | 
47 |     let _obj: Box<Optimizer<Return = ()>> = Box::new(Opt {});
48 | }
49 | 
50 | trait CheckedBranchTarget {
51 |     type TValue;
52 | 
53 |     fn checked_branch_target(&self) -> OptimizeResult<Self::TValue>;
54 | }
55 | 
56 | impl<'a> CheckedBranchTarget for Option<&'a BlockId> {
57 |     type TValue = BlockId;
58 | 
59 |     fn checked_branch_target(&self) -> OptimizeResult<BlockId> {
60 |         match *self {
61 |             Some(v) => Ok(*v),
62 |             None => Err(OptimizeError::InvalidBranchTarget)
63 |         }
64 |     }
65 | }
66 | 
67 | impl CFGraph {
68 |     pub fn from_function(fops: &[Opcode]) -> OptimizeResult<CFGraph> {
69 |         Ok(CFGraph {
70 |             blocks: scan_basic_blocks(fops)?
71 |         })
72 |     }
73 | 
74 |     pub fn validate(&self) -> OptimizeResult<()> {
75 |         for blk in &self.blocks {
76 |             for op in &blk.opcodes {
77 |                 if op.is_branch() {
78 |                     return Err(OptimizeError::Custom(
79 |                         "Branch instruction(s) found in the middle of a basic block".into()
80 |                     ));
81 |                 }
82 |             }
83 |             let br = if let Some(ref br) = blk.br {
84 |                 br
85 |             } else {
86 |                 return Err(OptimizeError::Custom(
87 |                     "Empty branch target(s) found".into()
88 |                 ));
89 |             };
90 |             let br_ok = match *br {
91 |                 Branch::Jmp(id) => {
92 |                     if id.0 >= self.blocks.len() {
93 |                         false
94 |                     } else {
95 |                         true
96 |                     }
97 |                 },
98 |                 Branch::JmpEither(a, b) => {
99 |                     if a.0 >= self.blocks.len() || b.0 >= self.blocks.len() {
100 |                         false
101 |                     } else {
102 |                         true
103 |                     }
104 |                 },
105 |                 Branch::JmpTable(ref targets, otherwise) => {
106 |                     let mut ok = true;
107 |                     for t in targets {
108 |                         if t.0 >= self.blocks.len() {
109 |                             ok = false;
110 |                             break;
111 |                         }
112 |                     }
113 |                     if ok {
114 |                         if otherwise.0 >= self.blocks.len() {
115 |                             false
116 |                         } else {
117 |                             true
118 |                         }
119 |                     } else {
120 |                         false
121 |                     }
122 |                 },
123 |                 Branch::Return => true
124 |             };
125 |             if !br_ok {
126 |                 return Err(OptimizeError::Custom(
127 |                     "Invalid branch target(s)".into()
128 |                 ));
129 |             }
130 |         }
131 | 
132 |         Ok(())
133 |     }
134 | 
135 |     /// Generate sequential opcodes.
136 |     pub fn gen_opcodes(&self) -> Vec<Opcode> {
137 |         enum OpOrBr {
138 |             Op(Opcode),
139 |             Br(Branch) // pending branch to basic block
140 |         }
141 | 
142 |         let mut seq: Vec<OpOrBr> = Vec::new();
143 |         let mut begin_instrs: Vec<u32> = Vec::with_capacity(self.blocks.len());
144 | 
145 |         for (i, bb) in self.blocks.iter().enumerate() {
146 |             begin_instrs.push(seq.len() as u32);
147 |             for op in &bb.opcodes {
148 |                 seq.push(OpOrBr::Op(op.clone()));
149 |             }
150 |             seq.push(OpOrBr::Br(bb.br.as_ref().unwrap().clone()));
151 |         }
152 | 
153 |         seq.into_iter().map(|oob| {
154 |             match oob {
155 |                 OpOrBr::Op(op) => op,
156 |                 OpOrBr::Br(br) => {
157 |                     match br {
158 |                         Branch::Jmp(BlockId(id)) => Opcode::Jmp(begin_instrs[id]),
159 |                         Branch::JmpEither(BlockId(if_true), BlockId(if_false)) => {
160 |                             Opcode::JmpEither(
161 |                                 begin_instrs[if_true],
162 |                                 begin_instrs[if_false]
163 |                             )
164 |                         },
165 |                         Branch::JmpTable(targets, BlockId(otherwise)) => Opcode::JmpTable(
166 |                             targets.into_iter().map(|BlockId(id)| begin_instrs[id]).collect(),
167 |                             begin_instrs[otherwise]
168 |                         ),
169 |                         Branch::Return => Opcode::Return
170 |                     }
171 |                 }
172 |             }
173 |         }).collect()
174 |     }
175 | 
176 |     pub fn optimize<
177 |         T: Optimizer<Return = R>,
178 |         R
179 |     >(&mut self, optimizer: T) -> OptimizeResult<R> {
180 |         optimizer.optimize(self)
181 |     }
182 | }
183 | 
184 | impl BasicBlock {
185 |     pub fn new() -> BasicBlock {
186 |         BasicBlock {
187 |             opcodes: vec! [],
188 |             br: None
189 |         }
190 |     }
191 | }
192 | 
193 | impl Opcode {
194 |     fn is_branch(&self) -> bool {
195 |         match *self {
196 |             Opcode::Jmp(_) | Opcode::JmpIf(_) | Opcode::JmpEither(_, _) | Opcode::JmpTable(_, _) | Opcode::Return => true,
197 |             _ => false
198 |         }
199 |     }
200 | }
201 | 
202 | /// Constructs a Vec of basic blocks.
203 | fn scan_basic_blocks(ops: &[Opcode]) -> OptimizeResult<Vec<BasicBlock>> {
204 |     if ops.len() == 0 {
205 |         return Ok(Vec::new());
206 |     }
207 | 
208 |     let mut jmp_targets: BTreeSet<u32> = BTreeSet::new();
209 | 
210 |     // Entry point.
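// (Index 0 is inserted unconditionally so that the function entry always begins a basic block, even when no branch targets it.)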
211 |     jmp_targets.insert(0);
212 | 
213 |     {
214 |         // Detect jmp targets
215 |         for (i, op) in ops.iter().enumerate() {
216 |             if op.is_branch() {
217 |                 match *op {
218 |                     Opcode::Jmp(id) => {
219 |                         jmp_targets.insert(id);
220 |                     },
221 |                     Opcode::JmpIf(id) => {
222 |                         jmp_targets.insert(id);
223 |                     },
224 |                     Opcode::JmpEither(a, b) => {
225 |                         jmp_targets.insert(a);
226 |                         jmp_targets.insert(b);
227 |                     },
228 |                     Opcode::JmpTable(ref targets, otherwise) => {
229 |                         for t in targets {
230 |                             jmp_targets.insert(*t);
231 |                         }
232 |                         jmp_targets.insert(otherwise);
233 |                     },
234 |                     Opcode::Return => {},
235 |                     _ => unreachable!()
236 |                 }
237 | 
238 |                 // The instruction following a branch starts a new basic block.
239 |                 jmp_targets.insert((i + 1) as u32);
240 |             }
241 |         }
242 |     }
243 | 
244 |     // Split opcodes into basic blocks
245 |     let (bb_ops, instr_mappings): (Vec<&[Opcode]>, BTreeMap<u32, BlockId>) = {
246 |         let mut bb_ops: Vec<&[Opcode]> = Vec::new();
247 |         let mut instr_mappings: BTreeMap<u32, BlockId> = BTreeMap::new();
248 | 
249 |         // jmp_targets.len() >= 1 holds here because of `jmp_targets.insert(0)`
250 |         let mut jmp_targets: Vec<u32> = jmp_targets.iter().map(|v| *v).collect();
251 | 
252 |         // [start, end) ...
253 |         // ops.len
254 |         {
255 |             let last = *jmp_targets.last().unwrap() as usize;
256 |             if last > ops.len() {
257 |                 return Err(OptimizeError::InvalidBranchTarget);
258 |             }
259 | 
260 |             // ops.len() >= 1 holds here.
261 |             // if last == 0 (same as jmp_targets.len() == 1) then a new jmp target will still be pushed
262 |             // so that jmp_targets.len() >= 2 always holds after this.
263 |             if last < ops.len() {
264 |                 jmp_targets.push(ops.len() as u32);
265 |             }
266 |         }
267 | 
268 |         for i in 0..jmp_targets.len() - 1 {
269 |             // [st..ed)
270 |             let st = jmp_targets[i] as usize;
271 |             let ed = jmp_targets[i + 1] as usize;
272 |             instr_mappings.insert(st as u32, BlockId(bb_ops.len()));
273 |             bb_ops.push(&ops[st..ed]);
274 |         }
275 | 
276 |         (bb_ops, instr_mappings)
277 |     };
278 | 
279 |     let mut bbs: Vec<BasicBlock> = Vec::new();
280 | 
281 |     for (i, bb) in bb_ops.iter().enumerate() {
282 |         let mut bb = bb.to_vec();
283 | 
284 |         let br: Option<Branch> = if let Some(op) = bb.last() {
285 |             if op.is_branch() {
286 |                 Some(match *op {
287 |                     Opcode::Jmp(target) => Branch::Jmp(instr_mappings.get(&target).checked_branch_target()?),
288 |                     Opcode::JmpIf(target) => Branch::JmpEither(
289 |                         instr_mappings.get(&target).checked_branch_target()?, // if true
290 |                         BlockId(i + 1) // otherwise
291 |                     ),
292 |                     Opcode::JmpEither(a, b) => Branch::JmpEither(
293 |                         instr_mappings.get(&a).checked_branch_target()?,
294 |                         instr_mappings.get(&b).checked_branch_target()?
295 |                     ),
296 |                     Opcode::JmpTable(ref targets, otherwise) => {
297 |                         let mut br_targets: Vec<BlockId> = Vec::new();
298 |                         for t in targets {
299 |                             br_targets.push(instr_mappings.get(t).checked_branch_target()?);
300 |                         }
301 |                         Branch::JmpTable(
302 |                             br_targets,
303 |                             instr_mappings.get(&otherwise).checked_branch_target()?
304 |                         )
305 |                     },
306 |                     Opcode::Return => Branch::Return,
307 |                     _ => unreachable!()
308 |                 })
309 |             } else {
310 |                 None
311 |             }
312 |         } else {
313 |             None
314 |         };
315 | 
316 |         let br: Branch = if let Some(v) = br {
317 |             bb.pop().unwrap();
318 |             v
319 |         } else {
320 |             Branch::Jmp(BlockId(i + 1))
321 |         };
322 | 
323 |         let mut result = BasicBlock::new();
324 |         result.opcodes = bb;
325 |         result.br = Some(br);
326 | 
327 |         bbs.push(result);
328 |     }
329 | 
330 |     Ok(bbs)
331 | }
332 | 
333 | #[cfg(test)]
334 | mod tests {
335 |     use super::*;
336 | 
337 |     #[test]
338 |     fn test_jmp() {
339 |         let opcodes: Vec<Opcode> = vec!
 [
340 |             // bb 0
341 |             Opcode::I32Const(100), // 0
342 |             Opcode::Jmp(3), // 1
343 |             // bb 1, implicit fallthrough
344 |             Opcode::I32Const(50), // 2
345 |             // bb 2 (due to jmp)
346 |             Opcode::I32Const(25), // 3
347 |             Opcode::Return // 4
348 |         ];
349 | 
350 |         let cfg = CFGraph::from_function(opcodes.as_slice()).unwrap();
351 |         cfg.validate().unwrap();
352 | 
353 |         assert_eq!(cfg.blocks.len(), 3);
354 |         assert_eq!(cfg.blocks[0].br, Some(Branch::Jmp(BlockId(2))));
355 |         assert_eq!(cfg.blocks[1].br, Some(Branch::Jmp(BlockId(2))));
356 |         assert_eq!(cfg.blocks[2].br, Some(Branch::Return));
357 | 
358 |         eprintln!("{:?}", cfg);
359 | 
360 |         eprintln!("{:?}", cfg.gen_opcodes());
361 |     }
362 | 
363 |     #[test]
364 |     fn test_jmp_if() {
365 |         let opcodes: Vec<Opcode> = vec! [
366 |             // bb 0
367 |             Opcode::I32Const(100), // 0
368 |             Opcode::JmpIf(3), // 1
369 |             // bb 1, implicit fallthrough
370 |             Opcode::I32Const(50), // 2
371 |             // bb 2 (due to jmp)
372 |             Opcode::I32Const(25), // 3
373 |             Opcode::Return // 4
374 |         ];
375 | 
376 |         let cfg = CFGraph::from_function(opcodes.as_slice()).unwrap();
377 |         cfg.validate().unwrap();
378 | 
379 |         assert_eq!(cfg.blocks.len(), 3);
380 |         assert_eq!(cfg.blocks[0].br, Some(Branch::JmpEither(BlockId(2), BlockId(1))));
381 |         assert_eq!(cfg.blocks[1].br, Some(Branch::Jmp(BlockId(2))));
382 |         assert_eq!(cfg.blocks[2].br, Some(Branch::Return));
383 | 
384 |         eprintln!("{:?}", cfg);
385 | 
386 |         eprintln!("{:?}", cfg.gen_opcodes());
387 |     }
388 | 
389 |     #[test]
390 |     fn test_circular() {
391 |         let opcodes: Vec<Opcode> = vec! [
392 |             // bb 0
393 |             Opcode::I32Const(100), // 0
394 |             Opcode::JmpIf(0),
395 |             // bb 1
396 |             Opcode::Return // 2
397 |         ];
398 | 
399 |         let cfg = CFGraph::from_function(opcodes.as_slice()).unwrap();
400 |         cfg.validate().unwrap();
401 | 
402 |         assert_eq!(cfg.blocks.len(), 2);
403 |         assert_eq!(cfg.blocks[0].br, Some(Branch::JmpEither(BlockId(0), BlockId(1))));
404 | 
405 |         eprintln!("{:?}", cfg);
406 | 
407 |         eprintln!("{:?}", cfg.gen_opcodes());
408 |     }
409 | 
410 |     #[test]
411 |     fn test_invalid_branch_target() {
412 |         let opcodes: Vec<Opcode> = vec!
 [ Opcode::Jmp(10) ];
413 |         match CFGraph::from_function(opcodes.as_slice()) {
414 |             Err(OptimizeError::InvalidBranchTarget) => {},
415 |             _ => panic!("Expecting an InvalidBranchTarget error")
416 |         }
417 |     }
418 | }
419 | 
-------------------------------------------------------------------------------- /src/fp_ops.rs: --------------------------------------------------------------------------------
1 | use executor::{ExecuteResult, ExecuteError};
2 | 
3 | #[inline]
4 | pub fn i32_reinterpret_f32(v: f32) -> i32 {
5 |     unsafe {
6 |         ::std::mem::transmute(v)
7 |     }
8 | }
9 | 
10 | #[inline]
11 | pub fn i64_reinterpret_f64(v: f64) -> i64 {
12 |     unsafe {
13 |         ::std::mem::transmute(v)
14 |     }
15 | }
16 | 
17 | #[inline]
18 | pub fn f32_reinterpret_i32(v: i32) -> f32 {
19 |     unsafe {
20 |         ::std::mem::transmute(v)
21 |     }
22 | }
23 | 
24 | #[inline]
25 | pub fn f64_reinterpret_i64(v: i64) -> f64 {
26 |     unsafe {
27 |         ::std::mem::transmute(v)
28 |     }
29 | }
30 | 
-------------------------------------------------------------------------------- /src/hetrans.rs: --------------------------------------------------------------------------------
1 | use module::*;
2 | use opcode::{Opcode, Memarg};
3 | use byteorder::{LittleEndian, ByteOrder};
4 | 
5 | #[derive(Copy, Clone, Debug)]
6 | #[repr(u8)]
7 | pub enum TargetOp {
8 |     Drop = 1,
9 |     Dup,
10 |     Swap2,
11 |     Select,
12 | 
13 |     Call,
14 |     Return,
15 |     Halt,
16 | 
17 |     GetLocal,
18 |     SetLocal,
19 |     TeeLocal,
20 | 
21 |     GetSlotIndirect,
22 |     GetSlot,
23 |     SetSlot,
24 |     ResetSlots,
25 | 
26 |     NativeInvoke,
27 | 
28 |     CurrentMemory,
29 |     GrowMemory,
30 | 
31 |     Nop,
32 |     Unreachable,
33 |     NotSupported,
34 | 
35 |     Jmp,
36 |     JmpIf,
37 |     JmpEither,
38 |     JmpTable,
39 | 
40 |     I32Load,
41 |     I32Load8U,
42 |     I32Load8S,
43 |     I32Load16U,
44 |     I32Load16S,
45 |     I32Store,
46 |     I32Store8,
47 |     I32Store16,
48 | 
49 |     I32Const,
50 |     I32Ctz,
51 |     I32Clz,
52 |     I32Popcnt,
53 |     I32Add,
54 |     I32Sub,
55 |     I32Mul,
56 |     I32DivU,
57 |     I32DivS,
58 |     I32RemU,
59 |     I32RemS,
60 |     I32And,
61 |     I32Or,
62 |     I32Xor,
63 |     I32Shl,
64 |     I32ShrU,
65 |     I32ShrS,
66 |     I32Rotl,
67 |     I32Rotr,
68 | 
69 |     I32Eq,
70 |     I32Ne,
71 |     I32LtU,
72 |     I32LtS,
73 |     I32LeU,
74 |     I32LeS,
75 |     I32GtU,
76 |     I32GtS,
77 |     I32GeU,
78 |     I32GeS,
79 | 
80 |     I32WrapI64,
81 | 
82 |     I64Load,
83 |     I64Load8U,
84 |     I64Load8S,
85 |     I64Load16U,
86 |     I64Load16S,
87 |     I64Load32U,
88 |     I64Load32S,
89 |     I64Store,
90 |     I64Store8,
91 |     I64Store16,
92 |     I64Store32,
93 | 
94 |     I64Const,
95 |     I64Ctz,
96 |     I64Clz,
97 |     I64Popcnt,
98 |     I64Add,
99 |     I64Sub,
100 |     I64Mul,
101 |     I64DivU,
102 |     I64DivS,
103 |     I64RemU,
104 |     I64RemS,
105 |     I64And,
106 |     I64Or,
107 |     I64Xor,
108 |     I64Shl,
109 |     I64ShrU,
110 |     I64ShrS,
111 |     I64Rotl,
112 |     I64Rotr,
113 | 
114 |     I64Eq,
115 |     I64Ne,
116 |     I64LtU,
117 |     I64LtS,
118 |     I64LeU,
119 |     I64LeS,
120 |     I64GtU,
121 |     I64GtS,
122 |     I64GeU,
123 |     I64GeS,
124 | 
125 |     I64ExtendI32U,
126 |     I64ExtendI32S,
127 | 
128 |     Never
129 | }
130 | 
131 | pub trait MapNativeInvoke {
132 |     fn map_native_invoke(&mut self, module: &str, field: &str) -> Option<u32>;
133 | }
134 | 
135 | pub struct NullMapNativeInvoke;
136 | impl MapNativeInvoke for NullMapNativeInvoke {
137 |     fn map_native_invoke(&mut self, _module: &str, _field: &str) -> Option<u32> { None }
138 | }
139 | 
140 | struct Reloc {
141 |     code_loc: usize,
142 |     ty: RelocType
143 | }
144 | 
145 | enum RelocType {
146 |     Function(usize /* function id */),
147 |     LocalJmp(usize /* local opcode index */)
148 | }
149 | 
150 | #[derive(Debug)]
151 | struct OffsetTable {
152 |     table_slot_offset: usize,
153 |
    globals_slot_offset: usize
154 | }
155 | 
156 | struct TargetFunction {
157 |     code: Vec<u8>,
158 |     opcode_relocs: Vec<usize>, // source_op_id => target_op_id
159 |     generic_relocs: Vec<Reloc>
160 | }
161 | 
162 | pub fn translate_module(m: &Module, entry_fn: usize, mni: &mut MapNativeInvoke) -> Vec<u8> {
163 |     let mut target_code: Vec<u8> = Vec::new();
164 | 
165 |     let (target_dss, slot_values, offset_table) = build_initializers(m);
166 |     let _init_data_relocs = write_initializers(&target_dss, &mut target_code);
167 | 
168 |     //eprintln!("Offsets: {:?}", offset_table);
169 | 
170 |     let mut functions: Vec<TargetFunction> = Vec::with_capacity(m.functions.len());
171 | 
172 |     for f in &m.functions {
173 |         functions.push(translate_function(&m, f, &offset_table, mni));
174 |     }
175 | 
176 |     let mut slot_initializer_relocs: Vec<usize> = Vec::with_capacity(functions.len());
177 |     let mut function_relocs: Vec<usize> = Vec::with_capacity(functions.len());
178 |     let mut executable: Vec<u8> = Vec::new();
179 | 
180 |     executable.push(TargetOp::ResetSlots as u8);
181 |     write_u32(&mut executable, slot_values.len() as u32);
182 | 
183 |     for (i, sv) in slot_values.iter().enumerate() {
184 |         executable.push(TargetOp::I64Const as u8);
185 | 
186 |         slot_initializer_relocs.push(executable.len());
187 |         write_u64(&mut executable, *sv as u64);
188 | 
189 |         executable.push(TargetOp::SetSlot as u8);
190 |         write_u32(&mut executable, i as u32);
191 |     }
192 | 
193 |     let entry_reloc_point = build_call(m, &mut executable, entry_fn);
194 |     executable.push(TargetOp::Halt as u8);
195 | 
196 |     for (i, f) in functions.iter().enumerate() {
197 |         //eprintln!("Relocating function: {} -> {}", i, executable.len());
198 |         function_relocs.push(executable.len());
199 |         executable.extend_from_slice(&f.code);
200 |     }
201 | 
202 |     // Relocate entry
203 |     LittleEndian::write_u32(
204 |         &mut executable[entry_reloc_point .. entry_reloc_point + 4],
205 |         function_relocs[entry_fn] as u32
206 |     );
207 | 
208 |     // Relocate code
209 |     for (i, f) in functions.iter().enumerate() {
210 |         let target_section = &mut executable[function_relocs[i] .. function_relocs[i] + f.code.len()];
211 |         for reloc in &f.generic_relocs {
212 |             let slot = &mut target_section[reloc.code_loc .. reloc.code_loc + 4];
213 |             match reloc.ty {
214 |                 RelocType::Function(id) => {
215 |                     LittleEndian::write_u32(slot, function_relocs[id] as u32);
216 |                 },
217 |                 RelocType::LocalJmp(pos) => {
218 |                     LittleEndian::write_u32(slot, (function_relocs[i] + f.opcode_relocs[pos]) as u32);
219 |                 }
220 |             }
221 |         }
222 |     }
223 | 
224 |     // Relocate table
225 |     for i in 0..m.tables[0].elements.len() {
226 |         let base = slot_initializer_relocs[offset_table.table_slot_offset + i];
227 |         let elem = &mut executable[base .. base + 8];
228 | 
229 |         // On little endian systems this is the lower 32 bits of a 64-bit value.
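        // Each table slot was initialized as the i64 value ((n_locals << 32) | function_id)
        // in build_initializers, so reading the first four bytes little-endian yields the
        // function id half.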
230 |         let function_id = LittleEndian::read_u32(elem);
231 |         if function_id != ::std::u32::MAX {
232 |             //eprintln!("Relocating: {} -> {}", function_id, function_relocs[function_id as usize]);
233 |             LittleEndian::write_u32(elem, function_relocs[function_id as usize] as u32);
234 |         }
235 |     }
236 | 
237 |     target_code.extend_from_slice(&executable);
238 | 
239 |     target_code
240 | }
241 | 
242 | fn build_call(m: &Module, out: &mut Vec<u8>, target: usize) -> usize /* reloc */ {
243 |     let tf: &Function = &m.functions[target];
244 |     let Type::Func(ref ty_args, ref ty_rets) = &m.types[tf.typeidx as usize];
245 | 
246 |     // target
247 |     out.push(TargetOp::I32Const as u8);
248 |     let reloc_point = out.len();
249 |     write_u32(out, ::std::u32::MAX);
250 | 
251 |     // n_locals
252 |     out.push(TargetOp::I32Const as u8);
253 |     write_u32(out, tf.locals.len() as u32);
254 | 
255 |     out.push(TargetOp::Call as u8);
256 |     write_u32(out, ty_args.len() as u32);
257 | 
258 |     reloc_point
259 | }
260 | 
261 | fn translate_function(m: &Module, f: &Function, offset_table: &OffsetTable, mni: &mut MapNativeInvoke) -> TargetFunction {
262 |     let mut result: Vec<u8> = Vec::new();
263 |     let mut relocs: Vec<Reloc> = Vec::new();
264 |     let opcodes = &f.body.opcodes;
265 |     let mut opcode_relocs: Vec<usize> = Vec::with_capacity(opcodes.len());
266 | 
267 |     for op in opcodes {
268 |         opcode_relocs.push(result.len());
269 |         match *op {
270 |             Opcode::Drop => {
271 |                 result.push(TargetOp::Drop as u8);
272 |             },
273 |             Opcode::Select => {
274 |                 result.push(TargetOp::Select as u8);
275 |             },
276 |             Opcode::Call(target) => {
277 |                 let reloc_point = build_call(m, &mut result, target as usize);
278 |                 relocs.push(Reloc {
279 |                     code_loc: reloc_point,
280 |                     ty: RelocType::Function(target as usize)
281 |                 });
282 |             },
283 |             Opcode::CallIndirect(target_ty) => {
284 |                 let Type::Func(ref ty_args, ref ty_rets) = &m.types[target_ty as usize];
285 | 
286 |                 // We've got the index into the table at stack top.
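                // The sequence emitted below rebases that index by table_slot_offset, loads
                // the packed ((n_locals << 32) | target) slot value via GetSlotIndirect,
                // splits it into target and n_locals, and finally emits Call with the
                // argument count.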
287 | result.push(TargetOp::I32Const as u8); 288 | write_u32(&mut result, offset_table.table_slot_offset as u32); 289 | result.push(TargetOp::I32Add as u8); 290 | result.push(TargetOp::GetSlotIndirect as u8); 291 | // (slot_value) now 292 | 293 | result.push(TargetOp::Dup as u8); 294 | result.push(TargetOp::I64Const as u8); 295 | write_u64(&mut result, 0xffffffffu64); 296 | result.push(TargetOp::I64And as u8); // Take the lower 32 bits (target) 297 | // (slot_value, target) now 298 | 299 | result.push(TargetOp::Swap2 as u8); 300 | result.push(TargetOp::I64Const as u8); 301 | write_u64(&mut result, 0xffffffffu64 << 32); // Take the upper 32 bits (n_locals) 302 | result.push(TargetOp::I64And as u8); 303 | result.push(TargetOp::I64Const as u8); 304 | write_u64(&mut result, 32); 305 | result.push(TargetOp::I64ShrU as u8); 306 | // (target, n_locals) now 307 | 308 | result.push(TargetOp::Call as u8); 309 | write_u32(&mut result, ty_args.len() as u32); 310 | }, 311 | Opcode::Return => { 312 | result.push(TargetOp::Return as u8); 313 | }, 314 | Opcode::Nop => {}, 315 | Opcode::Unreachable => result.push(TargetOp::Unreachable as u8), 316 | Opcode::GetLocal(id) => { 317 | result.push(TargetOp::GetLocal as u8); 318 | write_u32(&mut result, id); 319 | }, 320 | Opcode::SetLocal(id) => { 321 | result.push(TargetOp::SetLocal as u8); 322 | write_u32(&mut result, id); 323 | }, 324 | Opcode::TeeLocal(id) => { 325 | result.push(TargetOp::TeeLocal as u8); 326 | write_u32(&mut result, id); 327 | }, 328 | Opcode::GetGlobal(id) => { 329 | result.push(TargetOp::GetSlot as u8); 330 | write_u32(&mut result, offset_table.globals_slot_offset as u32 + id); 331 | }, 332 | Opcode::SetGlobal(id) => { 333 | result.push(TargetOp::SetSlot as u8); 334 | write_u32(&mut result, offset_table.globals_slot_offset as u32 + id); 335 | }, 336 | Opcode::Jmp(loc) => { 337 | result.push(TargetOp::Jmp as u8); 338 | relocs.push(Reloc { 339 | code_loc: result.len(), 340 | ty: RelocType::LocalJmp(loc as usize) 341 | }); 342 | write_u32(&mut result, ::std::u32::MAX); 343 | }, 344 | Opcode::JmpIf(loc) => { 345 | result.push(TargetOp::JmpIf as u8); 346 | relocs.push(Reloc { 347 | code_loc: result.len(), 348 | ty: RelocType::LocalJmp(loc as usize) 349 | }); 350 | write_u32(&mut result, ::std::u32::MAX); 351 | }, 352 | Opcode::JmpEither(loc_a, loc_b) => { 353 | result.push(TargetOp::JmpEither as u8); 354 | relocs.push(Reloc { 355 | code_loc: result.len(), 356 | ty: RelocType::LocalJmp(loc_a as usize) 357 | }); 358 | write_u32(&mut result, ::std::u32::MAX); 359 | relocs.push(Reloc { 360 | code_loc: result.len(), 361 | ty: RelocType::LocalJmp(loc_b as usize) 362 | }); 363 | write_u32(&mut result, ::std::u32::MAX); 364 | }, 365 | Opcode::JmpTable(ref targets, otherwise) => { 366 | result.push(TargetOp::JmpTable as u8); 367 | relocs.push(Reloc { 368 | code_loc: result.len(), 369 | ty: RelocType::LocalJmp(otherwise as usize) 370 | }); 371 | write_u32(&mut result, ::std::u32::MAX); 372 | 373 | write_u32(&mut result, targets.len() as u32); 374 | for t in targets { 375 | relocs.push(Reloc { 376 | code_loc: result.len(), 377 | ty: RelocType::LocalJmp(*t as usize) 378 | }); 379 | write_u32(&mut result, ::std::u32::MAX); 380 | } 381 | }, 382 | Opcode::CurrentMemory => { 383 | // [current_memory] / 65536 = n_pages 384 | result.push(TargetOp::CurrentMemory as u8); 385 | result.push(TargetOp::I32Const as u8); 386 | write_u32(&mut result, 65536 as u32); 387 | result.push(TargetOp::I32DivU as u8); 388 | }, 389 | Opcode::GrowMemory => { 390 | // len_inc = 
n_pages * 65536 391 | result.push(TargetOp::I32Const as u8); 392 | write_u32(&mut result, 65536 as u32); 393 | result.push(TargetOp::I32Mul as u8); 394 | 395 | result.push(TargetOp::GrowMemory as u8); 396 | 397 | // [current_memory] / 65536 = n_pages 398 | result.push(TargetOp::I32Const as u8); 399 | write_u32(&mut result, 65536 as u32); 400 | result.push(TargetOp::I32DivU as u8); 401 | }, 402 | Opcode::I32Const(v) => { 403 | result.push(TargetOp::I32Const as u8); 404 | write_u32(&mut result, v as u32); 405 | }, 406 | Opcode::I32Clz => result.push(TargetOp::I32Clz as u8), 407 | Opcode::I32Ctz => result.push(TargetOp::I32Ctz as u8), 408 | Opcode::I32Popcnt => result.push(TargetOp::I32Popcnt as u8), 409 | Opcode::I32Add => result.push(TargetOp::I32Add as u8), 410 | Opcode::I32Sub => result.push(TargetOp::I32Sub as u8), 411 | Opcode::I32Mul => result.push(TargetOp::I32Mul as u8), 412 | Opcode::I32DivU => result.push(TargetOp::I32DivU as u8), 413 | Opcode::I32DivS => result.push(TargetOp::I32DivS as u8), 414 | Opcode::I32RemU => result.push(TargetOp::I32RemU as u8), 415 | Opcode::I32RemS => result.push(TargetOp::I32RemS as u8), 416 | Opcode::I32And => result.push(TargetOp::I32And as u8), 417 | Opcode::I32Or => result.push(TargetOp::I32Or as u8), 418 | Opcode::I32Xor => result.push(TargetOp::I32Xor as u8), 419 | Opcode::I32Shl => result.push(TargetOp::I32Shl as u8), 420 | Opcode::I32ShrU => result.push(TargetOp::I32ShrU as u8), 421 | Opcode::I32ShrS => result.push(TargetOp::I32ShrS as u8), 422 | Opcode::I32Rotl => result.push(TargetOp::I32Rotl as u8), 423 | Opcode::I32Rotr => result.push(TargetOp::I32Rotr as u8), 424 | Opcode::I32Eqz => { 425 | result.push(TargetOp::I32Const as u8); 426 | write_u32(&mut result, 0); 427 | result.push(TargetOp::I32Eq as u8); 428 | }, 429 | Opcode::I32Eq => result.push(TargetOp::I32Eq as u8), 430 | Opcode::I32Ne => result.push(TargetOp::I32Ne as u8), 431 | Opcode::I32LtU => result.push(TargetOp::I32LtU as u8), 432 | Opcode::I32LtS => result.push(TargetOp::I32LtS as u8), 433 | Opcode::I32LeU => result.push(TargetOp::I32LeU as u8), 434 | Opcode::I32LeS => result.push(TargetOp::I32LeS as u8), 435 | Opcode::I32GtU => result.push(TargetOp::I32GtU as u8), 436 | Opcode::I32GtS => result.push(TargetOp::I32GtS as u8), 437 | Opcode::I32GeU => result.push(TargetOp::I32GeU as u8), 438 | Opcode::I32GeS => result.push(TargetOp::I32GeS as u8), 439 | Opcode::I32WrapI64 => result.push(TargetOp::I32WrapI64 as u8), 440 | Opcode::I32Load(Memarg { offset, align }) => { 441 | result.push(TargetOp::I32Load as u8); 442 | write_u32(&mut result, offset); 443 | }, 444 | Opcode::I32Load8U(Memarg { offset, align }) => { 445 | result.push(TargetOp::I32Load8U as u8); 446 | write_u32(&mut result, offset); 447 | }, 448 | Opcode::I32Load8S(Memarg { offset, align }) => { 449 | result.push(TargetOp::I32Load8S as u8); 450 | write_u32(&mut result, offset); 451 | }, 452 | Opcode::I32Load16U(Memarg { offset, align }) => { 453 | result.push(TargetOp::I32Load16U as u8); 454 | write_u32(&mut result, offset); 455 | }, 456 | Opcode::I32Load16S(Memarg { offset, align }) => { 457 | result.push(TargetOp::I32Load16S as u8); 458 | write_u32(&mut result, offset); 459 | }, 460 | Opcode::I32Store(Memarg { offset, align }) => { 461 | result.push(TargetOp::I32Store as u8); 462 | write_u32(&mut result, offset); 463 | }, 464 | Opcode::I32Store8(Memarg { offset, align }) => { 465 | result.push(TargetOp::I32Store8 as u8); 466 | write_u32(&mut result, offset); 467 | }, 468 | Opcode::I32Store16(Memarg { offset, align }) => 
{ 469 | result.push(TargetOp::I32Store16 as u8); 470 | write_u32(&mut result, offset); 471 | }, 472 | Opcode::I64Const(v) => { 473 | result.push(TargetOp::I64Const as u8); 474 | write_u64(&mut result, v as u64); 475 | }, 476 | Opcode::I64Clz => result.push(TargetOp::I64Clz as u8), 477 | Opcode::I64Ctz => result.push(TargetOp::I64Ctz as u8), 478 | Opcode::I64Popcnt => result.push(TargetOp::I64Popcnt as u8), 479 | Opcode::I64Add => result.push(TargetOp::I64Add as u8), 480 | Opcode::I64Sub => result.push(TargetOp::I64Sub as u8), 481 | Opcode::I64Mul => result.push(TargetOp::I64Mul as u8), 482 | Opcode::I64DivU => result.push(TargetOp::I64DivU as u8), 483 | Opcode::I64DivS => result.push(TargetOp::I64DivS as u8), 484 | Opcode::I64RemU => result.push(TargetOp::I64RemU as u8), 485 | Opcode::I64RemS => result.push(TargetOp::I64RemS as u8), 486 | Opcode::I64And => result.push(TargetOp::I64And as u8), 487 | Opcode::I64Or => result.push(TargetOp::I64Or as u8), 488 | Opcode::I64Xor => result.push(TargetOp::I64Xor as u8), 489 | Opcode::I64Shl => result.push(TargetOp::I64Shl as u8), 490 | Opcode::I64ShrU => result.push(TargetOp::I64ShrU as u8), 491 | Opcode::I64ShrS => result.push(TargetOp::I64ShrS as u8), 492 | Opcode::I64Rotl => result.push(TargetOp::I64Rotl as u8), 493 | Opcode::I64Rotr => result.push(TargetOp::I64Rotr as u8), 494 | Opcode::I64Eqz => { 495 | result.push(TargetOp::I64Const as u8); 496 | write_u64(&mut result, 0); 497 | result.push(TargetOp::I64Eq as u8); 498 | }, 499 | Opcode::I64Eq => result.push(TargetOp::I64Eq as u8), 500 | Opcode::I64Ne => result.push(TargetOp::I64Ne as u8), 501 | Opcode::I64LtU => result.push(TargetOp::I64LtU as u8), 502 | Opcode::I64LtS => result.push(TargetOp::I64LtS as u8), 503 | Opcode::I64LeU => result.push(TargetOp::I64LeU as u8), 504 | Opcode::I64LeS => result.push(TargetOp::I64LeS as u8), 505 | Opcode::I64GtU => result.push(TargetOp::I64GtU as u8), 506 | Opcode::I64GtS => result.push(TargetOp::I64GtS as u8), 507 | Opcode::I64GeU => result.push(TargetOp::I64GeU as u8), 508 | Opcode::I64GeS => result.push(TargetOp::I64GeS as u8), 509 | Opcode::I64ExtendI32U => result.push(TargetOp::I64ExtendI32U as u8), 510 | Opcode::I64ExtendI32S => result.push(TargetOp::I64ExtendI32S as u8), 511 | Opcode::I64Load(Memarg { offset, align }) => { 512 | result.push(TargetOp::I64Load as u8); 513 | write_u32(&mut result, offset); 514 | }, 515 | Opcode::I64Load8U(Memarg { offset, align }) => { 516 | result.push(TargetOp::I64Load8U as u8); 517 | write_u32(&mut result, offset); 518 | }, 519 | Opcode::I64Load8S(Memarg { offset, align }) => { 520 | result.push(TargetOp::I64Load8S as u8); 521 | write_u32(&mut result, offset); 522 | }, 523 | Opcode::I64Load16U(Memarg { offset, align }) => { 524 | result.push(TargetOp::I64Load16U as u8); 525 | write_u32(&mut result, offset); 526 | }, 527 | Opcode::I64Load16S(Memarg { offset, align }) => { 528 | result.push(TargetOp::I64Load16S as u8); 529 | write_u32(&mut result, offset); 530 | }, 531 | Opcode::I64Load32U(Memarg { offset, align }) => { 532 | result.push(TargetOp::I64Load32U as u8); 533 | write_u32(&mut result, offset); 534 | }, 535 | Opcode::I64Load32S(Memarg { offset, align }) => { 536 | result.push(TargetOp::I64Load32S as u8); 537 | write_u32(&mut result, offset); 538 | }, 539 | Opcode::I64Store(Memarg { offset, align }) => { 540 | result.push(TargetOp::I64Store as u8); 541 | write_u32(&mut result, offset); 542 | }, 543 | Opcode::I64Store8(Memarg { offset, align }) => { 544 | result.push(TargetOp::I64Store8 as u8); 545 | 
                write_u32(&mut result, offset);
546 |             },
547 |             Opcode::I64Store16(Memarg { offset, align }) => {
548 |                 result.push(TargetOp::I64Store16 as u8);
549 |                 write_u32(&mut result, offset);
550 |             },
551 |             Opcode::I64Store32(Memarg { offset, align }) => {
552 |                 result.push(TargetOp::I64Store32 as u8);
553 |                 write_u32(&mut result, offset);
554 |             },
555 |             Opcode::F32Const(v) => {
556 |                 result.push(TargetOp::I32Const as u8);
557 |                 write_u32(&mut result, v as u32);
558 |             },
559 |             Opcode::F64Const(v) => {
560 |                 result.push(TargetOp::I64Const as u8);
561 |                 write_u64(&mut result, v as u64);
562 |             },
563 |             Opcode::F32ReinterpretI32 | Opcode::I32ReinterpretF32
564 |                 | Opcode::F64ReinterpretI64 | Opcode::I64ReinterpretF64 => {},
565 |             Opcode::NativeInvoke(id) => {
566 |                 let native = &m.natives[id as usize];
567 | 
568 |                 result.push(TargetOp::NativeInvoke as u8);
569 |                 write_u32(
570 |                     &mut result,
571 |                     if let Some(ni_id) = mni.map_native_invoke(&native.module, &native.field) {
572 |                         ni_id
573 |                     } else {
574 |                         if native.module != "hexagon_e" {
575 |                             panic!("NativeInvoke with a module other than `hexagon_e` is not supported. Got: {}", native.module);
576 |                         }
577 | 
578 |                         if !native.field.starts_with("syscall_") {
579 |                             panic!("Invalid NativeInvoke field prefix; expecting `syscall_`");
580 |                         }
581 | 
582 |                         let ni_id: u32 = native.field.splitn(2, "_").nth(1).unwrap().parse().unwrap_or_else(|_| {
583 |                             panic!("Unable to parse NativeInvoke id");
584 |                         });
585 | 
586 |                         ni_id
587 |                     }
588 |                 );
589 |             },
590 |             _ => {
591 |                 if cfg!(feature = "debug") {
592 |                     eprintln!("Not implemented: {:?}", op);
593 |                 }
594 |                 result.push(TargetOp::NotSupported as u8);
595 |             }
596 |         }
597 |     }
598 | 
599 |     TargetFunction {
600 |         code: result,
601 |         opcode_relocs: opcode_relocs,
602 |         generic_relocs: relocs
603 |     }
604 | }
605 | 
606 | fn write_initializers(dss: &[DataSegment], target: &mut Vec<u8>) -> Vec<usize> /* code relocs */ {
607 |     let mut relocs: Vec<usize> = Vec::with_capacity(dss.len());
608 | 
609 |     assert_eq!(target.len(), 0);
610 | 
611 |     // placeholder
612 |     write_u32(target, ::std::u32::MAX);
613 | 
614 |     let initial_len = target.len(); // 4
615 | 
616 |     // (addr, len, data)
617 |     for ds in dss {
618 |         write_u32(target, ds.offset);
619 |         write_u32(target, ds.data.len() as u32);
620 |         relocs.push(target.len());
621 |         target.extend_from_slice(&ds.data);
622 |     }
623 | 
624 |     let actual_len = target.len() - initial_len;
625 |     LittleEndian::write_u32(&mut target[0..4], actual_len as u32);
626 | 
627 |     relocs
628 | }
629 | 
630 | // DataSegment with target offsets.
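// build_initializers returns the module's data segments unchanged, plus the initial slot
// image (table entries packed as ((n_locals << 32) | function_id), followed by the globals
// reinterpreted as i64) and an OffsetTable recording where each of the two slot regions starts.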
631 | fn build_initializers(m: &Module) -> (Vec<DataSegment>, Vec<i64>, OffsetTable) {
632 |     let mut slot_values: Vec<i64> = Vec::new();
633 | 
634 |     let wasm_table = &m.tables[0];
635 |     let wasm_globals = &m.globals;
636 | 
637 |     let wasm_table_offset: usize = 0;
638 |     for elem in &wasm_table.elements {
639 |         let elem = elem.unwrap_or(::std::u32::MAX);
640 |         let n_locals = if (elem as usize) < m.functions.len() {
641 |             m.functions[elem as usize].locals.len() as u32
642 |         } else {
643 |             ::std::u32::MAX
644 |         };
645 | 
646 |         slot_values.push(
647 |             (((n_locals as u64) << 32) | (elem as u64)) as i64
648 |         );
649 |     }
650 | 
651 |     let wasm_globals_offset: usize = slot_values.len();
652 |     for g in wasm_globals {
653 |         let val = g.value.reinterpret_as_i64();
654 |         slot_values.push(val);
655 |     }
656 | 
657 |     (m.data_segments.clone(), slot_values, OffsetTable {
658 |         table_slot_offset: wasm_table_offset,
659 |         globals_slot_offset: wasm_globals_offset
660 |     })
661 | }
662 | 
663 | fn write_u32(target: &mut Vec<u8>, val: u32) {
664 |     let val = unsafe { ::std::mem::transmute::<u32, [u8; 4]>(val) };
665 |     target.extend_from_slice(&val);
666 | }
667 | 
668 | fn write_u64(target: &mut Vec<u8>, val: u64) {
669 |     let val = unsafe { ::std::mem::transmute::<u64, [u8; 8]>(val) };
670 |     target.extend_from_slice(&val);
671 | }
672 | 
-------------------------------------------------------------------------------- /src/int_ops.rs: --------------------------------------------------------------------------------
1 | use prelude::intrinsics;
2 | use value::Value;
3 | use opcode::Memarg;
4 | use executor::Memory;
5 | use executor::{ExecuteResult, ExecuteError};
6 | 
7 | #[inline]
8 | pub fn i32_clz(v: i32) -> Value {
9 |     Value::I32(unsafe {
10 |         intrinsics::ctlz(v)
11 |     })
12 | }
13 | 
14 | #[inline]
15 | pub fn i32_ctz(v: i32) -> Value {
16 |     Value::I32(unsafe {
17 |         intrinsics::cttz(v)
18 |     })
19 | }
20 | 
21 | #[inline]
22 | pub fn i32_popcnt(v: i32) -> Value {
23 |     Value::I32(unsafe {
24 |         intrinsics::ctpop(v)
25 |     })
26 | }
27 | 
28 | #[inline]
29 | pub fn i32_add(a: i32, b: i32) -> Value {
30 |     Value::I32(a.wrapping_add(b))
31 | }
32 | 
33 | #[inline]
34 | pub fn i32_sub(a: i32, b: i32) -> Value {
35 |     Value::I32(a.wrapping_sub(b))
36 | }
37 | 
38 | #[inline]
39 | pub fn i32_mul(a: i32, b: i32) -> Value {
40 |     Value::I32(a.wrapping_mul(b))
41 | }
42 | 
43 | #[inline]
44 | pub fn i32_div_u(a: i32, b: i32) -> Value {
45 |     Value::I32((a as u32).wrapping_div(b as u32) as i32)
46 | }
47 | 
48 | #[inline]
49 | pub fn i32_div_s(a: i32, b: i32) -> Value {
50 |     Value::I32(a.wrapping_div(b))
51 | }
52 | 
53 | #[inline]
54 | pub fn i32_rem_u(a: i32, b: i32) -> Value {
55 |     Value::I32((a as u32).wrapping_rem(b as u32) as i32)
56 | }
57 | 
58 | #[inline]
59 | pub fn i32_rem_s(a: i32, b: i32) -> Value {
60 |     Value::I32(a.wrapping_rem(b))
61 | }
62 | 
63 | #[inline]
64 | pub fn i32_and(a: i32, b: i32) -> Value {
65 |     Value::I32(a & b)
66 | }
67 | 
68 | #[inline]
69 | pub fn i32_or(a: i32, b: i32) -> Value {
70 |     Value::I32(a | b)
71 | }
72 | 
73 | #[inline]
74 | pub fn i32_xor(a: i32, b: i32) -> Value {
75 |     Value::I32(a ^ b)
76 | }
77 | 
78 | #[inline]
79 | pub fn i32_shl(a: i32, b: i32) -> Value {
80 |     Value::I32(a.wrapping_shl((b as u32) & 31))
81 | }
82 | 
83 | #[inline]
84 | pub fn i32_shr_u(a: i32, b: i32) -> Value {
85 |     Value::I32(((a as u32).wrapping_shr((b as u32) & 31)) as i32)
86 | }
87 | 
88 | #[inline]
89 | pub fn i32_shr_s(a: i32, b: i32) -> Value {
90 |     Value::I32(a.wrapping_shr((b as u32) & 31))
91 | }
92 | 
93 | #[inline]
94 | pub fn i32_rotl(a: i32, b: i32) -> Value {
95 |
Value::I32(a.rotate_left(b as u32)) 96 | } 97 | 98 | #[inline] 99 | pub fn i32_rotr(a: i32, b: i32) -> Value { 100 | Value::I32(a.rotate_right(b as u32)) 101 | } 102 | 103 | #[inline] 104 | pub fn i32_eqz(v: i32) -> Value { 105 | if v == 0 { 106 | Value::I32(1) 107 | } else { 108 | Value::I32(0) 109 | } 110 | } 111 | 112 | #[inline] 113 | pub fn i32_eq(a: i32, b: i32) -> Value { 114 | if a == b { 115 | Value::I32(1) 116 | } else { 117 | Value::I32(0) 118 | } 119 | } 120 | 121 | #[inline] 122 | pub fn i32_ne(a: i32, b: i32) -> Value { 123 | if a == b { 124 | Value::I32(0) 125 | } else { 126 | Value::I32(1) 127 | } 128 | } 129 | 130 | #[inline] 131 | pub fn i32_lt_u(a: i32, b: i32) -> Value { 132 | if (a as u32) < (b as u32) { 133 | Value::I32(1) 134 | } else { 135 | Value::I32(0) 136 | } 137 | } 138 | 139 | #[inline] 140 | pub fn i32_lt_s(a: i32, b: i32) -> Value { 141 | if a < b { 142 | Value::I32(1) 143 | } else { 144 | Value::I32(0) 145 | } 146 | } 147 | 148 | #[inline] 149 | pub fn i32_le_u(a: i32, b: i32) -> Value { 150 | if (a as u32) <= (b as u32) { 151 | Value::I32(1) 152 | } else { 153 | Value::I32(0) 154 | } 155 | } 156 | 157 | #[inline] 158 | pub fn i32_le_s(a: i32, b: i32) -> Value { 159 | if a <= b { 160 | Value::I32(1) 161 | } else { 162 | Value::I32(0) 163 | } 164 | } 165 | 166 | #[inline] 167 | pub fn i32_gt_u(a: i32, b: i32) -> Value { 168 | if (a as u32) > (b as u32) { 169 | Value::I32(1) 170 | } else { 171 | Value::I32(0) 172 | } 173 | } 174 | 175 | #[inline] 176 | pub fn i32_gt_s(a: i32, b: i32) -> Value { 177 | if a > b { 178 | Value::I32(1) 179 | } else { 180 | Value::I32(0) 181 | } 182 | } 183 | 184 | #[inline] 185 | pub fn i32_ge_u(a: i32, b: i32) -> Value { 186 | if (a as u32) >= (b as u32) { 187 | Value::I32(1) 188 | } else { 189 | Value::I32(0) 190 | } 191 | } 192 | 193 | #[inline] 194 | pub fn i32_ge_s(a: i32, b: i32) -> Value { 195 | if a >= b { 196 | Value::I32(1) 197 | } else { 198 | Value::I32(0) 199 | } 200 | } 201 | 202 | #[inline] 203 | pub fn i32_wrap_i64(a: i64) -> Value { 204 | Value::I32(a as i32) 205 | } 206 | 207 | unsafe trait LoadStore: Copy + Sized {} 208 | unsafe impl LoadStore for i32 {} 209 | unsafe impl LoadStore for i64 {} 210 | 211 | #[inline] 212 | fn load_from_mem(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult { 213 | let n = n as usize; 214 | 215 | let t_size = ::prelude::mem::size_of::(); 216 | if n > t_size { 217 | return Err(ExecuteError::InvalidMemoryOperation); 218 | } 219 | 220 | let data: &[u8] = storage.data.as_slice(); 221 | 222 | let ea = (index + m.offset) as usize; 223 | if ea + n > data.len() { 224 | return Err(ExecuteError::AddrOutOfBound(ea as u32, n as u32)); 225 | } 226 | 227 | // n <= sizeof(T) holds here so we can copy safely. 228 | unsafe { 229 | let mut result: T = ::prelude::mem::zeroed(); 230 | ::prelude::ptr::copy( 231 | &data[ea] as *const u8, 232 | &mut result as *mut T as *mut u8, 233 | n 234 | ); 235 | 236 | Ok(result) 237 | } 238 | } 239 | 240 | #[inline] 241 | fn store_to_mem(index: u32, val: T, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<()> { 242 | let n = n as usize; 243 | 244 | let t_size = ::prelude::mem::size_of::(); 245 | if n > t_size { 246 | return Err(ExecuteError::InvalidMemoryOperation); 247 | } 248 | 249 | let data: &mut [u8] = storage.data.as_mut_slice(); 250 | 251 | let ea = (index + m.offset) as usize; 252 | 253 | // this will not overflow because all of index, m.offset 254 | // and n is in the range of u32. 
255 | if ea + n > data.len() { 256 | return Err(ExecuteError::AddrOutOfBound(ea as u32, n as u32)); 257 | } 258 | 259 | // ea + n <= data.len() && n <= sizeof(T) holds here so we can copy safely. 260 | unsafe { 261 | ::prelude::ptr::copy( 262 | &val as *const T as *const u8, 263 | &mut data[ea] as *mut u8, 264 | n 265 | ); 266 | } 267 | 268 | Ok(()) 269 | } 270 | 271 | #[inline] 272 | fn unsigned_loaded_i32_to_signed(v: i32, n: u32) -> i32 { 273 | match n { 274 | 1 => (v as u32) as u8 as i8 as i32, 275 | 2 => (v as u32) as u16 as i16 as i32, 276 | _ => v 277 | } 278 | } 279 | 280 | #[inline] 281 | fn unsigned_loaded_i64_to_signed(v: i64, n: u32) -> i64 { 282 | match n { 283 | 1 => (v as u64) as u8 as i8 as i64, 284 | 2 => (v as u64) as u16 as i16 as i64, 285 | 4 => (v as u64) as u32 as i32 as i64, 286 | _ => v 287 | } 288 | } 289 | 290 | #[cfg(test)] 291 | #[test] 292 | fn test_unsigned_loaded_to_signed() { 293 | assert_eq!(unsigned_loaded_i32_to_signed(0b11111011, 1), -5); 294 | assert_eq!(unsigned_loaded_i64_to_signed(0b11111011, 1), -5); 295 | } 296 | 297 | #[inline] 298 | pub fn i32_load_s(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<Value> { 299 | let v: i32 = load_from_mem(index, m, storage, n)?; 300 | Ok(Value::I32(unsigned_loaded_i32_to_signed(v, n))) 301 | } 302 | 303 | #[inline] 304 | pub fn i32_load_u(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<Value> { 305 | Ok(Value::I32(load_from_mem(index, m, storage, n)?)) 306 | } 307 | 308 | #[inline] 309 | pub fn i32_store(index: u32, val: Value, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<()> { 310 | store_to_mem(index, val.get_i32()?, m, storage, n) 311 | } 312 | 313 | #[inline] 314 | pub fn i64_clz(v: i64) -> Value { 315 | Value::I64(unsafe { 316 | intrinsics::ctlz(v) 317 | }) 318 | } 319 | 320 | #[inline] 321 | pub fn i64_ctz(v: i64) -> Value { 322 | Value::I64(unsafe { 323 | intrinsics::cttz(v) 324 | }) 325 | } 326 | 327 | #[inline] 328 | pub fn i64_popcnt(v: i64) -> Value { 329 | Value::I64(unsafe { 330 | intrinsics::ctpop(v) 331 | }) 332 | } 333 | 334 | #[inline] 335 | pub fn i64_add(a: i64, b: i64) -> Value { 336 | Value::I64(a.wrapping_add(b)) 337 | } 338 | 339 | #[inline] 340 | pub fn i64_sub(a: i64, b: i64) -> Value { 341 | Value::I64(a.wrapping_sub(b)) 342 | } 343 | 344 | #[inline] 345 | pub fn i64_mul(a: i64, b: i64) -> Value { 346 | Value::I64(a.wrapping_mul(b)) 347 | } 348 | 349 | #[inline] 350 | pub fn i64_div_u(a: i64, b: i64) -> Value { 351 | Value::I64((a as u64).wrapping_div(b as u64) as i64) 352 | } 353 | 354 | #[inline] 355 | pub fn i64_div_s(a: i64, b: i64) -> Value { 356 | Value::I64(a.wrapping_div(b)) 357 | } 358 | 359 | #[inline] 360 | pub fn i64_rem_u(a: i64, b: i64) -> Value { 361 | Value::I64((a as u64).wrapping_rem(b as u64) as i64) 362 | } 363 | 364 | #[inline] 365 | pub fn i64_rem_s(a: i64, b: i64) -> Value { 366 | Value::I64(a.wrapping_rem(b)) 367 | } 368 | 369 | #[inline] 370 | pub fn i64_and(a: i64, b: i64) -> Value { 371 | Value::I64(a & b) 372 | } 373 | 374 | #[inline] 375 | pub fn i64_or(a: i64, b: i64) -> Value { 376 | Value::I64(a | b) 377 | } 378 | 379 | #[inline] 380 | pub fn i64_xor(a: i64, b: i64) -> Value { 381 | Value::I64(a ^ b) 382 | } 383 | 384 | #[inline] 385 | pub fn i64_shl(a: i64, b: i64) -> Value { 386 | Value::I64(a.wrapping_shl(b as u32)) 387 | } 388 | 389 | #[inline] 390 | pub fn i64_shr_u(a: i64, b: i64) -> Value { 391 | Value::I64(((a as u64).wrapping_shr(b as u32)) as i64) 392 | } 393 | 394 | #[inline] 395 | pub fn
i64_shr_s(a: i64, b: i64) -> Value { 396 | Value::I64(a.wrapping_shr(b as u32)) 397 | } 398 | 399 | #[inline] 400 | pub fn i64_rotl(a: i64, b: i64) -> Value { 401 | Value::I64(a.rotate_left(b as u32)) 402 | } 403 | 404 | #[inline] 405 | pub fn i64_rotr(a: i64, b: i64) -> Value { 406 | Value::I64(a.rotate_right(b as u32)) 407 | } 408 | 409 | #[inline] 410 | pub fn i64_eqz(v: i64) -> Value { 411 | if v == 0 { 412 | Value::I32(1) 413 | } else { 414 | Value::I32(0) 415 | } 416 | } 417 | 418 | #[inline] 419 | pub fn i64_eq(a: i64, b: i64) -> Value { 420 | if a == b { 421 | Value::I32(1) 422 | } else { 423 | Value::I32(0) 424 | } 425 | } 426 | 427 | #[inline] 428 | pub fn i64_ne(a: i64, b: i64) -> Value { 429 | if a == b { 430 | Value::I32(0) 431 | } else { 432 | Value::I32(1) 433 | } 434 | } 435 | 436 | #[inline] 437 | pub fn i64_lt_u(a: i64, b: i64) -> Value { 438 | if (a as u64) < (b as u64) { 439 | Value::I32(1) 440 | } else { 441 | Value::I32(0) 442 | } 443 | } 444 | 445 | #[inline] 446 | pub fn i64_lt_s(a: i64, b: i64) -> Value { 447 | if a < b { 448 | Value::I32(1) 449 | } else { 450 | Value::I32(0) 451 | } 452 | } 453 | 454 | #[inline] 455 | pub fn i64_le_u(a: i64, b: i64) -> Value { 456 | if (a as u64) <= (b as u64) { 457 | Value::I32(1) 458 | } else { 459 | Value::I32(0) 460 | } 461 | } 462 | 463 | #[inline] 464 | pub fn i64_le_s(a: i64, b: i64) -> Value { 465 | if a <= b { 466 | Value::I32(1) 467 | } else { 468 | Value::I32(0) 469 | } 470 | } 471 | 472 | #[inline] 473 | pub fn i64_gt_u(a: i64, b: i64) -> Value { 474 | if (a as u64) > (b as u64) { 475 | Value::I32(1) 476 | } else { 477 | Value::I32(0) 478 | } 479 | } 480 | 481 | #[inline] 482 | pub fn i64_gt_s(a: i64, b: i64) -> Value { 483 | if a > b { 484 | Value::I32(1) 485 | } else { 486 | Value::I32(0) 487 | } 488 | } 489 | 490 | #[inline] 491 | pub fn i64_ge_u(a: i64, b: i64) -> Value { 492 | if (a as u64) >= (b as u64) { 493 | Value::I32(1) 494 | } else { 495 | Value::I32(0) 496 | } 497 | } 498 | 499 | #[inline] 500 | pub fn i64_ge_s(a: i64, b: i64) -> Value { 501 | if a >= b { 502 | Value::I32(1) 503 | } else { 504 | Value::I32(0) 505 | } 506 | } 507 | 508 | #[inline] 509 | pub fn i64_extend_i32_u(v: i32) -> Value { 510 | // FIXME: Is this correct? 
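// Yes: `v as i64` sign-extends, and the mask then clears bits 32..63, which
// is exactly the zero-extension that `I64ExtendI32U` requires. For example,
// i64_extend_i32_u(-1) == Value::I64(0xffff_ffff). An equivalent, arguably
// clearer spelling is `(v as u32) as i64`, since u32 -> i64 zero-extends.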
511 | Value::I64((v as i64) & 0x00000000ffffffffi64) 512 | } 513 | 514 | #[inline] 515 | pub fn i64_extend_i32_s(v: i32) -> Value { 516 | Value::I64(v as i64) 517 | } 518 | 519 | #[inline] 520 | pub fn i64_load_s(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult { 521 | let v: i64 = load_from_mem(index, m, storage, n)?; 522 | Ok(Value::I64(unsigned_loaded_i64_to_signed(v, n))) 523 | } 524 | 525 | #[inline] 526 | pub fn i64_load_u(index: u32, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult { 527 | Ok(Value::I64(load_from_mem(index, m, storage, n)?)) 528 | } 529 | 530 | #[inline] 531 | pub fn i64_store(index: u32, val: Value, m: &Memarg, storage: &mut Memory, n: u32) -> ExecuteResult<()> { 532 | store_to_mem(index, val.get_i64()?, m, storage, n) 533 | } 534 | -------------------------------------------------------------------------------- /src/jit/llvm.rs: -------------------------------------------------------------------------------- 1 | use llvm_sys; 2 | use llvm_sys::prelude::*; 3 | use llvm_sys::core::*; 4 | use llvm_sys::execution_engine::*; 5 | use llvm_sys::target::*; 6 | use llvm_sys::analysis::*; 7 | use llvm_sys::orc::*; 8 | use llvm_sys::target_machine::*; 9 | use llvm_sys::transforms::pass_manager_builder::*; 10 | use llvm_sys::{ 11 | LLVMIntPredicate, 12 | LLVMRealPredicate, 13 | LLVMLinkage 14 | }; 15 | use std::rc::Rc; 16 | use std::cell::Cell; 17 | use std::ffi::{CStr, CString}; 18 | use std::os::raw::{c_char, c_void}; 19 | 20 | pub use llvm_sys::LLVMOpcode; 21 | pub use llvm_sys::LLVMIntPredicate::*; 22 | pub use llvm_sys::LLVMRealPredicate::*; 23 | pub use llvm_sys::prelude::LLVMValueRef; 24 | pub use llvm_sys::LLVMLinkage::*; 25 | 26 | fn empty_cstr() -> *const c_char { 27 | b"\0".as_ptr() as _ 28 | } 29 | 30 | lazy_static! 
{ 31 | static ref LLVM_EXEC: bool = { 32 | unsafe { 33 | LLVMLinkInMCJIT(); 34 | assert_eq!( 35 | LLVM_InitializeNativeTarget(), 36 | 0 37 | ); 38 | assert_eq!( 39 | LLVM_InitializeNativeAsmPrinter(), 40 | 0 41 | ); 42 | } 43 | true 44 | }; 45 | } 46 | 47 | #[derive(Clone)] 48 | pub struct Context { 49 | inner: Rc 50 | } 51 | 52 | pub struct ContextImpl { 53 | _ref: LLVMContextRef 54 | } 55 | 56 | impl Context { 57 | pub fn new() -> Context { 58 | Context { 59 | inner: Rc::new(ContextImpl { 60 | _ref: unsafe { LLVMContextCreate() } 61 | }) 62 | } 63 | } 64 | } 65 | 66 | impl Drop for ContextImpl { 67 | fn drop(&mut self) { 68 | unsafe { LLVMContextDispose(self._ref); } 69 | } 70 | } 71 | 72 | #[derive(Clone)] 73 | pub struct Module { 74 | inner: Rc 75 | } 76 | 77 | pub struct ModuleImpl { 78 | _context: Context, 79 | _ref: LLVMModuleRef, 80 | _ref_invalidated: Cell 81 | } 82 | 83 | impl Module { 84 | pub fn new(ctx: &Context, name: String) -> Module { 85 | let name = CString::new(name).unwrap(); 86 | 87 | Module { 88 | inner: Rc::new(ModuleImpl { 89 | _context: ctx.clone(), 90 | _ref: unsafe { LLVMModuleCreateWithNameInContext( 91 | name.as_ptr(), 92 | ctx.inner._ref 93 | ) }, 94 | _ref_invalidated: Cell::new(false) 95 | }) 96 | } 97 | } 98 | 99 | pub fn deep_clone(&self) -> Module { 100 | Module { 101 | inner: Rc::new(ModuleImpl { 102 | _context: self.inner._context.clone(), 103 | _ref: unsafe { LLVMCloneModule(self.inner._ref) }, 104 | _ref_invalidated: Cell::new(false) 105 | }) 106 | } 107 | } 108 | 109 | pub fn verify(&self) { 110 | unsafe { 111 | LLVMVerifyModule( 112 | self.inner._ref, 113 | LLVMVerifierFailureAction::LLVMAbortProcessAction, 114 | ::std::ptr::null_mut() 115 | ); 116 | } 117 | } 118 | 119 | pub fn inline_with_threshold(&self, threshold: usize) { 120 | unsafe { 121 | let pm = LLVMCreatePassManager(); 122 | 123 | { 124 | let pmb = LLVMPassManagerBuilderCreate(); 125 | 126 | LLVMPassManagerBuilderUseInlinerWithThreshold(pmb, threshold as _); 127 | LLVMPassManagerBuilderPopulateModulePassManager(pmb, pm); 128 | 129 | LLVMPassManagerBuilderDispose(pmb); 130 | } 131 | 132 | LLVMRunPassManager(pm, self.inner._ref); 133 | LLVMDisposePassManager(pm); 134 | } 135 | } 136 | 137 | pub fn optimize(&self) { 138 | unsafe { 139 | let pm = LLVMCreatePassManager(); 140 | 141 | { 142 | let pmb = LLVMPassManagerBuilderCreate(); 143 | 144 | LLVMPassManagerBuilderSetOptLevel(pmb, 2); 145 | LLVMPassManagerBuilderPopulateModulePassManager(pmb, pm); 146 | 147 | LLVMPassManagerBuilderDispose(pmb); 148 | } 149 | 150 | LLVMRunPassManager(pm, self.inner._ref); 151 | LLVMRunPassManager(pm, self.inner._ref); 152 | 153 | LLVMDisposePassManager(pm); 154 | } 155 | } 156 | } 157 | 158 | impl Drop for ModuleImpl { 159 | fn drop(&mut self) { 160 | unsafe { 161 | if !self._ref_invalidated.get() { 162 | LLVMDisposeModule(self._ref); 163 | } 164 | } 165 | } 166 | } 167 | 168 | pub struct ExecutionEngine { 169 | _context: Context, 170 | _ref: LLVMExecutionEngineRef, 171 | _module_ref: LLVMModuleRef 172 | } 173 | 174 | impl ExecutionEngine { 175 | pub fn new(m: Module) -> ExecutionEngine { 176 | Self::with_opt_level(m, 1) 177 | } 178 | 179 | pub fn with_opt_level(mut m: Module, opt_level: usize) -> ExecutionEngine { 180 | // Ensure that LLVM JIT has been initialized 181 | assert_eq!(*LLVM_EXEC, true); 182 | 183 | let m = Rc::try_unwrap(m.inner).unwrap_or_else(|_| { 184 | panic!("Attempting to create an execution engine from a module while there are still some strong references to it"); 185 | }); 186 | 187 | 
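// Verify before handing the module to MCJIT: with LLVMAbortProcessAction,
// invalid IR aborts here with a diagnostic instead of miscompiling later.
// Once LLVMCreateMCJITCompilerForModule succeeds below, the execution
// engine owns the module, which is why `_ref_invalidated` is set to
// suppress the module's own Drop.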
unsafe { 188 | LLVMVerifyModule( 189 | m._ref, 190 | LLVMVerifierFailureAction::LLVMAbortProcessAction, 191 | ::std::ptr::null_mut() 192 | ); 193 | } 194 | 195 | unsafe { 196 | let mut ee: LLVMExecutionEngineRef = ::std::mem::uninitialized(); 197 | let mut err: *mut c_char = ::std::ptr::null_mut(); 198 | 199 | let mut mcjit_opts: LLVMMCJITCompilerOptions = ::std::mem::uninitialized(); 200 | LLVMInitializeMCJITCompilerOptions( 201 | &mut mcjit_opts, 202 | ::std::mem::size_of::() as _ 203 | ); 204 | mcjit_opts.OptLevel = opt_level as _; 205 | 206 | let ret = LLVMCreateMCJITCompilerForModule( 207 | &mut ee, 208 | m._ref, 209 | &mut mcjit_opts, 210 | ::std::mem::size_of::() as _, 211 | &mut err 212 | ); 213 | if ret != 0 { 214 | let e = CStr::from_ptr(err).to_str().unwrap_or_else(|_| { 215 | eprintln!("Fatal error: Unable to read error string"); 216 | ::std::process::abort(); 217 | }); 218 | eprintln!("Fatal error: Unable to create execution engine from module: {}", e); 219 | ::std::process::abort(); 220 | } 221 | 222 | m._ref_invalidated.set(true); 223 | 224 | ExecutionEngine { 225 | _context: m._context.clone(), 226 | _ref: ee, 227 | _module_ref: m._ref 228 | } 229 | } 230 | } 231 | 232 | pub fn deep_clone_module(&self) -> Module { 233 | Module { 234 | inner: Rc::new(ModuleImpl { 235 | _context: self._context.clone(), 236 | _ref: unsafe { LLVMCloneModule(self._module_ref) }, 237 | _ref_invalidated: Cell::new(false) 238 | }) 239 | } 240 | } 241 | 242 | pub fn get_function_address(&self, name: &str) -> Option<*const c_void> { 243 | let name = CString::new(name).unwrap(); 244 | 245 | let addr = unsafe { LLVMGetFunctionAddress( 246 | self._ref, 247 | name.as_ptr() 248 | ) as *const c_void }; 249 | if addr.is_null() { 250 | None 251 | } else { 252 | Some(addr) 253 | } 254 | } 255 | 256 | pub fn to_string(&self) -> String { 257 | unsafe { 258 | let raw_s = LLVMPrintModuleToString(self._module_ref); 259 | let s = CStr::from_ptr( 260 | raw_s 261 | ).to_str().unwrap().to_string(); 262 | LLVMDisposeMessage(raw_s); 263 | s 264 | } 265 | } 266 | } 267 | 268 | impl Drop for ExecutionEngine { 269 | fn drop(&mut self) { 270 | unsafe { 271 | LLVMDisposeExecutionEngine(self._ref); 272 | } 273 | } 274 | } 275 | 276 | #[derive(Clone)] 277 | pub struct Orc { 278 | inner: Rc 279 | } 280 | 281 | pub struct OrcInner { 282 | _ref: LLVMOrcJITStackRef 283 | } 284 | 285 | impl Orc { 286 | pub fn new() -> Orc { 287 | // Ensure that LLVM JIT has been initialized 288 | assert_eq!(*LLVM_EXEC, true); 289 | 290 | unsafe { 291 | let def_triple = LLVMGetDefaultTargetTriple(); 292 | let mut err: *mut c_char = ::std::ptr::null_mut(); 293 | 294 | let mut target_ref: LLVMTargetRef = ::std::mem::uninitialized(); 295 | let code = LLVMGetTargetFromTriple( 296 | def_triple, 297 | &mut target_ref, 298 | &mut err 299 | ); 300 | if code != 0 { 301 | eprintln!("LLVMGetTargetFromTriple failed"); 302 | ::std::process::abort(); 303 | } 304 | 305 | if LLVMTargetHasJIT(target_ref) == 0 { 306 | eprintln!("The current platform has no JIT support"); 307 | ::std::process::abort(); 308 | } 309 | 310 | let tm_ref = LLVMCreateTargetMachine( 311 | target_ref, 312 | def_triple, 313 | ::std::ptr::null_mut(), 314 | ::std::ptr::null_mut(), 315 | LLVMCodeGenOptLevel::LLVMCodeGenLevelDefault, 316 | LLVMRelocMode::LLVMRelocDefault, 317 | LLVMCodeModel::LLVMCodeModelJITDefault 318 | ); 319 | LLVMDisposeMessage(def_triple); 320 | 321 | assert_eq!(tm_ref.is_null(), false); 322 | 323 | Orc { 324 | inner: Rc::new(OrcInner { 325 | _ref: unsafe { 326 | 
LLVMOrcCreateInstance(tm_ref) 327 | } 328 | }) 329 | } 330 | } 331 | } 332 | 333 | pub fn resolve(&self, name: &str) -> *const c_void { 334 | unsafe { 335 | let mut addr: u64 = 0; 336 | let name = CString::new(name).unwrap(); 337 | let code = LLVMOrcGetSymbolAddress(self.inner._ref, &mut addr, name.as_ptr()); 338 | assert_eq!(code, LLVMOrcErrorCode::LLVMOrcErrSuccess); 339 | 340 | addr as usize as _ 341 | } 342 | } 343 | 344 | extern "C" fn sym_resolve(name: *const c_char, ctx: *mut c_void) -> u64 { 345 | let inner: *const OrcInner = ctx as _; 346 | unsafe { 347 | let mut addr: u64 = 0; 348 | let code = LLVMOrcGetSymbolAddress((*inner)._ref, &mut addr, name); 349 | assert_eq!(code, LLVMOrcErrorCode::LLVMOrcErrSuccess); 350 | 351 | addr 352 | } 353 | } 354 | 355 | pub fn add_lazily_compiled_ir( 356 | &self, 357 | m: Module 358 | ) { 359 | let m = Rc::try_unwrap(m.inner).unwrap_or_else(|_| { 360 | panic!("Attempting to create an execution engine from a module while there are still some strong references to it"); 361 | }); 362 | 363 | unsafe { 364 | LLVMVerifyModule( 365 | m._ref, 366 | LLVMVerifierFailureAction::LLVMAbortProcessAction, 367 | ::std::ptr::null_mut() 368 | ); 369 | } 370 | 371 | m._ref_invalidated.set(true); 372 | 373 | unsafe { 374 | let mut module_handle: LLVMOrcModuleHandle = ::std::mem::uninitialized(); 375 | let code = LLVMOrcAddLazilyCompiledIR( 376 | self.inner._ref, 377 | &mut module_handle, 378 | LLVMOrcMakeSharedModule(m._ref), 379 | ::std::mem::transmute(Self::sym_resolve as usize), 380 | &*self.inner as *const OrcInner as *const c_void as *mut c_void as _ 381 | ); 382 | assert_eq!(code, LLVMOrcErrorCode::LLVMOrcErrSuccess); 383 | } 384 | } 385 | } 386 | 387 | impl Drop for OrcInner { 388 | fn drop(&mut self) { 389 | let code = unsafe { 390 | LLVMOrcDisposeInstance(self._ref) 391 | }; 392 | if code != LLVMOrcErrorCode::LLVMOrcErrSuccess { 393 | ::std::process::abort(); 394 | } 395 | } 396 | } 397 | 398 | #[cfg(test)] 399 | #[test] 400 | fn test_orc_init() { 401 | let _orc = Orc::new(); 402 | } 403 | 404 | pub struct Type { 405 | _context: Context, 406 | _ref: LLVMTypeRef 407 | } 408 | 409 | impl Type { 410 | pub fn function(ctx: &Context, ret: Type, params: &[Type]) -> Type { 411 | let mut params: Vec = params.iter().map(|v| v._ref).collect(); 412 | 413 | Type { 414 | _context: ctx.clone(), 415 | _ref: unsafe { LLVMFunctionType( 416 | ret._ref, 417 | if params.len() == 0 { 418 | ::std::ptr::null_mut() 419 | } else { 420 | &mut params[0] 421 | }, 422 | params.len() as _, 423 | 0 424 | ) } 425 | } 426 | } 427 | 428 | pub fn int_native(ctx: &Context) -> Type { 429 | match NativePointerWidth::detect() { 430 | NativePointerWidth::W32 => Self::int32(ctx), 431 | NativePointerWidth::W64 => Self::int64(ctx) 432 | } 433 | } 434 | 435 | pub fn int1(ctx: &Context) -> Type { 436 | Type { 437 | _context: ctx.clone(), 438 | _ref: unsafe { LLVMInt1TypeInContext(ctx.inner._ref) } 439 | } 440 | } 441 | 442 | pub fn int8(ctx: &Context) -> Type { 443 | Type { 444 | _context: ctx.clone(), 445 | _ref: unsafe { LLVMInt8TypeInContext(ctx.inner._ref) } 446 | } 447 | } 448 | 449 | pub fn int16(ctx: &Context) -> Type { 450 | Type { 451 | _context: ctx.clone(), 452 | _ref: unsafe { LLVMInt16TypeInContext(ctx.inner._ref) } 453 | } 454 | } 455 | 456 | pub fn int32(ctx: &Context) -> Type { 457 | Type { 458 | _context: ctx.clone(), 459 | _ref: unsafe { LLVMInt32TypeInContext(ctx.inner._ref) } 460 | } 461 | } 462 | 463 | pub fn int64(ctx: &Context) -> Type { 464 | Type { 465 | _context: ctx.clone(), 
466 | _ref: unsafe { LLVMInt64TypeInContext(ctx.inner._ref) } 467 | } 468 | } 469 | 470 | pub fn float32(ctx: &Context) -> Type { 471 | Type { 472 | _context: ctx.clone(), 473 | _ref: unsafe { LLVMFloatTypeInContext(ctx.inner._ref) } 474 | } 475 | } 476 | 477 | pub fn float64(ctx: &Context) -> Type { 478 | Type { 479 | _context: ctx.clone(), 480 | _ref: unsafe { LLVMDoubleTypeInContext(ctx.inner._ref) } 481 | } 482 | } 483 | 484 | pub fn void(ctx: &Context) -> Type { 485 | Type { 486 | _context: ctx.clone(), 487 | _ref: unsafe { LLVMVoidTypeInContext(ctx.inner._ref) } 488 | } 489 | } 490 | 491 | pub fn pointer(inner: Type) -> Type { 492 | Type { 493 | _context: inner._context.clone(), 494 | _ref: unsafe { LLVMPointerType(inner._ref, 0) } 495 | } 496 | } 497 | 498 | pub fn struct_type(ctx: &Context, inner: &[Type], packed: bool) -> Type { 499 | let mut llvm_types: Vec = inner.iter().map(|t| t._ref).collect(); 500 | let n_elements = llvm_types.len(); 501 | 502 | Type { 503 | _context: ctx.clone(), 504 | _ref: unsafe { 505 | LLVMStructTypeInContext( 506 | ctx.inner._ref, 507 | if llvm_types.len() == 0 { 508 | ::std::ptr::null_mut() 509 | } else { 510 | &mut llvm_types[0] 511 | }, 512 | n_elements as _, 513 | if packed { 514 | 1 515 | } else { 516 | 0 517 | } 518 | ) 519 | } 520 | } 521 | } 522 | 523 | pub fn array(ctx: &Context, elem_type: Type, n_elements: usize) -> Type { 524 | Type { 525 | _context: ctx.clone(), 526 | _ref: unsafe { 527 | LLVMArrayType( 528 | elem_type._ref, 529 | n_elements as _ 530 | ) 531 | } 532 | } 533 | } 534 | } 535 | 536 | pub struct Function { 537 | _context: Context, 538 | _module: Module, 539 | _ref: LLVMValueRef 540 | } 541 | 542 | impl Function { 543 | pub fn new(ctx: &Context, m: &Module, name: &str, ty: Type) -> Function { 544 | let name = CString::new(name).unwrap(); 545 | 546 | Function { 547 | _context: ctx.clone(), 548 | _module: m.clone(), 549 | _ref: unsafe { LLVMAddFunction(m.inner._ref, name.as_ptr(), ty._ref) } 550 | } 551 | } 552 | 553 | pub unsafe fn get_param(&self, idx: usize) -> LLVMValueRef { 554 | unsafe { 555 | LLVMGetParam(self._ref, idx as _) 556 | } 557 | } 558 | 559 | pub fn to_string(&self) -> String { 560 | unsafe { 561 | let raw_s = LLVMPrintValueToString(self._ref); 562 | let s = CStr::from_ptr( 563 | raw_s 564 | ).to_str().unwrap().to_string(); 565 | LLVMDisposeMessage(raw_s); 566 | s 567 | } 568 | } 569 | 570 | pub fn verify(&self) { 571 | unsafe { 572 | LLVMVerifyFunction( 573 | self._ref, 574 | LLVMVerifierFailureAction::LLVMAbortProcessAction 575 | ); 576 | } 577 | } 578 | 579 | pub fn set_linkage(&self, linkage: LLVMLinkage) { 580 | unsafe { 581 | LLVMSetLinkage(self._ref, linkage); 582 | } 583 | } 584 | } 585 | 586 | pub struct BasicBlock<'a> { 587 | _func: &'a Function, 588 | _ref: LLVMBasicBlockRef 589 | } 590 | 591 | impl<'a> BasicBlock<'a> { 592 | pub fn new(f: &'a Function) -> BasicBlock<'a> { 593 | BasicBlock { 594 | _func: f, 595 | _ref: unsafe { LLVMAppendBasicBlockInContext( 596 | f._context.inner._ref, 597 | f._ref, 598 | b"\0".as_ptr() as _ 599 | ) } 600 | } 601 | } 602 | 603 | pub fn builder(&'a self) -> Builder<'a> { 604 | Builder::new(self) 605 | } 606 | } 607 | 608 | pub struct Builder<'a> { 609 | _bb: &'a BasicBlock<'a>, 610 | _ref: LLVMBuilderRef 611 | } 612 | 613 | impl<'a> Builder<'a> { 614 | pub fn new(bb: &'a BasicBlock<'a>) -> Builder<'a> { 615 | unsafe { 616 | let b_ref = LLVMCreateBuilderInContext( 617 | bb._func._context.inner._ref 618 | ); 619 | LLVMPositionBuilderAtEnd(b_ref, bb._ref); 620 | 621 | 
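// The builder is created in the function's LLVM context and positioned at
// the end of `bb`, so every subsequent `build_*` call appends to that
// block; the raw LLVMBuilderRef is released by the Drop impl below.
// A minimal usage sketch (hypothetical, for illustration only):
//
//     let bb = BasicBlock::new(&f);
//     let builder = bb.builder();
//     unsafe { builder.build_ret_void(); }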
Builder { 622 | _bb: bb, 623 | _ref: b_ref 624 | } 625 | } 626 | } 627 | 628 | pub unsafe fn build_br(&self, other: &BasicBlock) -> LLVMValueRef { 629 | LLVMBuildBr(self._ref, other._ref) 630 | } 631 | 632 | pub unsafe fn build_cond_br( 633 | &self, 634 | val: LLVMValueRef, 635 | if_true: &BasicBlock, 636 | if_false: &BasicBlock 637 | ) -> LLVMValueRef { 638 | LLVMBuildCondBr(self._ref, val, if_true._ref, if_false._ref) 639 | } 640 | 641 | pub unsafe fn build_alloca(&self, ty: Type) -> LLVMValueRef { 642 | LLVMBuildAlloca( 643 | self._ref, 644 | ty._ref, 645 | empty_cstr() 646 | ) 647 | } 648 | 649 | pub unsafe fn build_const_int(&self, ty: Type, v: u64, sign_extend: bool) -> LLVMValueRef { 650 | LLVMConstInt( 651 | ty._ref, 652 | v as _, 653 | if sign_extend { 654 | 1 655 | } else { 656 | 0 657 | } 658 | ) 659 | } 660 | 661 | pub unsafe fn build_store(&self, val: LLVMValueRef, ptr: LLVMValueRef) -> LLVMValueRef { 662 | LLVMBuildStore( 663 | self._ref, 664 | val, 665 | ptr 666 | ) 667 | } 668 | 669 | pub unsafe fn build_load(&self, ptr: LLVMValueRef) -> LLVMValueRef { 670 | LLVMBuildLoad( 671 | self._ref, 672 | ptr, 673 | empty_cstr() 674 | ) 675 | } 676 | 677 | pub unsafe fn build_cast( 678 | &self, 679 | op: LLVMOpcode, 680 | val: LLVMValueRef, 681 | dest_ty: Type 682 | ) -> LLVMValueRef { 683 | LLVMBuildCast( 684 | self._ref, 685 | op, 686 | val, 687 | dest_ty._ref, 688 | empty_cstr() 689 | ) 690 | } 691 | 692 | pub unsafe fn build_gep( 693 | &self, 694 | ptr: LLVMValueRef, 695 | indices: &[LLVMValueRef] 696 | ) -> LLVMValueRef { 697 | let mut indices: Vec = indices.iter().map(|v| *v).collect(); 698 | 699 | LLVMBuildGEP( 700 | self._ref, 701 | ptr, 702 | if indices.len() == 0 { 703 | ::std::ptr::null_mut() 704 | } else { 705 | &mut indices[0] 706 | }, 707 | indices.len() as _, 708 | empty_cstr() 709 | ) 710 | } 711 | 712 | pub unsafe fn build_call_raw( 713 | &self, 714 | f: LLVMValueRef, 715 | args: &[LLVMValueRef] 716 | ) -> LLVMValueRef { 717 | let mut args: Vec = args.iter().map(|v| *v).collect(); 718 | LLVMBuildCall( 719 | self._ref, 720 | f, 721 | if args.len() == 0 { 722 | ::std::ptr::null_mut() 723 | } else { 724 | &mut args[0] 725 | }, 726 | args.len() as _, 727 | empty_cstr() 728 | ) 729 | } 730 | 731 | pub unsafe fn build_call( 732 | &self, 733 | f: &Function, 734 | args: &[LLVMValueRef] 735 | ) -> LLVMValueRef { 736 | self.build_call_raw(f._ref, args) 737 | } 738 | 739 | pub unsafe fn build_icmp( 740 | &self, 741 | op: LLVMIntPredicate, 742 | lhs: LLVMValueRef, 743 | rhs: LLVMValueRef 744 | ) -> LLVMValueRef { 745 | LLVMBuildICmp( 746 | self._ref, 747 | op, 748 | lhs, 749 | rhs, 750 | empty_cstr() 751 | ) 752 | } 753 | 754 | pub unsafe fn build_fcmp( 755 | &self, 756 | op: LLVMRealPredicate, 757 | lhs: LLVMValueRef, 758 | rhs: LLVMValueRef 759 | ) -> LLVMValueRef { 760 | LLVMBuildFCmp( 761 | self._ref, 762 | op, 763 | lhs, 764 | rhs, 765 | empty_cstr() 766 | ) 767 | } 768 | 769 | pub unsafe fn build_ret(&self, v: LLVMValueRef) -> LLVMValueRef { 770 | LLVMBuildRet( 771 | self._ref, 772 | v 773 | ) 774 | } 775 | 776 | pub unsafe fn build_ret_void(&self) -> LLVMValueRef { 777 | LLVMBuildRetVoid( 778 | self._ref 779 | ) 780 | } 781 | 782 | pub unsafe fn build_unreachable(&self) -> LLVMValueRef { 783 | LLVMBuildUnreachable(self._ref) 784 | } 785 | 786 | pub unsafe fn build_and( 787 | &self, 788 | lhs: LLVMValueRef, 789 | rhs: LLVMValueRef 790 | ) -> LLVMValueRef { 791 | LLVMBuildAnd( 792 | self._ref, 793 | lhs, 794 | rhs, 795 | empty_cstr() 796 | ) 797 | } 798 | 799 | pub unsafe 
fn build_or( 800 | &self, 801 | lhs: LLVMValueRef, 802 | rhs: LLVMValueRef 803 | ) -> LLVMValueRef { 804 | LLVMBuildOr( 805 | self._ref, 806 | lhs, 807 | rhs, 808 | empty_cstr() 809 | ) 810 | } 811 | 812 | pub unsafe fn build_xor( 813 | &self, 814 | lhs: LLVMValueRef, 815 | rhs: LLVMValueRef 816 | ) -> LLVMValueRef { 817 | LLVMBuildXor( 818 | self._ref, 819 | lhs, 820 | rhs, 821 | empty_cstr() 822 | ) 823 | } 824 | 825 | pub unsafe fn build_shl( 826 | &self, 827 | lhs: LLVMValueRef, 828 | rhs: LLVMValueRef 829 | ) -> LLVMValueRef { 830 | LLVMBuildShl( 831 | self._ref, 832 | lhs, 833 | rhs, 834 | empty_cstr() 835 | ) 836 | } 837 | 838 | pub unsafe fn build_ashr( 839 | &self, 840 | lhs: LLVMValueRef, 841 | rhs: LLVMValueRef 842 | ) -> LLVMValueRef { 843 | LLVMBuildAShr( 844 | self._ref, 845 | lhs, 846 | rhs, 847 | empty_cstr() 848 | ) 849 | } 850 | 851 | pub unsafe fn build_lshr( 852 | &self, 853 | lhs: LLVMValueRef, 854 | rhs: LLVMValueRef 855 | ) -> LLVMValueRef { 856 | LLVMBuildLShr( 857 | self._ref, 858 | lhs, 859 | rhs, 860 | empty_cstr() 861 | ) 862 | } 863 | 864 | pub unsafe fn build_add( 865 | &self, 866 | lhs: LLVMValueRef, 867 | rhs: LLVMValueRef 868 | ) -> LLVMValueRef { 869 | LLVMBuildAdd( 870 | self._ref, 871 | lhs, 872 | rhs, 873 | empty_cstr() 874 | ) 875 | } 876 | 877 | pub unsafe fn build_sub( 878 | &self, 879 | lhs: LLVMValueRef, 880 | rhs: LLVMValueRef 881 | ) -> LLVMValueRef { 882 | LLVMBuildSub( 883 | self._ref, 884 | lhs, 885 | rhs, 886 | empty_cstr() 887 | ) 888 | } 889 | 890 | pub unsafe fn build_mul( 891 | &self, 892 | lhs: LLVMValueRef, 893 | rhs: LLVMValueRef 894 | ) -> LLVMValueRef { 895 | LLVMBuildMul( 896 | self._ref, 897 | lhs, 898 | rhs, 899 | empty_cstr() 900 | ) 901 | } 902 | 903 | pub unsafe fn build_udiv( 904 | &self, 905 | lhs: LLVMValueRef, 906 | rhs: LLVMValueRef 907 | ) -> LLVMValueRef { 908 | LLVMBuildUDiv( 909 | self._ref, 910 | lhs, 911 | rhs, 912 | empty_cstr() 913 | ) 914 | } 915 | 916 | pub unsafe fn build_sdiv( 917 | &self, 918 | lhs: LLVMValueRef, 919 | rhs: LLVMValueRef 920 | ) -> LLVMValueRef { 921 | LLVMBuildSDiv( 922 | self._ref, 923 | lhs, 924 | rhs, 925 | empty_cstr() 926 | ) 927 | } 928 | 929 | pub unsafe fn build_urem( 930 | &self, 931 | lhs: LLVMValueRef, 932 | rhs: LLVMValueRef 933 | ) -> LLVMValueRef { 934 | LLVMBuildURem( 935 | self._ref, 936 | lhs, 937 | rhs, 938 | empty_cstr() 939 | ) 940 | } 941 | 942 | pub unsafe fn build_srem( 943 | &self, 944 | lhs: LLVMValueRef, 945 | rhs: LLVMValueRef 946 | ) -> LLVMValueRef { 947 | LLVMBuildSRem( 948 | self._ref, 949 | lhs, 950 | rhs, 951 | empty_cstr() 952 | ) 953 | } 954 | 955 | pub unsafe fn build_fp_trunc( 956 | &self, 957 | v: LLVMValueRef, 958 | dest_ty: Type 959 | ) -> LLVMValueRef { 960 | LLVMBuildFPTrunc( 961 | self._ref, 962 | v, 963 | dest_ty._ref, 964 | empty_cstr() 965 | ) 966 | } 967 | 968 | pub unsafe fn build_fp_ext( 969 | &self, 970 | v: LLVMValueRef, 971 | dest_ty: Type 972 | ) -> LLVMValueRef { 973 | LLVMBuildFPExt( 974 | self._ref, 975 | v, 976 | dest_ty._ref, 977 | empty_cstr() 978 | ) 979 | } 980 | 981 | pub unsafe fn build_fadd( 982 | &self, 983 | lhs: LLVMValueRef, 984 | rhs: LLVMValueRef 985 | ) -> LLVMValueRef { 986 | LLVMBuildFAdd( 987 | self._ref, 988 | lhs, 989 | rhs, 990 | empty_cstr() 991 | ) 992 | } 993 | 994 | pub unsafe fn build_fsub( 995 | &self, 996 | lhs: LLVMValueRef, 997 | rhs: LLVMValueRef 998 | ) -> LLVMValueRef { 999 | LLVMBuildFSub( 1000 | self._ref, 1001 | lhs, 1002 | rhs, 1003 | empty_cstr() 1004 | ) 1005 | } 1006 | 1007 | pub unsafe fn 
build_fmul( 1008 | &self, 1009 | lhs: LLVMValueRef, 1010 | rhs: LLVMValueRef 1011 | ) -> LLVMValueRef { 1012 | LLVMBuildFMul( 1013 | self._ref, 1014 | lhs, 1015 | rhs, 1016 | empty_cstr() 1017 | ) 1018 | } 1019 | 1020 | pub unsafe fn build_fdiv( 1021 | &self, 1022 | lhs: LLVMValueRef, 1023 | rhs: LLVMValueRef 1024 | ) -> LLVMValueRef { 1025 | LLVMBuildFDiv( 1026 | self._ref, 1027 | lhs, 1028 | rhs, 1029 | empty_cstr() 1030 | ) 1031 | } 1032 | 1033 | pub unsafe fn build_switch( 1034 | &self, 1035 | v: LLVMValueRef, 1036 | cases: &[(LLVMValueRef, &BasicBlock)], 1037 | otherwise: &BasicBlock 1038 | ) -> LLVMValueRef { 1039 | let s = LLVMBuildSwitch( 1040 | self._ref, 1041 | v, 1042 | otherwise._ref, 1043 | cases.len() as _ 1044 | ); 1045 | for &(ref exp, ref bb) in cases { 1046 | LLVMAddCase( 1047 | s, 1048 | *exp, 1049 | bb._ref 1050 | ); 1051 | } 1052 | s 1053 | } 1054 | 1055 | pub unsafe fn build_bitcast( 1056 | &self, 1057 | v: LLVMValueRef, 1058 | dest_ty: Type 1059 | ) -> LLVMValueRef { 1060 | LLVMBuildBitCast( 1061 | self._ref, 1062 | v, 1063 | dest_ty._ref, 1064 | empty_cstr() 1065 | ) 1066 | } 1067 | 1068 | pub unsafe fn build_phi( 1069 | &self, 1070 | incoming: &[(LLVMValueRef, &BasicBlock)], 1071 | ty: Type 1072 | ) -> LLVMValueRef { 1073 | let phi = LLVMBuildPhi( 1074 | self._ref, 1075 | ty._ref, 1076 | empty_cstr() 1077 | ); 1078 | 1079 | if incoming.len() > 0 { 1080 | let mut incoming_values: Vec = incoming.iter() 1081 | .map(|(v, _)| *v).collect(); 1082 | let mut incoming_blocks: Vec = incoming.iter() 1083 | .map(|(_, bb)| bb._ref).collect(); 1084 | 1085 | LLVMAddIncoming( 1086 | phi, 1087 | &mut incoming_values[0], 1088 | &mut incoming_blocks[0], 1089 | incoming.len() as _ 1090 | ); 1091 | } 1092 | 1093 | phi 1094 | } 1095 | } 1096 | 1097 | impl<'a> Drop for Builder<'a> { 1098 | fn drop(&mut self) { 1099 | unsafe { 1100 | LLVMDisposeBuilder(self._ref); 1101 | } 1102 | } 1103 | } 1104 | 1105 | #[derive(Eq, PartialEq)] 1106 | pub enum NativePointerWidth { 1107 | W32, 1108 | W64 1109 | } 1110 | 1111 | impl NativePointerWidth { 1112 | pub fn detect() -> NativePointerWidth { 1113 | let size = ::std::mem::size_of::(); 1114 | if size == 4 { 1115 | NativePointerWidth::W32 1116 | } else if size == 8 { 1117 | NativePointerWidth::W64 1118 | } else { 1119 | panic!("Unsupported native pointer width: {}", size); 1120 | } 1121 | } 1122 | } 1123 | -------------------------------------------------------------------------------- /src/jit/mod.rs: -------------------------------------------------------------------------------- 1 | mod llvm; 2 | pub mod runtime; 3 | pub mod compiler; 4 | mod compiler_intrinsics; 5 | mod ondemand; 6 | pub mod vm; 7 | -------------------------------------------------------------------------------- /src/jit/ondemand.rs: -------------------------------------------------------------------------------- 1 | use super::llvm; 2 | use super::runtime::Runtime; 3 | use std::cell::{Cell, RefCell}; 4 | use std::rc::Rc; 5 | use std::os::raw::c_void; 6 | use super::compiler; 7 | use super::compiler::Compiler; 8 | 9 | pub struct Ondemand { 10 | rt: Rc, 11 | context: llvm::Context, 12 | orc: llvm::Orc, 13 | functions: Vec>> 14 | } 15 | 16 | const OPT_THRESHOLD: usize = 50; 17 | 18 | impl Ondemand { 19 | pub fn new(rt: Rc, ctx: llvm::Context, m: llvm::Module) -> Ondemand { 20 | let orc = llvm::Orc::new(); 21 | 22 | //m.inline_with_threshold(100); 23 | //m.optimize(); 24 | 25 | orc.add_lazily_compiled_ir(m); 26 | 27 | let functions: Vec>> = (0..rt.source_module.functions.len()) 28 | 
.map(|_| RefCell::new(None)) 29 | .collect(); 30 | 31 | Ondemand { 32 | rt: rt, 33 | context: ctx, 34 | orc: orc, 35 | functions: functions 36 | } 37 | } 38 | 39 | pub fn get_function_addr(&self, id: usize) -> *const c_void { 40 | let mut f = self.functions[id].borrow_mut(); 41 | match *f { 42 | Some(v) => v, 43 | None => { 44 | let name = compiler::generate_function_name(id); 45 | let addr = self.orc.resolve(&name); 46 | *f = Some(addr); 47 | addr 48 | } 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/jit/runtime.rs: -------------------------------------------------------------------------------- 1 | use std::cell::{UnsafeCell, RefCell}; 2 | use std::rc::Rc; 3 | use std::os::raw::c_void; 4 | use executor::{NativeResolver, NativeFunction, NativeEntry, NativeFunctionInfo, GlobalStateProvider}; 5 | use module::{Module, Type, ValType}; 6 | use value::Value; 7 | use platform::current as host; 8 | use platform::generic::MemoryManager; 9 | use super::ondemand::Ondemand; 10 | 11 | use smallvec::SmallVec; 12 | 13 | pub struct Runtime { 14 | pub(super) opt_level: u32, 15 | pub(super) mm: UnsafeCell, 16 | pub source_module: Module, 17 | function_addrs: UnsafeCell>>, 18 | globals: Box<[i64]>, 19 | native_functions: Box<[RefCell]>, 20 | native_resolver: RefCell>>, 21 | ondemand: RefCell>>, 22 | jit_info: Box> 23 | } 24 | 25 | #[derive(Clone, Debug)] 26 | pub struct RuntimeConfig { 27 | pub mem_default: usize, 28 | pub mem_max: usize, 29 | pub opt_level: u32 30 | } 31 | 32 | #[repr(C)] 33 | pub struct JitInfo { 34 | pub global_begin: *mut i64 35 | } 36 | 37 | impl Default for RuntimeConfig { 38 | fn default() -> Self { 39 | RuntimeConfig { 40 | mem_default: 4096 * 1024, 41 | mem_max: 16384 * 1024, 42 | opt_level: 0 43 | } 44 | } 45 | } 46 | 47 | impl Runtime { 48 | pub fn new(cfg: RuntimeConfig, m: Module) -> Runtime { 49 | if cfg.mem_max < cfg.mem_default { 50 | panic!("mem_max < mem_default"); 51 | } 52 | 53 | if cfg.mem_default == 0 { 54 | panic!("mem_default == 0"); 55 | } 56 | 57 | let mut globals: Box<[i64]> = vec! 
[0; m.globals.len()].into_boxed_slice(); 58 | for (i, g) in m.globals.iter().enumerate() { 59 | globals[i] = g.value.reinterpret_as_i64(); 60 | } 61 | 62 | let native_functions: Vec> = m.natives.iter() 63 | .map(|n| RefCell::new(NativeFunctionInfo { 64 | f: NativeFunction::Uninitialized( 65 | n.module.clone(), 66 | n.field.clone() 67 | ), 68 | typeidx: n.typeidx as usize 69 | })) 70 | .collect(); 71 | 72 | let jit_info = JitInfo { 73 | global_begin: if globals.len() == 0 { 74 | ::std::ptr::null_mut() 75 | } else { 76 | &mut globals[0] 77 | } 78 | }; 79 | 80 | let mut mm = host::NativeMemoryManager::new(::platform::generic::MemInitOptions { 81 | min: cfg.mem_default, 82 | max: cfg.mem_max 83 | }); 84 | { 85 | let mem = mm.get_ref_mut(); 86 | for ds in &m.data_segments { 87 | let offset = ds.offset as usize; 88 | mem[offset..offset + ds.data.len()].copy_from_slice(&ds.data); 89 | } 90 | } 91 | 92 | Runtime { 93 | opt_level: cfg.opt_level, 94 | mm: UnsafeCell::new(mm), 95 | source_module: m, 96 | function_addrs: UnsafeCell::new(None), 97 | globals: globals, 98 | native_functions: native_functions.into_boxed_slice(), 99 | native_resolver: RefCell::new(None), 100 | ondemand: RefCell::new(None), 101 | jit_info: Box::new(UnsafeCell::new(jit_info)) 102 | } 103 | } 104 | 105 | pub fn set_ondemand(&self, ondemand: Rc) { 106 | let mut current = self.ondemand.borrow_mut(); 107 | if current.is_some() { 108 | panic!("Attempting to re-set ondemand"); 109 | } 110 | 111 | *current = Some(ondemand); 112 | } 113 | 114 | pub fn get_function_addr(&self, id: usize) -> *const c_void { 115 | self.ondemand.borrow().as_ref().unwrap().get_function_addr(id) 116 | } 117 | 118 | pub fn set_native_resolver(&self, resolver: R) { 119 | *self.native_resolver.borrow_mut() = Some(Box::new(resolver)); 120 | } 121 | 122 | pub fn indirect_get_function_addr(&self, id_in_table: usize) -> *const c_void { 123 | let id = self.source_module.tables[0].elements[id_in_table].unwrap() as usize; 124 | self.get_function_addr(id) 125 | } 126 | 127 | pub fn grow_memory(&self, len_inc: usize) -> usize { 128 | let mm = unsafe { 129 | &mut *self.mm.get() 130 | }; 131 | let prev_len = mm.len(); 132 | mm.grow(len_inc); 133 | prev_len 134 | } 135 | 136 | pub fn get_jit_info(&self) -> *mut JitInfo { 137 | self.jit_info.get() 138 | } 139 | 140 | pub fn protected_call T>(&self, f: F) -> T { 141 | let mm = unsafe { &mut *self.mm.get() }; 142 | mm.protected_call(|_| f()) 143 | } 144 | 145 | pub fn get_memory(&self) -> *const [u8] { 146 | let mm = unsafe { &*self.mm.get() }; 147 | mm.get_ref() 148 | } 149 | 150 | pub fn get_memory_mut(&self) -> *mut [u8] { 151 | let mm = unsafe { &mut *self.mm.get() }; 152 | mm.get_ref_mut() 153 | } 154 | 155 | pub(super) unsafe extern "C" fn _jit_native_invoke_request(ret_place: *mut NativeInvokeRequest, n_args: usize) { 156 | ::std::ptr::write(ret_place, NativeInvokeRequest::new(n_args)); 157 | } 158 | 159 | pub(super) extern "C" fn _jit_native_invoke_push_arg(req: &mut NativeInvokeRequest, arg: i64) { 160 | req.args.push(arg); 161 | } 162 | 163 | pub(super) unsafe extern "C" fn _jit_native_invoke_complete(req: *mut NativeInvokeRequest, rt: &Runtime, id: usize) -> i64 { 164 | let req: NativeInvokeRequest = ::std::ptr::read(req); 165 | 166 | let nf = &rt.native_functions[id]; 167 | let ty = &rt.source_module.types[nf.borrow().typeidx]; 168 | let Type::Func(ref ty_args, ref ty_ret) = *ty; 169 | 170 | assert_eq!(req.args.len(), ty_args.len()); 171 | 172 | let native_resolver = rt.native_resolver.borrow(); 173 | 174 | let 
mut invoke_ctx = JitNativeInvokeContext { 175 | mem: unsafe { 176 | &mut *rt.mm.get() 177 | }.get_ref_mut(), 178 | resolver: if let Some(ref v) = *native_resolver { 179 | Some(&**v) 180 | } else { 181 | None 182 | } 183 | }; 184 | 185 | let mut call_args: SmallVec<[Value; 16]> = SmallVec::with_capacity(req.args.len()); 186 | 187 | for i in 0..req.args.len() { 188 | call_args.push(Value::reinterpret_from_i64(req.args[i], ty_args[i])); 189 | } 190 | 191 | let ret = nf.borrow_mut().f.invoke(&mut invoke_ctx, &call_args).unwrap(); 192 | 193 | if let Some(ret) = ret { 194 | ret.reinterpret_as_i64() 195 | } else { 196 | 0 197 | } 198 | } 199 | 200 | pub(super) extern "C" fn _jit_grow_memory(rt: &Runtime, len_inc: usize) -> usize { 201 | rt.grow_memory(len_inc) 202 | } 203 | 204 | pub(super) extern "C" fn _jit_get_function_addr(rt: &Runtime, id: usize) -> *const c_void { 205 | rt.get_function_addr(id) 206 | } 207 | 208 | pub(super) extern "C" fn _jit_indirect_get_function_addr(rt: &Runtime, id: usize) -> *const c_void { 209 | rt.indirect_get_function_addr(id) 210 | } 211 | } 212 | 213 | pub struct NativeInvokeRequest { 214 | args: SmallVec<[i64; 16]> 215 | } 216 | 217 | impl NativeInvokeRequest { 218 | fn new(n_args: usize) -> NativeInvokeRequest { 219 | NativeInvokeRequest { 220 | args: SmallVec::with_capacity(n_args) 221 | } 222 | } 223 | } 224 | 225 | pub struct JitNativeInvokeContext<'a> { 226 | mem: &'a mut [u8], 227 | resolver: Option<&'a NativeResolver> 228 | } 229 | 230 | impl<'a> GlobalStateProvider for JitNativeInvokeContext<'a> { 231 | fn get_memory(&self) -> &[u8] { 232 | self.mem 233 | } 234 | 235 | fn get_memory_mut(&mut self) -> &mut [u8] { 236 | self.mem 237 | } 238 | 239 | fn resolve(&self, module: &str, field: &str) -> Option { 240 | if let Some(r) = self.resolver { 241 | r.resolve(module, field) 242 | } else { 243 | None 244 | } 245 | } 246 | } 247 | -------------------------------------------------------------------------------- /src/jit/vm.rs: -------------------------------------------------------------------------------- 1 | pub struct VirtualMachine { 2 | 3 | } -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(feature = "std"), no_std)] 2 | 3 | #![feature(alloc)] 4 | #![feature(nll)] 5 | #![feature(core_intrinsics)] 6 | #![feature(underscore_lifetimes)] 7 | #![feature(test)] 8 | 9 | #[cfg(test)] 10 | extern crate test; 11 | 12 | #[cfg(not(feature = "std"))] 13 | #[macro_use] 14 | extern crate alloc; 15 | 16 | extern crate serde; 17 | 18 | #[macro_use] 19 | extern crate serde_derive; 20 | 21 | #[cfg(not(feature = "std"))] 22 | extern crate bincode_no_std; 23 | #[cfg(not(feature = "std"))] 24 | use bincode_no_std as bincode; 25 | 26 | #[cfg(feature = "std")] 27 | extern crate bincode; 28 | 29 | #[macro_use] 30 | extern crate lazy_static; 31 | 32 | #[cfg(feature = "jit")] 33 | extern crate llvm_sys; 34 | 35 | #[cfg(feature = "jit")] 36 | extern crate smallvec; 37 | 38 | #[cfg(feature = "jit")] 39 | extern crate libc; 40 | 41 | #[cfg(feature = "trans")] 42 | extern crate parity_wasm; 43 | 44 | extern crate byteorder; 45 | 46 | #[cfg(feature = "trans")] 47 | pub mod trans; 48 | 49 | #[cfg(feature = "jit")] 50 | pub mod jit; 51 | 52 | #[cfg(feature = "jit")] 53 | pub mod platform; 54 | 55 | #[cfg(feature = "std")] 56 | mod prelude; 57 | 58 | #[cfg(not(feature = "std"))] 59 | mod prelude_no_std; 60 | #[cfg(not(feature = "std"))] 61 | use 
prelude_no_std as prelude; 62 | 63 | pub mod opcode; 64 | pub mod executor; 65 | pub mod module; 66 | pub mod int_ops; 67 | pub mod value; 68 | pub mod resolver; 69 | pub mod fp_ops; 70 | pub mod cfgraph; 71 | pub mod optimizers; 72 | pub mod ssa; 73 | pub mod hetrans; 74 | -------------------------------------------------------------------------------- /src/module.rs: -------------------------------------------------------------------------------- 1 | use prelude::{Vec, String, BTreeMap}; 2 | 3 | use value::Value; 4 | 5 | use opcode::Opcode; 6 | use bincode; 7 | 8 | #[derive(Clone, Debug, Serialize, Deserialize, Default)] 9 | pub struct Module { 10 | pub types: Vec<Type>, 11 | pub functions: Vec<Function>, 12 | pub data_segments: Vec<DataSegment>, 13 | pub exports: BTreeMap<String, Export>, 14 | pub tables: Vec<Table>, 15 | pub globals: Vec<Global>, 16 | pub natives: Vec<Native>, 17 | pub start_function: Option<u32> 18 | } 19 | 20 | #[derive(Clone, Debug, Serialize, Deserialize)] 21 | pub struct Native { 22 | pub module: String, 23 | pub field: String, 24 | pub typeidx: u32 25 | } 26 | 27 | #[derive(Clone, Debug, Serialize, Deserialize)] 28 | pub struct Global { 29 | pub value: Value 30 | } 31 | 32 | #[derive(Clone, Debug, Serialize, Deserialize)] 33 | pub struct Table { 34 | pub min: u32, 35 | pub max: Option<u32>, 36 | pub elements: Vec<Option<u32>> 37 | } 38 | 39 | #[derive(Clone, Debug, Serialize, Deserialize)] 40 | pub enum Export { 41 | Function(u32) 42 | } 43 | 44 | #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] 45 | pub enum Type { 46 | Func(Vec<ValType>, Vec<ValType>) // (args...) -> (ret) 47 | } 48 | 49 | #[derive(Clone, Debug, Serialize, Deserialize)] 50 | pub struct Function { 51 | pub name: Option<String>, 52 | pub typeidx: u32, 53 | pub locals: Vec<ValType>, 54 | pub body: FunctionBody 55 | } 56 | 57 | #[derive(Clone, Debug, Serialize, Deserialize)] 58 | pub struct FunctionBody { 59 | pub opcodes: Vec<Opcode> 60 | } 61 | 62 | #[derive(Clone, Debug, Serialize, Deserialize)] 63 | pub struct DataSegment { 64 | pub offset: u32, 65 | pub data: Vec<u8> 66 | } 67 | 68 | #[derive(Copy, Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] 69 | pub enum ValType { 70 | I32, 71 | I64, 72 | F32, 73 | F64 74 | } 75 | 76 | impl Module { 77 | pub fn std_serialize(&self) -> Result<Vec<u8>, String> { 78 | match bincode::serialize(self) { 79 | Ok(v) => Ok(v), 80 | Err(e) => Err(format!("{:?}", e)) 81 | } 82 | } 83 | 84 | pub fn std_deserialize(data: &[u8]) -> Result<Module, String> { 85 | match bincode::deserialize(data) { 86 | Ok(v) => Ok(v), 87 | Err(e) => Err(format!("{:?}", e)) 88 | } 89 | } 90 | 91 | pub fn lookup_exported_func(&self, name: &str) -> Option<usize> { 92 | match self.exports.get(name) { 93 | Some(v) => match *v { 94 | Export::Function(id) => Some(id as usize) 95 | }, 96 | None => None 97 | } 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/opcode.rs: -------------------------------------------------------------------------------- 1 | use prelude::{Vec, String}; 2 | 3 | #[derive(Clone, Debug, Serialize, Deserialize)] 4 | pub enum Opcode { 5 | Drop, 6 | Select, 7 | 8 | GetLocal(u32), 9 | SetLocal(u32), 10 | TeeLocal(u32), 11 | GetGlobal(u32), 12 | SetGlobal(u32), 13 | 14 | CurrentMemory, 15 | GrowMemory, 16 | 17 | Nop, 18 | Unreachable, 19 | Return, 20 | Call(u32), 21 | CallIndirect(u32), 22 | 23 | I32Const(i32), 24 | 25 | // iunop 26 | I32Clz, 27 | I32Ctz, 28 | I32Popcnt, 29 | 30 | // ibinop 31 | I32Add, 32 | I32Sub, 33 | I32Mul, 34 | I32DivU, 35 | I32DivS, 36 | I32RemU, 37 | I32RemS, 38 | I32And, 39 | I32Or, 40 | I32Xor, 41 | I32Shl, 42 | I32ShrU, 43 | I32ShrS, 44 |
I32Rotl, 45 | I32Rotr, 46 | 47 | // itestop 48 | I32Eqz, 49 | 50 | // irelop 51 | I32Eq, 52 | I32Ne, 53 | I32LtU, 54 | I32LtS, 55 | I32LeU, 56 | I32LeS, 57 | I32GtU, 58 | I32GtS, 59 | I32GeU, 60 | I32GeS, 61 | 62 | I32WrapI64, 63 | 64 | I32Load(Memarg), 65 | I32Store(Memarg), 66 | I32Load8U(Memarg), 67 | I32Load8S(Memarg), 68 | I32Load16U(Memarg), 69 | I32Load16S(Memarg), 70 | I32Store8(Memarg), 71 | I32Store16(Memarg), 72 | 73 | I64Const(i64), 74 | 75 | // iunop 76 | I64Clz, 77 | I64Ctz, 78 | I64Popcnt, 79 | 80 | // ibinop 81 | I64Add, 82 | I64Sub, 83 | I64Mul, 84 | I64DivU, 85 | I64DivS, 86 | I64RemU, 87 | I64RemS, 88 | I64And, 89 | I64Or, 90 | I64Xor, 91 | I64Shl, 92 | I64ShrU, 93 | I64ShrS, 94 | I64Rotl, 95 | I64Rotr, 96 | 97 | // itestop 98 | I64Eqz, 99 | 100 | // irelop 101 | I64Eq, 102 | I64Ne, 103 | I64LtU, 104 | I64LtS, 105 | I64LeU, 106 | I64LeS, 107 | I64GtU, 108 | I64GtS, 109 | I64GeU, 110 | I64GeS, 111 | 112 | I64ExtendI32U, 113 | I64ExtendI32S, 114 | 115 | I64Load(Memarg), 116 | I64Store(Memarg), 117 | I64Load8U(Memarg), 118 | I64Load8S(Memarg), 119 | I64Load16U(Memarg), 120 | I64Load16S(Memarg), 121 | I64Load32U(Memarg), 122 | I64Load32S(Memarg), 123 | I64Store8(Memarg), 124 | I64Store16(Memarg), 125 | I64Store32(Memarg), 126 | 127 | // Floating point 128 | F32Const(u32), 129 | F64Const(u64), 130 | F32ReinterpretI32, 131 | F64ReinterpretI64, 132 | I32ReinterpretF32, 133 | I64ReinterpretF64, 134 | I32TruncSF32, 135 | I32TruncUF32, 136 | I32TruncSF64, 137 | I32TruncUF64, 138 | I64TruncSF32, 139 | I64TruncUF32, 140 | I64TruncSF64, 141 | I64TruncUF64, 142 | F32ConvertSI32, 143 | F32ConvertUI32, 144 | F32ConvertSI64, 145 | F32ConvertUI64, 146 | F64ConvertSI32, 147 | F64ConvertUI32, 148 | F64ConvertSI64, 149 | F64ConvertUI64, 150 | F32DemoteF64, 151 | F64PromoteF32, 152 | F32Abs, 153 | F32Neg, 154 | F32Ceil, 155 | F32Floor, 156 | F32Trunc, 157 | F32Nearest, 158 | F32Sqrt, 159 | F32Add, 160 | F32Sub, 161 | F32Mul, 162 | F32Div, 163 | F32Min, 164 | F32Max, 165 | F32Copysign, 166 | F32Eq, 167 | F32Ne, 168 | F32Lt, 169 | F32Gt, 170 | F32Le, 171 | F32Ge, 172 | F64Abs, 173 | F64Neg, 174 | F64Ceil, 175 | F64Floor, 176 | F64Trunc, 177 | F64Nearest, 178 | F64Sqrt, 179 | F64Add, 180 | F64Sub, 181 | F64Mul, 182 | F64Div, 183 | F64Min, 184 | F64Max, 185 | F64Copysign, 186 | F64Eq, 187 | F64Ne, 188 | F64Lt, 189 | F64Gt, 190 | F64Le, 191 | F64Ge, 192 | 193 | // wasm-core specific opcodes here. Should be generated by front-end. 194 | Jmp(u32), 195 | JmpIf(u32), 196 | JmpEither(u32, u32), 197 | JmpTable(Vec, u32), 198 | 199 | NativeInvoke(u32), 200 | Memcpy, // Pops: (dest, src, n_bytes) 201 | 202 | NotImplemented(String) 203 | } 204 | 205 | #[derive(Copy, Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] 206 | pub struct Memarg { 207 | pub offset: u32, 208 | pub align: u32 209 | } 210 | -------------------------------------------------------------------------------- /src/optimizers/mod.rs: -------------------------------------------------------------------------------- 1 | use cfgraph::*; 2 | use prelude::{BTreeSet, BTreeMap}; 3 | 4 | pub struct RemoveDeadBasicBlocks; 5 | 6 | impl Optimizer for RemoveDeadBasicBlocks { 7 | type Return = (); 8 | 9 | fn optimize(&self, cfg: &mut CFGraph) -> OptimizeResult<()> { 10 | if cfg.blocks.len() == 0 { 11 | return Ok(()); 12 | } 13 | 14 | let mut reachable: BTreeSet = BTreeSet::new(); 15 | 16 | // Perform a depth-first search on the CFG to figure out reachable blocks. 17 | { 18 | let mut dfs_stack: Vec = vec! 
[ BlockId(0) ]; 19 | 20 | while let Some(blk_id) = dfs_stack.pop() { 21 | if reachable.contains(&blk_id) { 22 | continue; 23 | } 24 | 25 | reachable.insert(blk_id); 26 | 27 | let blk = &cfg.blocks[blk_id.0]; 28 | match *blk.br.as_ref().unwrap() { 29 | Branch::Jmp(t) => { 30 | dfs_stack.push(t); 31 | }, 32 | Branch::JmpEither(a, b) => { 33 | dfs_stack.push(a); 34 | dfs_stack.push(b); 35 | }, 36 | Branch::JmpTable(ref targets, otherwise) => { 37 | for t in targets { 38 | dfs_stack.push(*t); 39 | } 40 | dfs_stack.push(otherwise); 41 | }, 42 | Branch::Return => {} 43 | } 44 | } 45 | } 46 | 47 | // Maps old block ids to new ones. 48 | let mut block_id_mappings: BTreeMap = BTreeMap::new(); 49 | 50 | // Reachable basic blocks 51 | let mut new_basic_blocks = Vec::with_capacity(reachable.len()); 52 | 53 | { 54 | // Old basic blocks 55 | let mut old_basic_blocks = ::prelude::mem::replace(&mut cfg.blocks, Vec::new()); 56 | 57 | // reachable is a Set so blk_id will never duplicate. 58 | for (i, blk_id) in reachable.iter().enumerate() { 59 | block_id_mappings.insert(*blk_id, BlockId(i)); 60 | new_basic_blocks.push( 61 | ::prelude::mem::replace( 62 | &mut old_basic_blocks[blk_id.0], 63 | BasicBlock::new() 64 | ) 65 | ); 66 | } 67 | } 68 | 69 | for bb in &mut new_basic_blocks { 70 | let old_br = bb.br.take().unwrap(); 71 | bb.br = Some(match old_br { 72 | Branch::Jmp(id) => Branch::Jmp(*block_id_mappings.get(&id).unwrap()), 73 | Branch::JmpEither(a, b) => Branch::JmpEither( 74 | *block_id_mappings.get(&a).unwrap(), 75 | *block_id_mappings.get(&b).unwrap() 76 | ), 77 | Branch::JmpTable(targets, otherwise) => Branch::JmpTable( 78 | targets.into_iter().map(|t| *block_id_mappings.get(&t).unwrap()).collect(), 79 | *block_id_mappings.get(&otherwise).unwrap() 80 | ), 81 | Branch::Return => Branch::Return 82 | }); 83 | } 84 | 85 | cfg.blocks = new_basic_blocks; 86 | 87 | Ok(()) 88 | } 89 | } 90 | 91 | #[cfg(test)] 92 | mod tests { 93 | use super::*; 94 | use opcode::Opcode; 95 | 96 | #[test] 97 | fn test_remove_dead_basic_blocks() { 98 | let opcodes: Vec = vec! 
[ 99 | // bb 0 100 | Opcode::I32Const(100), // 0 101 | Opcode::Jmp(3), // 1 102 | // bb 1, never reached 103 | Opcode::I32Const(50), // 2 104 | // bb 2 (due to jmp) 105 | Opcode::I32Const(25), // 3 106 | Opcode::JmpIf(0), // 4 107 | // bb 3 108 | Opcode::Return // 5 109 | ]; 110 | 111 | let mut cfg = CFGraph::from_function(opcodes.as_slice()).unwrap(); 112 | cfg.validate().unwrap(); 113 | cfg.optimize(RemoveDeadBasicBlocks).unwrap(); 114 | cfg.validate().unwrap(); 115 | 116 | assert_eq!(cfg.blocks.len(), 3); 117 | assert_eq!(cfg.blocks[0].br, Some(Branch::Jmp(BlockId(1)))); 118 | assert_eq!(cfg.blocks[1].br, Some(Branch::JmpEither(BlockId(0), BlockId(2)))); 119 | assert_eq!(cfg.blocks[2].br, Some(Branch::Return)); 120 | 121 | eprintln!("{:?}", cfg); 122 | 123 | eprintln!("{:?}", cfg.gen_opcodes()); 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /src/platform/generic.rs: -------------------------------------------------------------------------------- 1 | pub unsafe trait MemoryManager { 2 | fn grow(&mut self, inc_len: usize); 3 | fn len(&self) -> usize; 4 | fn get_ref(&self) -> &[u8]; 5 | fn get_ref_mut(&mut self) -> &mut [u8]; 6 | fn hints(&self) -> MemCodegenHints; 7 | fn start_address(&self) -> *mut u8; 8 | } 9 | 10 | #[derive(Copy, Clone, Debug)] 11 | pub struct MemInitOptions { 12 | pub min: usize, 13 | pub max: usize 14 | } 15 | 16 | #[derive(Copy, Clone, Debug)] 17 | pub struct MemCodegenHints { 18 | pub needs_bounds_check: bool, 19 | pub address_mask: usize, 20 | pub indirect_len_ptr: *const usize, 21 | pub indirect_start_address_ptr: *const *mut u8, 22 | pub static_start_address: Option<*mut u8> 23 | } 24 | -------------------------------------------------------------------------------- /src/platform/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod generic; 2 | 3 | macro_rules! import_platform { 4 | ($arch:expr, $os:expr, $m:ident) => { 5 | #[cfg(all(target_arch = $arch, target_os = $os))] 6 | pub mod $m; 7 | #[cfg(all(target_arch = $arch, target_os = $os))] 8 | pub use self::$m as current; 9 | } 10 | } 11 | 12 | import_platform!("x86_64", "linux", x86_64_linux); 13 | 14 | pub mod other; 15 | #[cfg(not(all(target_arch = "x86_64", target_os = "linux")))] 16 | pub use self::other as current; 17 | -------------------------------------------------------------------------------- /src/platform/other.rs: -------------------------------------------------------------------------------- 1 | use std::ptr::null_mut; 2 | use std::cell::{Cell, RefCell}; 3 | use super::generic::*; 4 | 5 | pub struct NativeMemoryManager { 6 | min: usize, 7 | max: usize, 8 | mem: Vec, 9 | mem_start: *mut u8, 10 | mem_len: usize 11 | } 12 | 13 | impl NativeMemoryManager { 14 | pub fn new(opts: MemInitOptions) -> NativeMemoryManager { 15 | let mut mem: Vec = vec! 
[ 0; opts.min ]; 16 | let mem_start = &mut mem[0] as *mut u8; 17 | 18 | NativeMemoryManager { 19 | min: opts.min, 20 | max: opts.max, 21 | mem: mem, 22 | mem_start: mem_start, 23 | mem_len: opts.min 24 | } 25 | } 26 | 27 | pub fn protected_call T>(&mut self, f: F) -> T { 28 | f(self) 29 | } 30 | } 31 | 32 | unsafe impl MemoryManager for NativeMemoryManager { 33 | fn grow(&mut self, len_inc: usize) { 34 | if self.mem.len().checked_add(len_inc).unwrap() > self.max { 35 | panic!("Memory limit exceeded"); 36 | } 37 | self.mem.extend((0..len_inc).map(|_| 0)); 38 | 39 | let mem_start = &mut self.mem[0] as *mut u8; 40 | self.mem_start = mem_start; 41 | self.mem_len = self.mem.len(); 42 | } 43 | 44 | fn len(&self) -> usize { 45 | self.mem_len 46 | } 47 | 48 | fn get_ref(&self) -> &[u8] { 49 | &self.mem 50 | } 51 | 52 | fn get_ref_mut(&mut self) -> &mut [u8] { 53 | &mut self.mem 54 | } 55 | 56 | fn hints(&self) -> MemCodegenHints { 57 | MemCodegenHints { 58 | needs_bounds_check: true, 59 | address_mask: self.max.next_power_of_two() - 1, 60 | indirect_len_ptr: &self.mem_len, 61 | indirect_start_address_ptr: &self.mem_start, 62 | static_start_address: None 63 | } 64 | } 65 | 66 | fn start_address(&self) -> *mut u8 { 67 | self.mem_start 68 | } 69 | } 70 | 71 | 72 | #[cfg(test)] 73 | mod tests { 74 | use super::*; 75 | #[test] 76 | fn test_native_mm() { 77 | use std::panic::{catch_unwind, AssertUnwindSafe}; 78 | 79 | let mut mm = NativeMemoryManager::new(MemInitOptions { 80 | min: 100000, 81 | max: 3221225472 82 | }); 83 | 84 | let err = catch_unwind(AssertUnwindSafe(|| { 85 | let mem = mm.get_ref_mut(); 86 | 87 | mem[0] = 1; 88 | mem[100] = 2; 89 | mem[99999] = 3; 90 | })); 91 | assert!(err.is_ok()); 92 | 93 | let err = catch_unwind(AssertUnwindSafe(|| { 94 | let mem = mm.get_ref_mut(); 95 | mem[100000] = 3; 96 | })); 97 | assert!(err.is_err()); 98 | } 99 | 100 | #[test] 101 | fn test_native_mm_concurrent() { 102 | use std::panic::{catch_unwind, AssertUnwindSafe}; 103 | 104 | let mut handles = Vec::new(); 105 | 106 | for _ in 0..10000 { 107 | handles.push(::std::thread::spawn(|| { 108 | let mut mm = NativeMemoryManager::new(MemInitOptions { 109 | min: 100000, 110 | max: 3221225472 111 | }); 112 | let err = catch_unwind(AssertUnwindSafe(|| { 113 | let mem = mm.get_ref_mut(); 114 | mem[100000] = 42; 115 | })); 116 | assert!(err.is_err()); 117 | })); 118 | } 119 | 120 | for h in handles { 121 | h.join().unwrap(); 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/platform/x86_64_linux/mod.rs: -------------------------------------------------------------------------------- 1 | mod sigsegv; 2 | 3 | use libc; 4 | use std::ptr::null_mut; 5 | use std::cell::{Cell, RefCell}; 6 | use super::generic::*; 7 | 8 | extern "C" { 9 | fn __sigsetjmp(env: *mut u8, savesigs: i32) -> i32; 10 | fn __libc_siglongjmp(env: *mut u8, val: i32) -> !; 11 | } 12 | 13 | lazy_static! 
{ 14 | pub static ref PAGE_SIZE: usize = { 15 | unsafe { 16 | //let mut _si: libc::siginfo_t = ::std::mem::uninitialized(); 17 | //_si.si_addr = 1; 18 | libc::sysconf(libc::_SC_PAGESIZE) as usize 19 | } 20 | }; 21 | 22 | static ref ENSURE_SIGSEGV_HANDLER: () = { 23 | // TODO: Pass control to rust stack overflow checker 24 | extern "C" fn handle_sigsegv(signo: i32, siginfo: &sigsegv::SigsegvInfo, _: *mut libc::c_void) { 25 | MEM_FAULT_JMP_BUF.with(|buf| { 26 | let jmpbuf_addr: *mut u8 = { 27 | let mut buf = buf.borrow_mut(); 28 | if buf.0 == false { 29 | ::std::process::abort(); 30 | } 31 | &mut buf.1[0] as *mut u8 32 | }; 33 | MEM_FAULT_ADDR.with(|addr| { 34 | addr.set(Some(siginfo.si_addr)); 35 | }); 36 | unsafe { 37 | __libc_siglongjmp(jmpbuf_addr, 1); 38 | } 39 | }); 40 | } 41 | 42 | unsafe { 43 | let mut sa: libc::sigaction = ::std::mem::zeroed(); 44 | sa.sa_flags = libc::SA_SIGINFO; 45 | libc::sigemptyset(&mut sa.sa_mask); 46 | sa.sa_sigaction = handle_sigsegv as usize; 47 | 48 | libc::sigaction(libc::SIGSEGV, &sa, null_mut()); 49 | } 50 | }; 51 | } 52 | 53 | thread_local! { 54 | static MEM_FAULT_JMP_BUF: RefCell<(bool, [u8; 512])> = RefCell::new((false, [0; 512])); 55 | static MEM_FAULT_ADDR: Cell> = Cell::new(None); 56 | } 57 | 58 | pub struct NativeMemoryManager { 59 | min: usize, 60 | max: usize, 61 | mapped_len: usize, 62 | mem_ptr: *mut u8, 63 | mem_len: usize 64 | } 65 | 66 | impl NativeMemoryManager { 67 | pub fn new(opts: MemInitOptions) -> NativeMemoryManager { 68 | let mapped_len = opts.max.next_power_of_two(); 69 | 70 | let mem = unsafe { 71 | libc::mmap( 72 | null_mut(), 73 | mapped_len, 74 | libc::PROT_NONE, 75 | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, 76 | -1, 77 | 0 78 | ) as *mut u8 79 | }; 80 | if mem.is_null() { 81 | panic!("mmap failed"); 82 | } 83 | 84 | let mut mm = NativeMemoryManager { 85 | min: opts.min, 86 | max: opts.max, 87 | mapped_len: mapped_len, 88 | mem_ptr: mem, 89 | mem_len: 0 90 | }; 91 | 92 | mm.set_len(opts.min); 93 | mm 94 | } 95 | 96 | fn set_len(&mut self, len: usize) { 97 | if len > self.max { 98 | panic!("len > max"); 99 | } 100 | 101 | let rounded_len = round_up_to_page_size(len); 102 | 103 | let ret = unsafe { 104 | libc::mprotect( 105 | self.mem_ptr as *mut libc::c_void, 106 | rounded_len, 107 | libc::PROT_READ | libc::PROT_WRITE 108 | ) 109 | }; 110 | if ret != 0 { 111 | panic!("mprotect failed"); 112 | } 113 | 114 | self.mem_len = len; 115 | } 116 | 117 | pub fn protected_call T>(&mut self, f: F) -> T { 118 | struct JmpBufGuard; 119 | 120 | impl Drop for JmpBufGuard { 121 | fn drop(&mut self) { 122 | MEM_FAULT_JMP_BUF.with(|buf| { 123 | let mut buf = buf.borrow_mut(); 124 | assert_eq!(buf.0, true); 125 | buf.0 = false; 126 | }); 127 | } 128 | } 129 | 130 | let _ = *ENSURE_SIGSEGV_HANDLER; 131 | 132 | MEM_FAULT_ADDR.with(|_| {}); // Just initialize it to prevent initialization in signal handler. 133 | 134 | MEM_FAULT_JMP_BUF.with(move |buf| { 135 | let jmpbuf_addr: *mut u8 = { 136 | let mut buf = buf.borrow_mut(); 137 | if buf.0 == true { 138 | panic!("protected_call is not re-entrant"); 139 | } 140 | buf.0 = true; 141 | 142 | &mut buf.1[0] as *mut u8 143 | }; 144 | 145 | let _guard = JmpBufGuard; 146 | 147 | // Set the jmp buf. 148 | // The call to the target function should be immediately made after this. 149 | let sig = unsafe { 150 | __sigsetjmp(jmpbuf_addr, 1) 151 | }; 152 | 153 | if sig == 0 { 154 | // The normal execution flow. 155 | f(self) 156 | } else { 157 | // sigsegv? 
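// We get here via siglongjmp from the SIGSEGV handler. Classify the
// faulting address: a fault inside our reserved mapping becomes an
// ordinary Rust panic (an out-of-bounds wasm memory access), while a
// fault anywhere else leaves the process in an unknown state, so abort.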
158 | let fault_addr = MEM_FAULT_ADDR.with(|addr| { 159 | addr.get().unwrap_or_else(|| { 160 | eprintln!("BUG: longjmp caught without fault address set"); 161 | ::std::process::abort(); 162 | }) 163 | }) as usize; 164 | let expected_start = self.mem_ptr as usize; 165 | let expected_end = expected_start + self.mapped_len; 166 | 167 | if fault_addr >= expected_start && fault_addr < expected_end { 168 | panic!("Memory access out of bounds"); 169 | } else { 170 | eprintln!( 171 | "Fault out of protected memory: {:x}; Expecting {:x}-{:x}", 172 | fault_addr, 173 | expected_start, 174 | expected_end 175 | ); 176 | ::std::process::abort(); 177 | } 178 | } 179 | }) 180 | } 181 | } 182 | 183 | unsafe impl MemoryManager for NativeMemoryManager { 184 | fn grow(&mut self, inc_len: usize) { 185 | let new_len = self.mem_len + inc_len; 186 | self.set_len(new_len); 187 | } 188 | 189 | fn len(&self) -> usize { 190 | self.mem_len 191 | } 192 | 193 | fn get_ref(&self) -> &[u8] { 194 | unsafe { ::std::slice::from_raw_parts( 195 | self.mem_ptr, 196 | self.mem_len 197 | ) } 198 | } 199 | 200 | fn get_ref_mut(&mut self) -> &mut [u8] { 201 | unsafe { ::std::slice::from_raw_parts_mut( 202 | self.mem_ptr, 203 | self.mem_len 204 | ) } 205 | } 206 | 207 | fn hints(&self) -> MemCodegenHints { 208 | MemCodegenHints { 209 | needs_bounds_check: false, 210 | address_mask: self.mapped_len - 1, 211 | indirect_len_ptr: &self.mem_len, 212 | indirect_start_address_ptr: &self.mem_ptr, 213 | static_start_address: Some(self.mem_ptr) 214 | } 215 | } 216 | 217 | fn start_address(&self) -> *mut u8 { 218 | self.mem_ptr 219 | } 220 | } 221 | 222 | impl Drop for NativeMemoryManager { 223 | fn drop(&mut self) { 224 | let ret = unsafe { 225 | libc::munmap( 226 | self.mem_ptr as *mut libc::c_void, 227 | self.mapped_len 228 | ) 229 | }; 230 | if ret != 0 { 231 | panic!("munmap failed"); 232 | } 233 | } 234 | } 235 | 236 | #[inline] 237 | fn round_up_to_page_size(size: usize) -> usize { 238 | let page_size: usize = *PAGE_SIZE; 239 | 240 | let rem = size % page_size; 241 | if rem > 0 { 242 | size - rem + page_size 243 | } else { 244 | size 245 | } 246 | } 247 | 248 | #[cfg(test)] 249 | mod tests { 250 | use super::*; 251 | #[test] 252 | fn mem_size_should_be_rounded_up() { 253 | let page_size: usize = *PAGE_SIZE; 254 | assert_eq!(round_up_to_page_size(123), page_size); 255 | assert_eq!(round_up_to_page_size(page_size), page_size); 256 | assert_eq!(round_up_to_page_size(page_size + 1), page_size * 2); 257 | assert_eq!(round_up_to_page_size(page_size * 2 - 1), page_size * 2); 258 | assert_eq!(round_up_to_page_size(page_size * 2), page_size * 2); 259 | assert_eq!(round_up_to_page_size(page_size * 2 + 1), page_size * 3); 260 | } 261 | 262 | #[test] 263 | fn test_native_mm() { 264 | use std::panic::{catch_unwind, AssertUnwindSafe}; 265 | 266 | let mut mm = NativeMemoryManager::new(MemInitOptions { 267 | min: 100000, 268 | max: 3221225472 269 | }); 270 | 271 | let err = catch_unwind(AssertUnwindSafe(|| { 272 | let mem = mm.get_ref_mut(); 273 | 274 | mem[0] = 1; 275 | mem[100] = 2; 276 | mem[99999] = 3; 277 | })); 278 | assert!(err.is_ok()); 279 | 280 | let err = catch_unwind(AssertUnwindSafe(|| { 281 | let mem = mm.get_ref_mut(); 282 | mem[100000] = 3; 283 | })); 284 | assert!(err.is_err()); 285 | 286 | let err = catch_unwind(AssertUnwindSafe(|| { 287 | mm.protected_call(|mm| { 288 | unsafe { 289 | *mm.start_address().offset(1048576) = 42; 290 | } 291 | }); 292 | })); 293 | assert!(err.is_err()); 294 | } 295 | 296 | #[test] 297 | fn 
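// A note on the pieces above: because new() reserves
// max.next_power_of_two() bytes of PROT_NONE memory up front, hints()
// can report needs_bounds_check = false. Generated code just masks
// addresses with mapped_len - 1, and anything past mem_len lands on a
// still-protected page, faults, and is converted into a panic by
// protected_call. round_up_to_page_size relies on page sizes being
// powers of two; the branchy form used above is equivalent to the usual
// bit trick:
//
//     (size + page_size - 1) & !(page_size - 1)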
test_native_mm_concurrent() { 298 | use std::panic::{catch_unwind, AssertUnwindSafe}; 299 | 300 | let mut handles = Vec::new(); 301 | 302 | for _ in 0..10000 { 303 | handles.push(::std::thread::spawn(|| { 304 | let mut mm = NativeMemoryManager::new(MemInitOptions { 305 | min: 100000, 306 | max: 3221225472 307 | }); 308 | let err = catch_unwind(AssertUnwindSafe(|| { 309 | mm.protected_call(|mm| { 310 | unsafe { 311 | *mm.start_address().offset(1048576) = 42; 312 | } 313 | }); 314 | })); 315 | assert!(err.is_err()); 316 | })); 317 | } 318 | 319 | for h in handles { 320 | h.join().unwrap(); 321 | } 322 | } 323 | } 324 | -------------------------------------------------------------------------------- /src/platform/x86_64_linux/sigsegv.rs: -------------------------------------------------------------------------------- 1 | use libc; 2 | 3 | #[repr(C)] 4 | #[derive(Copy, Clone, Debug)] 5 | #[allow(dead_code)] 6 | pub struct Sigval { 7 | pub sival_ptr: *mut libc::c_void 8 | } 9 | 10 | #[repr(C)] 11 | #[derive(Copy, Clone, Debug)] 12 | #[allow(dead_code)] 13 | pub struct SigsegvInfo { 14 | pub si_signo: libc::c_int, /* Signal number */ 15 | pub si_errno: libc::c_int, /* An errno value */ 16 | pub si_code: libc::c_int, /* Signal code */ 17 | pub __pad0: libc::c_int, 18 | pub si_addr: *mut libc::c_void 19 | } 20 | -------------------------------------------------------------------------------- /src/prelude.rs: -------------------------------------------------------------------------------- 1 | pub use std::*; 2 | pub use std::boxed::Box; 3 | pub use std::vec::Vec; 4 | pub use std::string::String; 5 | pub use std::collections::{BTreeMap, BTreeSet, VecDeque}; 6 | pub use std::rc::Rc; -------------------------------------------------------------------------------- /src/prelude_no_std.rs: -------------------------------------------------------------------------------- 1 | pub use core::*; 2 | pub use alloc::{Vec, String, BTreeMap}; 3 | pub use alloc::boxed::Box; 4 | pub use alloc::btree_set::BTreeSet; 5 | -------------------------------------------------------------------------------- /src/resolver.rs: -------------------------------------------------------------------------------- 1 | use executor::{NativeResolver, NativeEntry, ExecuteError}; 2 | use value::Value; 3 | 4 | pub struct NullResolver { 5 | 6 | } 7 | 8 | impl NativeResolver for NullResolver { 9 | fn resolve(&self, _module: &str, _field: &str) -> Option { 10 | None 11 | } 12 | } 13 | 14 | impl NullResolver { 15 | pub fn new() -> NullResolver { 16 | NullResolver {} 17 | } 18 | } 19 | 20 | pub struct EmscriptenResolver { 21 | inner: I 22 | } 23 | 24 | impl EmscriptenResolver { 25 | pub fn new(inner: I) -> EmscriptenResolver { 26 | EmscriptenResolver { 27 | inner: inner 28 | } 29 | } 30 | } 31 | 32 | impl NativeResolver for EmscriptenResolver { 33 | fn resolve(&self, module: &str, field: &str) -> Option { 34 | if module != "env" { 35 | return self.inner.resolve(module, field); 36 | } 37 | 38 | match field { 39 | "abortStackOverflow" => { 40 | Some(Box::new(|_, _| { 41 | Err(ExecuteError::Custom("Emscripten stack overflow".into())) 42 | })) 43 | }, 44 | "getTotalMemory" => { 45 | Some(Box::new(|rt, _| { 46 | Ok(Some(Value::I32(rt.get_memory().len() as i32))) 47 | })) 48 | }, 49 | _ => self.inner.resolve(module, field) 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/trans/config.rs: -------------------------------------------------------------------------------- 1 | 
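// Two notes on the files above: SigsegvInfo hand-mirrors the start of
// glibc's siginfo_t on x86_64 Linux - three c_ints plus four bytes of
// padding put si_addr at offset 16, which is all the handler needs. And
// resolvers compose as decorators: EmscriptenResolver only intercepts
// imports from the "env" module and delegates everything else to its
// inner resolver, so a stack is built inside-out, e.g. (usage sketch):
//
//     let resolver = EmscriptenResolver::new(NullResolver::new());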
#[derive(Serialize, Deserialize, Debug, Clone, Default)] 2 | pub struct ModuleConfig { 3 | pub emscripten: Option 4 | } 5 | 6 | impl ModuleConfig { 7 | pub fn with_emscripten(mut self) -> Self { 8 | self.emscripten = Some(true); 9 | self 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/trans/debug_print.rs: -------------------------------------------------------------------------------- 1 | /*#[cfg(feature = "debug_print")] 2 | //#[macro_export] 3 | macro_rules! dprintln { 4 | ($fmt:expr) => (eprintln!($fmt)); 5 | ($fmt:expr, $($arg:tt)*) => (eprintln!($fmt, $($arg)*)); 6 | } 7 | 8 | #[cfg(not(feature = "debug_print"))] 9 | //#[macro_export] 10 | */ 11 | macro_rules! dprintln { 12 | ($fmt:expr) => (); 13 | ($fmt:expr, $($arg:tt)*) => (); 14 | } 15 | -------------------------------------------------------------------------------- /src/trans/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod debug_print; 3 | 4 | pub mod config; 5 | mod optrans; 6 | 7 | pub use self::config::ModuleConfig; 8 | 9 | use std::collections::BTreeMap; 10 | 11 | use parity_wasm; 12 | use parity_wasm::elements; 13 | use executor::RuntimeConfig; 14 | use resolver::NullResolver; 15 | 16 | pub fn translate_value_type(v: &elements::ValueType) -> ::module::ValType { 17 | match *v { 18 | elements::ValueType::I32 => ::module::ValType::I32, 19 | elements::ValueType::I64 => ::module::ValType::I64, 20 | elements::ValueType::F32 => ::module::ValType::F32, 21 | elements::ValueType::F64 => ::module::ValType::F64 22 | } 23 | } 24 | 25 | pub fn eval_init_expr( 26 | expr: &elements::InitExpr, 27 | globals: &mut Vec<::module::Global> 28 | ) -> ::value::Value { 29 | let mut code = optrans::translate_opcodes(expr.code()); 30 | code.push(::opcode::Opcode::Return); 31 | 32 | let module = ::module::Module { 33 | types: vec! [ 34 | ::module::Type::Func(Vec::new(), vec! [ ::module::ValType::I32 ]) 35 | ], 36 | functions: vec! [ 37 | ::module::Function { 38 | name: None, 39 | typeidx: 0, 40 | locals: Vec::new(), 41 | body: ::module::FunctionBody { 42 | opcodes: code 43 | } 44 | } 45 | ], 46 | data_segments: vec! [], 47 | exports: BTreeMap::new(), 48 | tables: Vec::new(), 49 | globals: globals.clone(), 50 | natives: Vec::new(), 51 | start_function: None 52 | }; 53 | let val = module.execute(RuntimeConfig { 54 | mem_default_size_pages: 1, 55 | mem_max_size_pages: Some(1), 56 | resolver: Box::new(NullResolver::new()) 57 | }, 0).unwrap().unwrap(); 58 | val 59 | } 60 | 61 | 62 | 63 | pub fn translate_module_raw( 64 | code: &[u8], 65 | config: ModuleConfig 66 | ) -> ::module::Module { 67 | let mut module: elements::Module = parity_wasm::deserialize_buffer(code).unwrap(); 68 | module = match module.parse_names() { 69 | Ok(v) => v, 70 | Err((_, m)) => { 71 | dprintln!("Warning: Failed to parse names"); 72 | m 73 | } 74 | }; 75 | 76 | let types: Vec<::module::Type> = if let Some(s) = module.type_section() { 77 | s.types().iter().map(|t| { 78 | let elements::Type::Function(ref ft) = *t; 79 | ::module::Type::Func( 80 | ft.params().iter().map(|v| translate_value_type(v)).collect(), 81 | if let Some(ret_type) = ft.return_type() { 82 | vec! [ translate_value_type(&ret_type) ] 83 | } else { 84 | vec! 
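// (Pre-multi-value WebAssembly allows at most one result per function
// type, hence the 0-or-1-element result vector being built here.)
//
// eval_init_expr above runs a constant initializer - a global's initial
// value or a data/element segment offset - by wrapping its opcodes in a
// throwaway one-function module and executing it with a NullResolver.
// The already-translated globals are cloned into that module so that
// initializers referring to imported globals resolve correctly.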
[] 85 | } 86 | ) 87 | }).collect() 88 | } else { 89 | Vec::new() 90 | }; 91 | 92 | let mut export_map: BTreeMap = BTreeMap::new(); 93 | if let Some(exports) = module.export_section() { 94 | for entry in exports.entries() { 95 | use self::elements::Internal; 96 | dprintln!("Export: {} -> {:?}", entry.field(), entry.internal()); 97 | 98 | let field: &str = entry.field(); 99 | let internal: &Internal = entry.internal(); 100 | 101 | match *internal { 102 | Internal::Function(id) => { 103 | export_map.insert( 104 | field.to_string(), 105 | ::module::Export::Function(id as u32) 106 | ); 107 | }, 108 | _ => { 109 | dprintln!("Warning: Internal type not supported ({:?})", internal); 110 | } 111 | } 112 | } 113 | } else { 114 | dprintln!("Warning: Export section not found"); 115 | } 116 | 117 | let mut functions: Vec<::module::Function> = Vec::new(); 118 | let mut natives: Vec<::module::Native> = Vec::new(); 119 | let mut globals: Vec<::module::Global> = Vec::new(); 120 | let mut tables: Vec<::module::Table> = Vec::new(); 121 | 122 | module.import_section().and_then(|isec| { 123 | for entry in isec.entries() { 124 | use self::elements::External; 125 | match *entry.external() { 126 | External::Function(typeidx) => { 127 | let typeidx = typeidx as usize; 128 | 129 | use ::opcode::Opcode; 130 | use ::module::Native; 131 | 132 | dprintln!("Importing function: {:?} type: {:?}", entry, types[typeidx]); 133 | 134 | let patched = if config.emscripten.unwrap_or(false) && entry.module() == "env" { 135 | let f: Option<::module::Function> = try_patch_emscripten_func_import( 136 | entry.field(), 137 | typeidx, 138 | &types[typeidx], 139 | &export_map 140 | ); 141 | if let Some(f) = f { 142 | functions.push(f); 143 | dprintln!("Patch applied"); 144 | true 145 | } else { 146 | false 147 | } 148 | } else { 149 | false 150 | }; 151 | 152 | if !patched { 153 | let native_id = natives.len(); 154 | natives.push(Native { 155 | module: entry.module().to_string(), 156 | field: entry.field().to_string(), 157 | typeidx: typeidx as u32 158 | }); 159 | 160 | let mut opcodes: Vec = vec! []; 161 | let ::module::Type::Func(ref ty, _) = types[typeidx]; 162 | 163 | for i in 0..ty.len() { 164 | opcodes.push(Opcode::GetLocal(i as u32)); 165 | } 166 | 167 | opcodes.push(Opcode::NativeInvoke(native_id as u32)); 168 | opcodes.push(Opcode::Return); 169 | 170 | functions.push(::module::Function { 171 | name: None, 172 | typeidx: typeidx as u32, 173 | locals: Vec::new(), 174 | body: ::module::FunctionBody { 175 | opcodes: opcodes 176 | } 177 | }); 178 | } 179 | }, 180 | External::Global(ref gt) => { 181 | let patched = if config.emscripten.unwrap_or(false) { 182 | let v = try_patch_emscripten_global(entry.field()); 183 | if let Some(v) = v { 184 | dprintln!("Global {:?} patched as an Emscripten import", entry); 185 | globals.push(::module::Global { 186 | value: v 187 | }); 188 | true 189 | } else { 190 | false 191 | } 192 | } else { 193 | false 194 | }; 195 | if !patched { 196 | dprintln!("Warning: Generating undef for Global import: {:?}", entry); 197 | globals.push(::module::Global { 198 | value: ::value::Value::default() 199 | }); 200 | } 201 | }, 202 | External::Table(ref tt) => { 203 | dprintln!("Warning: Generating undef for Table import: {:?}", entry); 204 | let limits: &elements::ResizableLimits = tt.limits(); 205 | let (min, max) = (limits.initial(), limits.maximum()); 206 | 207 | // Hard limit. 208 | if min > 1048576 { 209 | panic!("Hard limit for table size (min) exceeded"); 210 | } 211 | 212 | let elements: Vec> = vec! 
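// Earlier in this import loop, imported functions are materialized as
// tiny trampolines so every call site can stay an ordinary Call: the
// generated body forwards each argument and then invokes the native
// entry. For a two-argument import it comes out as:
//
//     GetLocal(0)
//     GetLocal(1)
//     NativeInvoke(native_id)
//     Return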
[ None; min as usize ]; 213 | tables.push(::module::Table { 214 | min: min, 215 | max: max, 216 | elements: elements 217 | }); 218 | }, 219 | _ => { 220 | dprintln!("Warning: Import ignored: {:?}", entry); 221 | continue; 222 | } 223 | } 224 | } 225 | Some(()) 226 | }); 227 | 228 | { 229 | let to_extend = module.global_section().and_then(|gs| { 230 | Some(gs.entries().iter().map(|entry| { 231 | dprintln!("Global {:?} -> {:?}", entry, entry.init_expr()); 232 | ::module::Global { 233 | value: eval_init_expr( 234 | entry.init_expr(), 235 | &mut globals 236 | ) 237 | } 238 | }).collect()) 239 | }).or_else(|| Some(Vec::new())).unwrap(); 240 | globals.extend(to_extend.into_iter()); 241 | } 242 | 243 | tables.extend( 244 | module.table_section().and_then(|ts| { 245 | Some(ts.entries().iter().map(|entry| { 246 | let limits: &elements::ResizableLimits = entry.limits(); 247 | let (min, max) = (limits.initial(), limits.maximum()); 248 | 249 | // Hard limit. 250 | if min > 1048576 { 251 | panic!("Hard limit for table size (min) exceeded"); 252 | } 253 | 254 | let elements: Vec> = vec! [ None; min as usize ]; 255 | ::module::Table { 256 | min: min, 257 | max: max, 258 | elements: elements 259 | } 260 | }).collect()) 261 | }).or_else(|| Some(Vec::new())).unwrap().into_iter() 262 | ); 263 | 264 | let code_section: &elements::CodeSection = module.code_section().unwrap(); 265 | let function_section: &elements::FunctionSection = module.function_section().unwrap(); 266 | 267 | let bodies: &[elements::FuncBody] = code_section.bodies(); 268 | let fdefs: &[elements::Func] = function_section.entries(); 269 | 270 | assert_eq!(bodies.len(), fdefs.len()); 271 | 272 | functions.extend((0..bodies.len()).map(|i| { 273 | //dprintln!("Function {}: {:?} {:?}", i, fdefs[i], bodies[i]); 274 | let typeidx = fdefs[i].type_ref() as usize; 275 | let mut locals: Vec<::module::ValType> = Vec::new(); 276 | for lc in bodies[i].locals() { 277 | let t = translate_value_type(&lc.value_type()); 278 | for _ in 0..lc.count() { 279 | locals.push(t); 280 | } 281 | } 282 | let mut opcodes = optrans::translate_opcodes(bodies[i].code().elements()); 283 | opcodes.push(::opcode::Opcode::Return); 284 | 285 | ::module::Function { 286 | name: None, 287 | typeidx: typeidx as u32, 288 | locals: locals, 289 | body: ::module::FunctionBody { 290 | opcodes: opcodes 291 | } 292 | } 293 | })); 294 | 295 | let start_func_id = module.start_section(); 296 | 297 | for sec in module.sections() { 298 | let ns = if let elements::Section::Name(ref ns) = *sec { 299 | ns 300 | } else { 301 | continue; 302 | }; 303 | if let elements::NameSection::Function(ref fns) = *ns { 304 | dprintln!("Found function name section"); 305 | for (i, name) in fns.names() { 306 | functions[i as usize].name = Some(name.to_string()); 307 | } 308 | break; 309 | } 310 | } 311 | 312 | let mut data_segs: Vec<::module::DataSegment> = Vec::new(); 313 | if let Some(ds) = module.data_section() { 314 | for seg in ds.entries() { 315 | let offset = eval_init_expr(seg.offset(), &mut globals).get_i32().unwrap() as u32; 316 | //dprintln!("Offset resolved: {} {:?}", offset, seg.value()); 317 | data_segs.push(::module::DataSegment { 318 | offset: offset, 319 | data: seg.value().to_vec() 320 | }); 321 | } 322 | } else { 323 | dprintln!("Warning: Data section not found"); 324 | } 325 | 326 | if config.emscripten.unwrap_or(false) { 327 | dprintln!("Writing DYNAMICTOP_PTR"); 328 | let mem_end = unsafe { 329 | ::std::mem::transmute::(8323072) 330 | }; 331 | data_segs.push(::module::DataSegment { 332 | 
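// (The transmute above produces the i32's native byte order, which
// matches WebAssembly's little-endian memory only on little-endian hosts
// such as x86_64. A portable spelling via the byteorder dependency:
//
//     let mut mem_end = [0u8; 4];
//     LittleEndian::write_i32(&mut mem_end, 8323072);
//
// where LittleEndian and ByteOrder come from the byteorder crate.)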
offset: 16, 333 | data: mem_end.to_vec() 334 | }); 335 | } 336 | 337 | if let Some(elems) = module.elements_section() { 338 | for entry in elems.entries() { 339 | let offset = eval_init_expr(entry.offset(), &mut globals).get_i32().unwrap() as u32 as usize; 340 | let members = entry.members(); 341 | let end = offset + members.len(); 342 | let tt = &mut tables[entry.index() as usize]; 343 | 344 | if end > tt.elements.len() { 345 | if let Some(max) = tt.max { 346 | if end > max as usize { 347 | panic!("Max table length exceeded"); 348 | } 349 | } 350 | // Hard limit. 351 | if end > 1048576 { 352 | panic!("Hard limit for table size (max) exceeded"); 353 | } 354 | while end > tt.elements.len() { 355 | tt.elements.push(None); 356 | } 357 | } 358 | 359 | for i in 0..members.len() { 360 | tt.elements[offset + i] = Some(members[i]); 361 | } 362 | 363 | dprintln!("Elements written to table: {}, {}", offset, members.len()); 364 | } 365 | dprintln!("{} elements added to table", elems.entries().len()); 366 | } else { 367 | dprintln!("Warning: Elements section not found"); 368 | } 369 | 370 | dprintln!("Start: {:?}", start_func_id); 371 | 372 | let target_module = ::module::Module { 373 | types: types, 374 | functions: functions, 375 | data_segments: data_segs, 376 | exports: export_map, 377 | tables: tables, 378 | globals: globals, 379 | natives: natives, 380 | start_function: start_func_id 381 | }; 382 | 383 | target_module 384 | } 385 | 386 | pub fn translate_module( 387 | code: &[u8], 388 | config: ModuleConfig 389 | ) -> Vec { 390 | let serialized = translate_module_raw(code, config) 391 | .std_serialize().unwrap(); 392 | serialized 393 | } 394 | 395 | fn try_patch_emscripten_global(field_name: &str) -> Option<::value::Value> { 396 | use ::value::Value; 397 | 398 | match field_name { 399 | "memoryBase" => Some(Value::I32(0)), 400 | "tableBase" => Some(Value::I32(0)), 401 | "DYNAMICTOP_PTR" => Some(Value::I32(16)), // needs mem init 402 | "tempDoublePtr" => Some(Value::I32(64)), 403 | "STACK_BASE" => Some(Value::I32(4096)), 404 | "STACKTOP" => Some(Value::I32(4096)), 405 | "STACK_MAX" => Some(Value::I32(4096 + 1048576)), 406 | _ => None 407 | } 408 | } 409 | 410 | fn try_patch_emscripten_func_import( 411 | field_name: &str, 412 | typeidx: usize, 413 | ty: &::module::Type, 414 | export_map: &BTreeMap 415 | ) -> Option<::module::Function> { 416 | use ::module::{Function, FunctionBody}; 417 | use ::opcode::Opcode; 418 | 419 | if field_name.starts_with("invoke_") { 420 | let parts: Vec<&str> = field_name.split("_").collect(); 421 | if parts.len() == 2 { 422 | return Some( 423 | gen_invoke_n_to_dyncall_n( 424 | parts[1], 425 | typeidx, 426 | ty, 427 | export_map 428 | ) 429 | ); 430 | } 431 | } 432 | 433 | match field_name { 434 | "_emscripten_memcpy_big" => Some( 435 | Function { 436 | name: Some("_emscripten_memcpy_big".into()), 437 | typeidx: typeidx as u32, 438 | locals: Vec::new(), 439 | body: FunctionBody { 440 | opcodes: vec! [ 441 | Opcode::GetLocal(0), // dest 442 | Opcode::GetLocal(1), // src 443 | Opcode::GetLocal(2), // n_bytes 444 | Opcode::Memcpy, 445 | Opcode::GetLocal(0), 446 | Opcode::Return 447 | ] 448 | } 449 | } 450 | ), 451 | "enlargeMemory" => Some( 452 | Function { 453 | name: Some("enlargeMemory".into()), 454 | typeidx: typeidx as u32, 455 | locals: Vec::new(), 456 | body: FunctionBody { 457 | opcodes: vec! 
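// For reference: in stock Emscripten the invoke_<sig> imports wrap an
// indirect call in a JS try/catch so C++ exceptions and longjmp can
// unwind; the patch above forwards them straight to the exported
// dynCall_<sig> instead, so such calls work but unwinding support is
// dropped. _emscripten_memcpy_big becomes a plain Memcpy opcode that
// returns its dest argument, matching memcpy semantics. The
// enlargeMemory body that follows grows memory by one page and
// unconditionally reports success: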
[ 458 | Opcode::I32Const(1), 459 | Opcode::GrowMemory, 460 | Opcode::Drop, 461 | Opcode::I32Const(1), 462 | Opcode::Return 463 | ] 464 | } 465 | } 466 | ), 467 | _ => None 468 | } 469 | } 470 | 471 | fn gen_invoke_n_to_dyncall_n( 472 | suffix: &str, 473 | typeidx: usize, 474 | ty: &::module::Type, 475 | export_map: &BTreeMap 476 | ) -> ::module::Function { 477 | use ::module::{Function, FunctionBody, Type, Export}; 478 | use ::opcode::Opcode; 479 | 480 | let invoke_name = format!("invoke_{}", suffix); 481 | let dyncall_name = format!("dynCall_{}", suffix); 482 | 483 | let Export::Function(fn_idx) = *export_map.get(dyncall_name.as_str()).unwrap(); 484 | 485 | let mut opcodes: Vec = Vec::new(); 486 | 487 | let Type::Func(ref ft_args, ref ft_ret) = *ty; 488 | 489 | for i in 0..ft_args.len() { 490 | opcodes.push(Opcode::GetLocal(i as u32)); 491 | } 492 | opcodes.push(Opcode::Call(fn_idx)); 493 | opcodes.push(Opcode::Return); 494 | 495 | Function { 496 | name: Some(invoke_name.clone()), 497 | typeidx: typeidx as u32, 498 | locals: Vec::new(), 499 | body: FunctionBody { 500 | opcodes: opcodes 501 | } 502 | } 503 | } 504 | -------------------------------------------------------------------------------- /src/trans/optrans.rs: -------------------------------------------------------------------------------- 1 | use opcode::Memarg; 2 | use parity_wasm::elements; 3 | 4 | struct Continuation { 5 | opcode_index: usize, 6 | brtable_index: Option 7 | } 8 | 9 | impl Continuation { 10 | fn with_opcode_index(index: usize) -> Continuation { 11 | Continuation { 12 | opcode_index: index, 13 | brtable_index: None 14 | } 15 | } 16 | 17 | fn brtable(index: usize, brt_index: usize) -> Continuation { 18 | Continuation { 19 | opcode_index: index, 20 | brtable_index: Some(brt_index) 21 | } 22 | } 23 | 24 | fn write(&self, target: usize, opcodes: &mut [::opcode::Opcode]) { 25 | use ::opcode::Opcode; 26 | 27 | let op_index = self.opcode_index; 28 | 29 | let new_op = match ::std::mem::replace( 30 | &mut opcodes[op_index], 31 | Opcode::Unreachable 32 | ) { 33 | Opcode::Jmp(_) => Opcode::Jmp(target as u32), 34 | Opcode::JmpIf(_) => Opcode::JmpIf(target as u32), 35 | Opcode::JmpTable(mut table, otherwise) => { 36 | let table_index = self.brtable_index.unwrap(); 37 | if table_index < table.len() { 38 | table[table_index] = target as u32; 39 | Opcode::JmpTable(table, otherwise) 40 | } else if table_index == table.len() { 41 | Opcode::JmpTable(table, target as u32) 42 | } else { 43 | panic!("Table index out of bound"); 44 | } 45 | }, 46 | _ => panic!("Expecting Jmp*") 47 | }; 48 | opcodes[op_index] = new_op; 49 | } 50 | } 51 | 52 | #[derive(Copy, Clone, Debug, Eq, PartialEq)] 53 | enum LabelType { 54 | Block, 55 | Loop(usize), // begin 56 | If(usize), // branch-if-false instr 57 | Else 58 | } 59 | 60 | struct Label { 61 | continuations: Vec, 62 | ty: LabelType 63 | } 64 | 65 | impl Label { 66 | fn new(ty: LabelType) -> Label { 67 | Label { 68 | continuations: Vec::new(), 69 | ty: ty 70 | } 71 | } 72 | 73 | fn terminate(&self, opcodes: &mut [::opcode::Opcode]) { 74 | let target = match self.ty { 75 | LabelType::Block | LabelType::If(_) | LabelType::Else => opcodes.len(), 76 | LabelType::Loop(begin) => begin 77 | }; 78 | for cont in &self.continuations { 79 | cont.write(target, opcodes); 80 | } 81 | } 82 | } 83 | 84 | pub fn translate_opcodes(ops: &[elements::Opcode]) -> Vec<::opcode::Opcode> { 85 | use self::elements::Opcode as PwOp; 86 | use ::opcode::Opcode as WcOp; 87 | 88 | let mut result: Vec<::opcode::Opcode> = 
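// The Continuation/Label machinery above implements classic one-pass
// back-patching: forward branches are emitted as Jmp/JmpIf/JmpTable with
// placeholder targets and registered on the enclosing Label, and when
// that label's scope closes, terminate() rewrites each placeholder to
// the resolved target - the instruction after the block for
// Block/If/Else, or the recorded beginning for Loop (a wasm `br` to a
// loop re-enters it). Life of a `block ... br 0 ... end`, with an
// illustrative placeholder value:
//
//     br 0   ->  Jmp(u32::MAX)         // placeholder; Continuation saved
//     end    ->  label.terminate(..)   // Jmp(u32::MAX) -> Jmp(end_index)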
Vec::new(); 89 | let mut labels: Vec