├── .gitignore ├── .gitmodules ├── .travis.yml ├── Cargo.toml ├── README.md ├── ci ├── run.sh └── tools.sh ├── mimalloc-sys-test ├── Cargo.lock ├── Cargo.toml ├── build.rs └── test │ └── main.rs ├── mimalloc-sys ├── Cargo.lock ├── Cargo.toml ├── README.md ├── build.rs ├── rust_impl │ ├── Cargo.toml │ ├── citrus │ │ ├── alloc-aligned.rs │ │ ├── alloc-override-osx.rs │ │ ├── alloc-override-win.rs │ │ ├── alloc-override.rs │ │ ├── alloc.rs │ │ ├── heap.rs │ │ ├── init.rs │ │ ├── options.rs │ │ ├── os.rs │ │ ├── page-queue.rs │ │ ├── page.rs │ │ ├── segment.rs │ │ ├── static.rs │ │ └── stats.rs │ └── src │ │ ├── alloc.rs │ │ ├── alloc_aligned.rs │ │ ├── alloc_override.rs │ │ ├── alloc_override_osx.rs │ │ ├── alloc_override_win.rs │ │ ├── heap.rs │ │ ├── init.rs │ │ ├── lib.rs │ │ ├── options.rs │ │ ├── os.rs │ │ ├── page.rs │ │ ├── page_queue.rs │ │ ├── segment.rs │ │ ├── static_.rs │ │ └── stats.rs └── src │ └── lib.rs ├── rustfmt.toml ├── src └── lib.rs └── tests └── smoke.rs /.gitignore: -------------------------------------------------------------------------------- 1 | ################# 2 | ## Rust specific 3 | ################# 4 | 5 | **/target/ 6 | Cargo.lock 7 | 8 | ############## 9 | ## C Specific 10 | ############## 11 | 12 | # Prerequisites 13 | *.d 14 | 15 | # Object files 16 | *.o 17 | *.ko 18 | *.obj 19 | *.elf 20 | 21 | # Linker output 22 | *.ilk 23 | *.map 24 | *.exp 25 | 26 | # Precompiled Headers 27 | *.gch 28 | *.pch 29 | 30 | # Libraries 31 | *.lib 32 | *.a 33 | *.la 34 | *.lo 35 | 36 | # Shared objects (inc. Windows DLLs) 37 | *.dll 38 | *.so 39 | *.so.* 40 | *.dylib 41 | 42 | # Executables 43 | *.exe 44 | *.out 45 | *.app 46 | *.i*86 47 | *.x86_64 48 | *.hex 49 | 50 | # Debug files 51 | *.dSYM/ 52 | *.su 53 | *.idb 54 | *.pdb 55 | 56 | # Kernel Module Compile Results 57 | *.mod* 58 | *.cmd 59 | .tmp_versions/ 60 | modules.order 61 | Module.symvers 62 | Mkfile.old 63 | dkms.conf 64 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "mimalloc-sys/mimalloc"] 2 | path = mimalloc-sys/mimalloc 3 | url = https://github.com/rusch95/mimalloc 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: nightly 3 | services: docker 4 | 5 | matrix: 6 | include: 7 | - name: "Tools" 8 | script: sh ci/tools.sh 9 | - name: "x86_64-unknown-linux-gnu" 10 | env: TARGET=x86_64-unknown-linux-gnu 11 | - name: "x86_64-apple-darwin" 12 | env: TARGET=x86_64-apple-darwin 13 | os: osx 14 | osx_image: xcode10 15 | - name: "x86_64-pc-windows-msvc" 16 | env: TARGET=x86_64-pc-windows-msvc 17 | os: windows 18 | 19 | script: sh ci/run.sh 20 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mimallocator" 3 | version = "0.1.0" 4 | authors = ["gnzlbg "] 5 | edition = "2018" 6 | license = "MIT/Apache-2.0" 7 | keywords = ["allocator", "mimalloc"] 8 | categories = ["memory-management", "api-bindings"] 9 | repository = "https://github.com/gnzlbg/mimallocator" 10 | homepage = "https://github.com/gnzlbg/mimallocator" 11 | documentation = "https://docs.rs/mimallocator" 12 | description = """ 13 | A Rust allocator backed by mimalloc 14 | """ 15 | 16 | [badges] 17 | travis-ci = { 
repository = "gnzlbg/mimallocator" } 18 | is-it-maintained-issue-resolution = { repository = "gnzlbg/mimallocator" } 19 | is-it-maintained-open-issues = { repository = "gnzlbg/mimallocator" } 20 | maintenance = { status = "actively-developed" } 21 | 22 | [dependencies] 23 | mimalloc-sys = { version = "0.1", path = "mimalloc-sys" } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | A Rust allocator backed by mimalloc 2 | === 3 | 4 | [![Travis-CI Status]][travis] 5 | 6 | This crates provides a Rust `#[global_allocator]` backed by [`mimalloc`]. 7 | 8 | See also the [`mimalloc-sys`] crate providing raw FFI bindings to [`mimalloc`]. 9 | 10 | ## Design Decisions 11 | 12 | mimalloc-rs aims to allow for a function-by-function port of mimalloc, https://github.com/microsoft/mimalloc (written in C), to Rust. It does so by dynamically linking the Rust code to mimalloc, so that functions can be replaced one-by-one with a suite of tests run in between. Eventually, once all of the code is ported over to Rust, the FFI and porting infrastructure will be replaced with direct calls to the rust_impl code. 13 | 14 | c_impl refers to the forked mimalloc code written in C 15 | 16 | rust_impl refers to the mimalloc code ported over to Rust. 17 | 18 | ## Testing 19 | 20 | ``` 21 | # Make sure cmake is installed for building the mimalloc c_impl 22 | 23 | # Populate the submodule fork of mimalloc 24 | $ git submodule init 25 | $ git submodule update 26 | 27 | # Build the rust impl library 28 | $ cd mimalloc-rs/mimalloc-sys/rust_impl 29 | $ cargo build 30 | 31 | # Run tests 32 | $ cd mimalloc-rs 33 | $ LD_LIBRARY_PATH=mimalloc-sys/rust_impl/target/debug/ cargo test 34 | ``` 35 | 36 | ## License 37 | 38 | This project is licensed under either of 39 | 40 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 41 | http://www.apache.org/licenses/LICENSE-2.0) 42 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 43 | http://opensource.org/licenses/MIT) 44 | 45 | at your option. 46 | 47 | ## Contribution 48 | 49 | Unless you explicitly state otherwise, any contribution intentionally submitted 50 | for inclusion in `mimalloc-sys` by you, as defined in the Apache-2.0 license, 51 | shall be dual licensed as above, without any additional terms or conditions. 
52 | 53 | [`mimalloc-sys`]: https://crates.io/crates/mimalloc-sys 54 | [`mimalloc`]: https://github.com/microsoft/mimalloc 55 | [travis]: https://travis-ci.com/gnzlbg/mimallocator 56 | [Travis-CI Status]: https://travis-ci.com/gnzlbg/mimallocator.svg?branch=master 57 | -------------------------------------------------------------------------------- /ci/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | # Build mimalloc-sys 6 | ( 7 | cd mimalloc-sys 8 | cargo build 9 | cargo build --release 10 | ) 11 | 12 | # Test mimallocator 13 | cargo test 14 | cargo test --release 15 | 16 | # Test mimalloc-sys ABI: 17 | ( 18 | cd mimalloc-sys-test 19 | cargo test 20 | cargo test --release 21 | ) 22 | -------------------------------------------------------------------------------- /ci/tools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | # Documentation 6 | ( 7 | cd mimalloc-sys 8 | cargo doc 9 | ) 10 | cargo doc 11 | 12 | # Formatting 13 | if rustup component add rustfmt-preview ; then 14 | ( 15 | cd mimalloc-sys 16 | cargo fmt -- --check 17 | ) 18 | cargo fmt -- --check 19 | fi 20 | 21 | # Clippy 22 | if rustup component add clippy-preview ; then 23 | ( 24 | cd mimalloc-sys 25 | cargo clippy -- -D clippy::pedantic 26 | ) 27 | cargo clippy -- -D clippy::pedantic 28 | fi 29 | 30 | shellcheck ci/*.sh 31 | -------------------------------------------------------------------------------- /mimalloc-sys-test/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | [[package]] 4 | name = "autocfg" 5 | version = "0.1.4" 6 | source = "registry+https://github.com/rust-lang/crates.io-index" 7 | 8 | [[package]] 9 | name = "bitflags" 10 | version = "0.9.1" 11 | source = "registry+https://github.com/rust-lang/crates.io-index" 12 | 13 | [[package]] 14 | name = "bitflags" 15 | version = "1.1.0" 16 | source = "registry+https://github.com/rust-lang/crates.io-index" 17 | 18 | [[package]] 19 | name = "cc" 20 | version = "1.0.37" 21 | source = "registry+https://github.com/rust-lang/crates.io-index" 22 | 23 | [[package]] 24 | name = "cfg-if" 25 | version = "0.1.9" 26 | source = "registry+https://github.com/rust-lang/crates.io-index" 27 | 28 | [[package]] 29 | name = "cloudabi" 30 | version = "0.0.3" 31 | source = "registry+https://github.com/rust-lang/crates.io-index" 32 | dependencies = [ 33 | "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 34 | ] 35 | 36 | [[package]] 37 | name = "cmake" 38 | version = "0.1.40" 39 | source = "registry+https://github.com/rust-lang/crates.io-index" 40 | dependencies = [ 41 | "cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", 42 | ] 43 | 44 | [[package]] 45 | name = "ctest" 46 | version = "0.2.14" 47 | source = "registry+https://github.com/rust-lang/crates.io-index" 48 | dependencies = [ 49 | "cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", 50 | "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", 51 | "syntex_syntax 0.59.1 (registry+https://github.com/rust-lang/crates.io-index)", 52 | ] 53 | 54 | [[package]] 55 | name = "extprim" 56 | version = "1.7.0" 57 | source = "registry+https://github.com/rust-lang/crates.io-index" 58 | dependencies = [ 59 | "num-traits 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", 60 | "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", 61 | "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", 62 | "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", 63 | "serde 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 64 | ] 65 | 66 | [[package]] 67 | name = "fs_extra" 68 | version = "1.1.0" 69 | source = "registry+https://github.com/rust-lang/crates.io-index" 70 | 71 | [[package]] 72 | name = "fuchsia-cprng" 73 | version = "0.1.1" 74 | source = "registry+https://github.com/rust-lang/crates.io-index" 75 | 76 | [[package]] 77 | name = "itoa" 78 | version = "0.4.4" 79 | source = "registry+https://github.com/rust-lang/crates.io-index" 80 | 81 | [[package]] 82 | name = "kernel32-sys" 83 | version = "0.2.2" 84 | source = "registry+https://github.com/rust-lang/crates.io-index" 85 | dependencies = [ 86 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 87 | "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 88 | ] 89 | 90 | [[package]] 91 | name = "libc" 92 | version = "0.2.58" 93 | source = "registry+https://github.com/rust-lang/crates.io-index" 94 | 95 | [[package]] 96 | name = "log" 97 | version = "0.3.9" 98 | source = "registry+https://github.com/rust-lang/crates.io-index" 99 | dependencies = [ 100 | "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", 101 | ] 102 | 103 | [[package]] 104 | name = "log" 105 | version = "0.4.6" 106 | source = "registry+https://github.com/rust-lang/crates.io-index" 107 | dependencies = [ 108 | "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", 109 | ] 110 | 111 | [[package]] 112 | name = "mimalloc-sys" 113 | version = "0.1.1" 114 | dependencies = [ 115 | "cmake 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", 116 | "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 117 | "libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", 118 | ] 119 | 120 | [[package]] 121 | name = "mimalloc-sys-test" 122 | version = "0.1.0" 123 | dependencies = [ 124 | "ctest 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", 125 | "libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", 126 | "mimalloc-sys 0.1.1", 127 | ] 128 | 129 | [[package]] 130 | name = "num-traits" 131 | version = "0.2.8" 132 | source = "registry+https://github.com/rust-lang/crates.io-index" 133 | dependencies = [ 134 | "autocfg 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 135 | ] 136 | 137 | [[package]] 138 | name = "proc-macro2" 139 | version = "0.4.30" 140 | source = "registry+https://github.com/rust-lang/crates.io-index" 141 | dependencies = [ 142 | "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 143 | ] 144 | 145 | [[package]] 146 | name = "quote" 147 | version = "0.6.12" 148 | source = "registry+https://github.com/rust-lang/crates.io-index" 149 | dependencies = [ 150 | "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", 151 | ] 152 | 153 | [[package]] 154 | name = "rand" 155 | version = "0.6.5" 156 | source = "registry+https://github.com/rust-lang/crates.io-index" 157 | dependencies = [ 158 | "autocfg 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 159 | "libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", 160 | "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 
161 | "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 162 | "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 163 | "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 164 | "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 165 | "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", 166 | "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 167 | "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 168 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 169 | ] 170 | 171 | [[package]] 172 | name = "rand_chacha" 173 | version = "0.1.1" 174 | source = "registry+https://github.com/rust-lang/crates.io-index" 175 | dependencies = [ 176 | "autocfg 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 177 | "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", 178 | ] 179 | 180 | [[package]] 181 | name = "rand_core" 182 | version = "0.3.1" 183 | source = "registry+https://github.com/rust-lang/crates.io-index" 184 | dependencies = [ 185 | "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 186 | ] 187 | 188 | [[package]] 189 | name = "rand_core" 190 | version = "0.4.0" 191 | source = "registry+https://github.com/rust-lang/crates.io-index" 192 | 193 | [[package]] 194 | name = "rand_hc" 195 | version = "0.1.0" 196 | source = "registry+https://github.com/rust-lang/crates.io-index" 197 | dependencies = [ 198 | "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", 199 | ] 200 | 201 | [[package]] 202 | name = "rand_isaac" 203 | version = "0.1.1" 204 | source = "registry+https://github.com/rust-lang/crates.io-index" 205 | dependencies = [ 206 | "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", 207 | ] 208 | 209 | [[package]] 210 | name = "rand_jitter" 211 | version = "0.1.4" 212 | source = "registry+https://github.com/rust-lang/crates.io-index" 213 | dependencies = [ 214 | "libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", 215 | "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 216 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 217 | ] 218 | 219 | [[package]] 220 | name = "rand_os" 221 | version = "0.1.3" 222 | source = "registry+https://github.com/rust-lang/crates.io-index" 223 | dependencies = [ 224 | "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", 225 | "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 226 | "libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", 227 | "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 228 | "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 229 | "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", 230 | ] 231 | 232 | [[package]] 233 | name = "rand_pcg" 234 | version = "0.1.2" 235 | source = "registry+https://github.com/rust-lang/crates.io-index" 236 | dependencies = [ 237 | "autocfg 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 238 | "rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 239 | ] 240 | 241 | [[package]] 242 | name = "rand_xorshift" 243 | version = "0.1.1" 244 | source = "registry+https://github.com/rust-lang/crates.io-index" 245 | dependencies = [ 246 | "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", 
247 | ] 248 | 249 | [[package]] 250 | name = "rdrand" 251 | version = "0.4.0" 252 | source = "registry+https://github.com/rust-lang/crates.io-index" 253 | dependencies = [ 254 | "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", 255 | ] 256 | 257 | [[package]] 258 | name = "rustc_version" 259 | version = "0.2.3" 260 | source = "registry+https://github.com/rust-lang/crates.io-index" 261 | dependencies = [ 262 | "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", 263 | ] 264 | 265 | [[package]] 266 | name = "ryu" 267 | version = "0.2.8" 268 | source = "registry+https://github.com/rust-lang/crates.io-index" 269 | 270 | [[package]] 271 | name = "semver" 272 | version = "0.9.0" 273 | source = "registry+https://github.com/rust-lang/crates.io-index" 274 | dependencies = [ 275 | "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", 276 | ] 277 | 278 | [[package]] 279 | name = "semver-parser" 280 | version = "0.7.0" 281 | source = "registry+https://github.com/rust-lang/crates.io-index" 282 | 283 | [[package]] 284 | name = "serde" 285 | version = "1.0.92" 286 | source = "registry+https://github.com/rust-lang/crates.io-index" 287 | dependencies = [ 288 | "serde_derive 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 289 | ] 290 | 291 | [[package]] 292 | name = "serde_derive" 293 | version = "1.0.92" 294 | source = "registry+https://github.com/rust-lang/crates.io-index" 295 | dependencies = [ 296 | "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", 297 | "quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", 298 | "syn 0.15.37 (registry+https://github.com/rust-lang/crates.io-index)", 299 | ] 300 | 301 | [[package]] 302 | name = "serde_json" 303 | version = "1.0.39" 304 | source = "registry+https://github.com/rust-lang/crates.io-index" 305 | dependencies = [ 306 | "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", 307 | "ryu 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 308 | "serde 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 309 | ] 310 | 311 | [[package]] 312 | name = "syn" 313 | version = "0.15.37" 314 | source = "registry+https://github.com/rust-lang/crates.io-index" 315 | dependencies = [ 316 | "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", 317 | "quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)", 318 | "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 319 | ] 320 | 321 | [[package]] 322 | name = "syntex_errors" 323 | version = "0.59.1" 324 | source = "registry+https://github.com/rust-lang/crates.io-index" 325 | dependencies = [ 326 | "libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", 327 | "serde 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 328 | "serde_derive 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 329 | "syntex_pos 0.59.1 (registry+https://github.com/rust-lang/crates.io-index)", 330 | "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", 331 | "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 332 | ] 333 | 334 | [[package]] 335 | name = "syntex_pos" 336 | version = "0.59.1" 337 | source = "registry+https://github.com/rust-lang/crates.io-index" 338 | dependencies = [ 339 | "serde 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 340 | "serde_derive 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 
341 | ] 342 | 343 | [[package]] 344 | name = "syntex_syntax" 345 | version = "0.59.1" 346 | source = "registry+https://github.com/rust-lang/crates.io-index" 347 | dependencies = [ 348 | "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", 349 | "extprim 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", 350 | "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", 351 | "serde 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 352 | "serde_derive 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)", 353 | "serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)", 354 | "syntex_errors 0.59.1 (registry+https://github.com/rust-lang/crates.io-index)", 355 | "syntex_pos 0.59.1 (registry+https://github.com/rust-lang/crates.io-index)", 356 | "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 357 | ] 358 | 359 | [[package]] 360 | name = "term" 361 | version = "0.4.6" 362 | source = "registry+https://github.com/rust-lang/crates.io-index" 363 | dependencies = [ 364 | "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 365 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 366 | ] 367 | 368 | [[package]] 369 | name = "unicode-xid" 370 | version = "0.1.0" 371 | source = "registry+https://github.com/rust-lang/crates.io-index" 372 | 373 | [[package]] 374 | name = "winapi" 375 | version = "0.2.8" 376 | source = "registry+https://github.com/rust-lang/crates.io-index" 377 | 378 | [[package]] 379 | name = "winapi" 380 | version = "0.3.7" 381 | source = "registry+https://github.com/rust-lang/crates.io-index" 382 | dependencies = [ 383 | "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 384 | "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 385 | ] 386 | 387 | [[package]] 388 | name = "winapi-build" 389 | version = "0.1.1" 390 | source = "registry+https://github.com/rust-lang/crates.io-index" 391 | 392 | [[package]] 393 | name = "winapi-i686-pc-windows-gnu" 394 | version = "0.4.0" 395 | source = "registry+https://github.com/rust-lang/crates.io-index" 396 | 397 | [[package]] 398 | name = "winapi-x86_64-pc-windows-gnu" 399 | version = "0.4.0" 400 | source = "registry+https://github.com/rust-lang/crates.io-index" 401 | 402 | [metadata] 403 | "checksum autocfg 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "0e49efa51329a5fd37e7c79db4621af617cd4e3e5bc224939808d076077077bf" 404 | "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" 405 | "checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" 406 | "checksum cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)" = "39f75544d7bbaf57560d2168f28fd649ff9c76153874db88bdbdfd839b1a7e7d" 407 | "checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" 408 | "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" 409 | "checksum cmake 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "2ca4386c8954b76a8415b63959337d940d724b336cabd3afe189c2b51a7e1ff0" 410 | "checksum ctest 0.2.14 
(registry+https://github.com/rust-lang/crates.io-index)" = "7f1840a3b51b6842af2c700c322863fbdf193f64f38dec51e2660f62e9c7e3a5" 411 | "checksum extprim 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cfba1bd0c749760b3dad3e4d3926b2bf6186f48e244456bfe1ad3aecd55b4fb1" 412 | "checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674" 413 | "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" 414 | "checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" 415 | "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" 416 | "checksum libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "6281b86796ba5e4366000be6e9e18bf35580adf9e63fbe2294aadb587613a319" 417 | "checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" 418 | "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6" 419 | "checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32" 420 | "checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" 421 | "checksum quote 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "faf4799c5d274f3868a4aae320a0a182cbd2baee377b378f080e16a23e9d80db" 422 | "checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" 423 | "checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" 424 | "checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" 425 | "checksum rand_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d0e7a549d590831370895ab7ba4ea0c1b6b011d106b5ff2da6eee112615e6dc0" 426 | "checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" 427 | "checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" 428 | "checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" 429 | "checksum rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" 430 | "checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" 431 | "checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" 432 | "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" 433 | "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" 434 | "checksum ryu 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "b96a9549dc8d48f2c283938303c4b5a77aa29bfbc5b54b084fb1630408899a8f" 435 | "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" 436 | "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" 437 | "checksum serde 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)" = "32746bf0f26eab52f06af0d0aa1984f641341d06d8d673c693871da2d188c9be" 438 | "checksum serde_derive 1.0.92 (registry+https://github.com/rust-lang/crates.io-index)" = "46a3223d0c9ba936b61c0d2e3e559e3217dbfb8d65d06d26e8b3c25de38bae3e" 439 | "checksum serde_json 1.0.39 (registry+https://github.com/rust-lang/crates.io-index)" = "5a23aa71d4a4d43fdbfaac00eff68ba8a06a51759a89ac3304323e800c4dd40d" 440 | "checksum syn 0.15.37 (registry+https://github.com/rust-lang/crates.io-index)" = "e11410033fd5cf69a1cf2084604e011190c56f11e08ffc53df880f5f65f1c6e4" 441 | "checksum syntex_errors 0.59.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3133289179676c9f5c5b2845bf5a2e127769f4889fcbada43035ef6bd662605e" 442 | "checksum syntex_pos 0.59.1 (registry+https://github.com/rust-lang/crates.io-index)" = "30ab669fa003d208c681f874bbc76d91cc3d32550d16b5d9d2087cf477316470" 443 | "checksum syntex_syntax 0.59.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03815b9f04d95828770d9c974aa39c6e1f6ef3114eb77a3ce09008a0d15dd142" 444 | "checksum term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "fa63644f74ce96fbeb9b794f66aff2a52d601cbd5e80f4b97123e3899f4570f1" 445 | "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" 446 | "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" 447 | "checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" 448 | "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" 449 | "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 450 | "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 451 | -------------------------------------------------------------------------------- /mimalloc-sys-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mimalloc-sys-test" 3 | version = "0.1.0" 4 | authors = ["gnzlbg "] 5 | edition = "2015" 6 | build = "build.rs" 7 | 8 | [dependencies] 9 | mimalloc-sys = { path = "../mimalloc-sys" } 10 | libc = "0.2" 11 | 12 | [build-dependencies] 13 | ctest = "0.2" 14 | 15 | [[test]] 16 | name = "main" 17 | path = "test/main.rs" 18 | harness = false 19 | 
-------------------------------------------------------------------------------- /mimalloc-sys-test/build.rs: -------------------------------------------------------------------------------- 1 | //! Generate C FFI binding tests 2 | 3 | extern crate ctest; 4 | 5 | use std::{env, path::PathBuf}; 6 | 7 | fn main() { 8 | let root = PathBuf::from(env::var_os("DEP_MIMALLOC_ROOT").unwrap()); 9 | eprintln!("ROOT={:?}", root); 10 | let mut cfg = ctest::TestGenerator::new(); 11 | cfg.header("mimalloc.h") 12 | .include(root.join("lib").join("mimalloc-1.0").join("include")) 13 | .fn_cname(|rust, link_name| link_name.unwrap_or(rust).to_string()) 14 | .skip_signededness(|c| c.ends_with("_t")); 15 | 16 | cfg.generate("../mimalloc-sys/src/lib.rs", "all.rs"); 17 | } 18 | -------------------------------------------------------------------------------- /mimalloc-sys-test/test/main.rs: -------------------------------------------------------------------------------- 1 | #![allow(bad_style, improper_ctypes, dead_code, unused_imports)] 2 | 3 | extern crate mimalloc_sys; 4 | extern crate libc; 5 | 6 | use mimalloc_sys::*; 7 | use libc::{size_t, c_void}; 8 | 9 | include!(concat!(env!("OUT_DIR"), "/all.rs")); 10 | -------------------------------------------------------------------------------- /mimalloc-sys/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | [[package]] 4 | name = "cc" 5 | version = "1.0.37" 6 | source = "registry+https://github.com/rust-lang/crates.io-index" 7 | 8 | [[package]] 9 | name = "cmake" 10 | version = "0.1.40" 11 | source = "registry+https://github.com/rust-lang/crates.io-index" 12 | dependencies = [ 13 | "cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", 14 | ] 15 | 16 | [[package]] 17 | name = "fs_extra" 18 | version = "1.1.0" 19 | source = "registry+https://github.com/rust-lang/crates.io-index" 20 | 21 | [[package]] 22 | name = "libc" 23 | version = "0.2.58" 24 | source = "registry+https://github.com/rust-lang/crates.io-index" 25 | 26 | [[package]] 27 | name = "mimalloc-sys" 28 | version = "0.1.1" 29 | dependencies = [ 30 | "cmake 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", 31 | "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 32 | "libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)", 33 | ] 34 | 35 | [metadata] 36 | "checksum cc 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)" = "39f75544d7bbaf57560d2168f28fd649ff9c76153874db88bdbdfd839b1a7e7d" 37 | "checksum cmake 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "2ca4386c8954b76a8415b63959337d940d724b336cabd3afe189c2b51a7e1ff0" 38 | "checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674" 39 | "checksum libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "6281b86796ba5e4366000be6e9e18bf35580adf9e63fbe2294aadb587613a319" 40 | -------------------------------------------------------------------------------- /mimalloc-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mimalloc-sys" 3 | version = "0.1.1" 4 | authors = ["gnzlbg "] 5 | edition = "2018" 6 | build = "build.rs" 7 | links = "mimalloc" 8 | license = "MIT/Apache-2.0" 9 | repository = "https://github.com/gnzlbg/mimallocator" 10 | homepage = 
"https://github.com/gnzlbg/mimallocator" 11 | documentation = "https://docs.rs/mimalloc-sys" 12 | keywords = ["allocator", "mimalloc"] 13 | categories = ["memory-management", "api-bindings"] 14 | description = """ 15 | Rust FFI bindings to mimalloc 16 | """ 17 | 18 | [badges] 19 | travis-ci = { repository = "gnzlbg/mimallocator" } 20 | is-it-maintained-issue-resolution = { repository = "gnzlbg/mimallocator" } 21 | is-it-maintained-open-issues = { repository = "gnzlbg/mimallocator" } 22 | maintenance = { status = "actively-developed" } 23 | 24 | [dependencies] 25 | libc = "0.2" 26 | 27 | [build-dependencies] 28 | cmake = "0.1" 29 | fs_extra = "^1.1" 30 | -------------------------------------------------------------------------------- /mimalloc-sys/README.md: -------------------------------------------------------------------------------- 1 | Raw C FFI bindings to mimalloc 2 | === 3 | 4 | This crates provides raw C FFI bindings to the [`mimalloc`] library. 5 | 6 | ## Documentation 7 | 8 | The documentation of the FFI bindings can be found in [docs.rs]. 9 | 10 | **Current mimalloc version**: 1.0.1 11 | 12 | **Build dependencies**: `cmake`. 13 | 14 | ## Cargo features 15 | 16 | * `secure`.` 17 | 18 | ## Platform support 19 | 20 | The following table describes the supported platforms: 21 | 22 | * `build`: does the library compile for the target? 23 | * `run`: do our own tests pass on the target? 24 | * `mimalloc`: do `mimalloc`'s tests pass on the target? 25 | * `valgrind`: do our own tests pass under valgrind? 26 | 27 | Tier 1 targets are tested on all Rust channels (stable, beta, and nightly). 28 | All other targets are only tested on Rust nightly. 29 | 30 | | **Apple** targets: | build | run | mimalloc | valgrind | 31 | |----------------------------------|-----------|---------|----------|----------| 32 | | `x86_64-apple-darwin` | ✓ | ✓ | ✗ | ✗ | 33 | 34 | ## License 35 | 36 | This project is licensed under either of 37 | 38 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 39 | http://www.apache.org/licenses/LICENSE-2.0) 40 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 41 | http://opensource.org/licenses/MIT) 42 | 43 | at your option. 44 | 45 | ## Contribution 46 | 47 | Unless you explicitly state otherwise, any contribution intentionally submitted 48 | for inclusion in `mimalloc-sys` by you, as defined in the Apache-2.0 license, 49 | shall be dual licensed as above, without any additional terms or conditions. 50 | 51 | [`mimalloc`]: https://github.com/microsoft/mimalloc 52 | -------------------------------------------------------------------------------- /mimalloc-sys/build.rs: -------------------------------------------------------------------------------- 1 | //! Builds and links mimalloc 2 | 3 | macro_rules! 
info { 4 | ($($args:tt)*) => { println!($($args)*) } 5 | } 6 | 7 | use std::{env, fs, path::PathBuf}; 8 | 9 | fn main() { 10 | // Get environment variables: 11 | let target = env::var("TARGET").expect("TARGET was not set"); 12 | let host = env::var("HOST").expect("HOST was not set"); 13 | let profile = env::var("PROFILE").expect("PROFILE was not set"); 14 | let num_jobs = env::var("NUM_JOBS").expect("NUM_JOBS was not set"); 15 | let out_dir = 16 | PathBuf::from(env::var_os("OUT_DIR").expect("OUT_DIR was not set")); 17 | let src_dir = env::current_dir().expect("failed to get current directory"); 18 | let build_dir = out_dir.join("build"); 19 | let mimalloc_src_dir = src_dir.join("mimalloc"); 20 | let mimalloc_out_src_dir = build_dir.join("mimalloc"); 21 | let rust_impl_lib_dir = src_dir.join("rust_impl/target/debug"); 22 | 23 | info!("TARGET={}", target); 24 | info!("HOST={}", host); 25 | info!("PROFILE={}", profile); 26 | info!("NUM_JOBS={}", num_jobs); 27 | info!("OUT_DIR={:?}", out_dir); 28 | info!("BUILD_DIR={:?}", build_dir); 29 | info!("SRC_DIR={:?}", src_dir); 30 | info!("mimalloc_src_dir={:?}", mimalloc_src_dir); 31 | info!("mimalloc_out_src_dir={:?}", mimalloc_out_src_dir); 32 | info!("rust_impl_lib_dir={:?}", rust_impl_lib_dir); 33 | 34 | // Copy the mimalloc source code to the OUT_DIR: 35 | // 36 | // This ensures that building mimalloc-sys does not modify 37 | // the source directory. 38 | fs::create_dir_all(&build_dir).expect("failed to create build directory"); 39 | if mimalloc_out_src_dir.exists() { 40 | fs::remove_dir_all(&mimalloc_out_src_dir) 41 | .expect("failed to remove mimalloc source from the OUT_DIR"); 42 | } 43 | let mut copy_options = fs_extra::dir::CopyOptions::new(); 44 | copy_options.overwrite = true; 45 | copy_options.copy_inside = true; 46 | fs_extra::dir::copy( 47 | &mimalloc_src_dir, 48 | &mimalloc_out_src_dir, 49 | &copy_options, 50 | ) 51 | .expect("failed to copy mimalloc source code to OUT_DIR"); 52 | assert!(mimalloc_out_src_dir.exists()); 53 | 54 | // Build mimalloc, linking it against the prebuilt rust_impl cdylib 55 | let dst = cmake::Config::new(mimalloc_out_src_dir) 56 | .define("OVERRIDE", "OFF") 57 | .cflag(format!("-L {}", rust_impl_lib_dir.to_str().unwrap())) 58 | .cflag("-lrust_impl") 59 | .build(); 60 | 61 | println!("cargo:rustc-link-search=native={}/lib", dst.display()); 62 | let lib_name = match profile.as_str() { 63 | "debug" => "mimalloc-debug", 64 | "release" => "mimalloc", 65 | p => panic!("unknown profile \"{}\"", p), 66 | }; 67 | println!("cargo:rustc-link-lib=dylib={}", lib_name); 68 | } 69 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rust_impl" 3 | version = "0.1.0" 4 | authors = ["Robert Rusch "] 5 | edition = "2018" 6 | 7 | [lib] 8 | name = "rust_impl" 9 | crate-type = ["cdylib"] 10 | 11 | [dependencies] 12 | libc = "0.2.58" 13 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/alloc-aligned.rs: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | Copyright (c) 2018, Microsoft Research, Daan Leijen 3 | This is free software; you can redistribute it and/or modify it under the 4 | terms of the MIT license. 
A copy of the license can be found in the file 5 | "LICENSE" at the root of this distribution. 6 | -----------------------------------------------------------------------------*/ 7 | // memset 8 | // ------------------------------------------------------ 9 | // Aligned Allocation 10 | // ------------------------------------------------------ 11 | // note: we don't require `size > offset`, we just guarantee that 12 | // the address at offset is aligned regardless of the allocated size. 13 | pub static SIZE_MAX: c_long = 18446744073709551615; 14 | // overflow 15 | // try if there is a current small block with just the right alignment 16 | pub static MI_SMALL_SIZE_MAX: c_long = 17 | 128 * std::mem::size_of::<*mut c_void>(); 18 | unsafe fn mi_heap_malloc_zero_aligned_at(mut heap: &mut mi_heap_t, 19 | mut size: usize, 20 | mut alignment: usize, 21 | mut offset: usize, mut zero: bool) 22 | -> *mut c_void { 23 | if alignment > 0 { 24 | 0 25 | } else { 26 | _mi_assert_fail("alignment > 0", "src/alloc-aligned.c", 21, 27 | "mi_heap_malloc_zero_aligned_at") 28 | } 29 | if alignment <= std::mem::size_of::<uintptr_t>() { 30 | return _mi_heap_malloc_zero(heap, size, zero != 0); 31 | } 32 | if size >= (SIZE_MAX - alignment) { return ptr::null_mut(); } 33 | if size <= MI_SMALL_SIZE_MAX { 34 | let mut page = _mi_heap_get_free_small_page(heap, size); 35 | if !page.free.is_null() && 36 | (((page.free as usize) + offset) % alignment) == 0 { 37 | _mi_stat_increase(&mut ((heap).tld.stats.malloc), size); 38 | let mut p = _mi_page_malloc(heap, page, size); 39 | if zero != 0 { memset(p, 0, size); } 40 | return p; 41 | }; 42 | } 43 | // otherwise over-allocate 44 | let mut p = _mi_heap_malloc_zero(heap, size + alignment - 1, zero != 0); 45 | if p.is_null() { return ptr::null_mut(); } 46 | // .. 
and align within the allocation 47 | _mi_ptr_page(p).flags.has_aligned = 48 | true; // reallocation still fits, is aligned and not more than 50% waste 49 | let mut adjust = alignment - (((p as usize) + offset) % alignment); 50 | let mut aligned_p = 51 | if adjust == alignment { 52 | p 53 | } else { ((p as usize) + adjust) as *mut c_void }; 54 | return aligned_p; 55 | } 56 | unsafe fn mi_malloc_zero_aligned_at(mut size: usize, mut alignment: usize, 57 | mut offset: usize, mut zero: bool) 58 | -> *mut c_void { 59 | return mi_heap_malloc_zero_aligned_at(mi_get_default_heap(), size, 60 | alignment, offset, zero != 0); 61 | } 62 | #[no_mangle] 63 | pub unsafe extern "C" fn mi_malloc_aligned_at(mut size: usize, 64 | mut alignment: usize, 65 | mut offset: usize) 66 | -> *mut c_void { 67 | return mi_malloc_zero_aligned_at(size, alignment, offset, false); 68 | } 69 | #[no_mangle] 70 | pub unsafe extern "C" fn mi_malloc_aligned(mut size: usize, 71 | mut alignment: usize) 72 | -> *mut c_void { 73 | return mi_malloc_aligned_at(size, alignment, 0); 74 | } 75 | #[no_mangle] 76 | pub unsafe extern "C" fn mi_zalloc_aligned_at(mut size: usize, 77 | mut alignment: usize, 78 | mut offset: usize) 79 | -> *mut c_void { 80 | return mi_malloc_zero_aligned_at(size, alignment, offset, true); 81 | } 82 | #[no_mangle] 83 | pub unsafe extern "C" fn mi_zalloc_aligned(mut size: usize, 84 | mut alignment: usize) 85 | -> *mut c_void { 86 | return mi_zalloc_aligned_at(size, alignment, 0); 87 | } 88 | #[no_mangle] 89 | pub unsafe extern "C" fn mi_calloc_aligned_at(mut count: usize, 90 | mut size: usize, 91 | mut alignment: usize, 92 | mut offset: usize) 93 | -> *mut c_void { 94 | let mut total: usize; 95 | if mi_mul_overflow(count, size, &mut total) { return ptr::null_mut(); } 96 | return mi_zalloc_aligned_at(total, alignment, offset); 97 | } 98 | #[no_mangle] 99 | pub unsafe extern "C" fn mi_calloc_aligned(mut count: usize, mut size: usize, 100 | mut alignment: usize) 101 | -> *mut c_void { 102 | let mut total: usize; 103 | if mi_mul_overflow(count, size, &mut total) { return ptr::null_mut(); } 104 | return mi_zalloc_aligned(total, alignment); 105 | } 106 | unsafe fn mi_realloc_zero_aligned_at(mut p: *mut c_void, mut newsize: usize, 107 | mut alignment: usize, mut offset: usize, 108 | mut zero: bool) -> *mut c_void { 109 | if alignment > 0 { 110 | 0 111 | } else { 112 | _mi_assert_fail("alignment > 0", "src/alloc-aligned.c", 90, 113 | "mi_realloc_zero_aligned_at") 114 | } 115 | if alignment <= std::mem::size_of::<uintptr_t>() { 116 | return _mi_realloc_zero(p, newsize, zero != 0); 117 | } 118 | if p.is_null() { 119 | return mi_malloc_zero_aligned_at(newsize, alignment, offset, 120 | zero != 0); 121 | } 122 | let mut size = mi_usable_size(p); 123 | if newsize <= size && newsize >= (size - (size / 2)) && 124 | (((p as usize) + offset) % alignment) == 0 { 125 | return p; 126 | } else { 127 | let mut newp = mi_malloc_aligned_at(newsize, alignment, offset); 128 | if !newp.is_null() { 129 | if zero != 0 != 0 && newsize > size { 130 | // also set last word in the previous allocation to zero to ensure any padding is zero-initialized 131 | let mut start = 132 | if size >= std::mem::size_of::<uintptr_t>() { 133 | size - std::mem::size_of::<uintptr_t>() 134 | } else { 0 }; // only free if successful 135 | memset((newp as *mut u8).offset(start), 0, 136 | newsize - 137 | start); // use offset of previous allocation (p can be NULL) 138 | } 139 | memcpy(newp, p as *const c_void, 140 | if newsize > size { size } else { newsize }); 141 | mi_free(p); 142 | } 143 | return 
newp; 144 | }; 145 | } 146 | unsafe fn _mi_realloc_aligned(mut p: *mut c_void, mut newsize: usize, 147 | mut alignment: usize, mut zero: bool) 148 | -> *mut c_void { 149 | if alignment > 0 { 150 | 0 151 | } else { 152 | _mi_assert_fail("alignment > 0", "src/alloc-aligned.c", 114, 153 | "_mi_realloc_aligned") 154 | } 155 | if alignment <= std::mem::size_of::<uintptr_t>() { 156 | return _mi_realloc_zero(p, newsize, zero != 0); 157 | } 158 | let mut offset = ((p as usize) % alignment); 159 | return mi_realloc_zero_aligned_at(p, newsize, alignment, offset, 160 | zero != 0); 161 | } 162 | #[no_mangle] 163 | pub unsafe extern "C" fn mi_realloc_aligned_at(mut p: *mut c_void, 164 | mut newsize: usize, 165 | mut alignment: usize, 166 | mut offset: usize) 167 | -> *mut c_void { 168 | return mi_realloc_zero_aligned_at(p, newsize, alignment, offset, false); 169 | } 170 | #[no_mangle] 171 | pub unsafe extern "C" fn mi_realloc_aligned(mut p: *mut c_void, 172 | mut newsize: usize, 173 | mut alignment: usize) 174 | -> *mut c_void { 175 | return _mi_realloc_aligned(p, newsize, alignment, false); 176 | } 177 | #[no_mangle] 178 | pub unsafe extern "C" fn mi_rezalloc_aligned_at(mut p: *mut c_void, 179 | mut newsize: usize, 180 | mut alignment: usize, 181 | mut offset: usize) 182 | -> *mut c_void { 183 | return mi_realloc_zero_aligned_at(p, newsize, alignment, offset, true); 184 | } 185 | #[no_mangle] 186 | pub unsafe extern "C" fn mi_rezalloc_aligned(mut p: *mut c_void, 187 | mut newsize: usize, 188 | mut alignment: usize) 189 | -> *mut c_void { 190 | return _mi_realloc_aligned(p, newsize, alignment, true); 191 | } 192 | #[no_mangle] 193 | pub unsafe extern "C" fn mi_recalloc_aligned_at(mut p: *mut c_void, 194 | mut count: usize, 195 | mut size: usize, 196 | mut alignment: usize, 197 | mut offset: usize) 198 | -> *mut c_void { 199 | let mut total: usize; 200 | if mi_mul_overflow(count, size, &mut total) { return ptr::null_mut(); } 201 | return mi_rezalloc_aligned_at(p, total, alignment, offset); 202 | } 203 | #[no_mangle] 204 | pub unsafe extern "C" fn mi_recalloc_aligned(mut p: *mut c_void, 205 | mut count: usize, 206 | mut size: usize, 207 | mut alignment: usize) 208 | -> *mut c_void { 209 | let mut total: usize; 210 | if mi_mul_overflow(count, size, &mut total) { return ptr::null_mut(); } 211 | return mi_rezalloc_aligned(p, total, alignment); 212 | } 213 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/alloc-override-osx.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/alloc-override-win.rs: -------------------------------------------------------------------------------- 1 | failed function body: 2 | • CompoundStmt("" src/alloc-override-win.c:456:1) 3 | • IfStmt("" src/alloc-override-win.c:457:3) 4 | • BinaryOperator("" src/alloc-override-win.c:457:7) 5 | +Int<"int"> 6 | • ImplicitCastExpr!("original" src/alloc-override-win.c:457:14) 7 | +Pointer<"void *" ->Void<"void">> 8 | • MemberRefExpr("original" src/alloc-override-win.c:457:14) 9 | +Pointer<"void *" ->Void<"void">> 10 | • ImplicitCastExpr!("patch" src/alloc-override-win.c:457:7) 11 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 12 | • 
DeclRefExpr("patch" src/alloc-override-win.c:457:7) 13 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 14 | • ParenExpr("" src/alloc-override-win.c:457:26) 15 | +Pointer<"void *" ->Void<"void">> 16 | • CStyleCastExpr("" src/alloc-override-win.c:457:26) 17 | +Pointer<"void *" ->Void<"void">> 18 | • IntegerLiteral("" src/alloc-override-win.c:457:26) 19 | +Int<"int"> 20 | • ReturnStmt("" src/alloc-override-win.c:457:32) 21 | • ImplicitCastExpr!("" src/alloc-override-win.c:457:39) 22 | +Bool<"_Bool"> 23 | • IntegerLiteral("" src/alloc-override-win.c:457:39) 24 | +Int<"int"> 25 | • IfStmt("" src/alloc-override-win.c:458:3) 26 | • BinaryOperator("" src/alloc-override-win.c:458:7) 27 | +Int<"int"> 28 | • BinaryOperator("" src/alloc-override-win.c:458:7) 29 | +Int<"int"> 30 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:458:7) 31 | +UInt<"unsigned int"> 32 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:458:7) 33 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 34 | • DeclRefExpr("apply" src/alloc-override-win.c:458:7) 35 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 36 | • ImplicitCastExpr!("PATCH_TARGET_TERM" src/alloc-override-win.c:458:16) 37 | +UInt<"unsigned int"> 38 | • DeclRefExpr("PATCH_TARGET_TERM" src/alloc-override-win.c:458:16) 39 | +Int<"int"> 40 | • BinaryOperator("" src/alloc-override-win.c:458:37) 41 | +Int<"int"> 42 | • ImplicitCastExpr!("target_term" src/alloc-override-win.c:458:44) 43 | +Pointer<"void *" ->Void<"void">> 44 | • MemberRefExpr("target_term" src/alloc-override-win.c:458:44) 45 | +Pointer<"void *" ->Void<"void">> 46 | • ImplicitCastExpr!("patch" src/alloc-override-win.c:458:37) 47 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 48 | • DeclRefExpr("patch" src/alloc-override-win.c:458:37) 49 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 50 | • ParenExpr("" src/alloc-override-win.c:458:59) 51 | +Pointer<"void *" ->Void<"void">> 52 | • CStyleCastExpr("" src/alloc-override-win.c:458:59) 53 | +Pointer<"void *" ->Void<"void">> 54 | • IntegerLiteral("" src/alloc-override-win.c:458:59) 55 | +Int<"int"> 56 | • BinaryOperator("" src/alloc-override-win.c:458:65) 57 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 58 | • DeclRefExpr("apply" src/alloc-override-win.c:458:65) 59 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 60 | • ImplicitCastExpr!("PATCH_TARGET" src/alloc-override-win.c:458:73) 61 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 62 | • DeclRefExpr("PATCH_TARGET" src/alloc-override-win.c:458:73) 63 | +Int<"int"> 64 | • IfStmt("" src/alloc-override-win.c:459:3) 65 | • BinaryOperator("" src/alloc-override-win.c:459:7) 66 | +Int<"int"> 67 | • ImplicitCastExpr!("applied" src/alloc-override-win.c:459:14) 68 | +UInt<"unsigned int"> 69 | • 
ImplicitCastExpr!("applied" src/alloc-override-win.c:459:14) 70 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 71 | • MemberRefExpr("applied" src/alloc-override-win.c:459:14) 72 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 73 | • ImplicitCastExpr!("patch" src/alloc-override-win.c:459:7) 74 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 75 | • DeclRefExpr("patch" src/alloc-override-win.c:459:7) 76 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 77 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:459:25) 78 | +UInt<"unsigned int"> 79 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:459:25) 80 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 81 | • DeclRefExpr("apply" src/alloc-override-win.c:459:25) 82 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 83 | • ReturnStmt("" src/alloc-override-win.c:459:32) 84 | • ImplicitCastExpr!("" src/alloc-override-win.c:459:39) 85 | +Bool<"_Bool"> 86 | • IntegerLiteral("" src/alloc-override-win.c:459:39) 87 | +Int<"int"> 88 | • IfStmt("" src/alloc-override-win.c:462:3) 89 | • OpaqueValueExpr!("" builtin definitions) 90 | +Bool<"_Bool"> 91 | • ReturnStmt("" src/alloc-override-win.c:462:89) 92 | • ImplicitCastExpr!("" src/alloc-override-win.c:462:96) 93 | +Bool<"_Bool"> 94 | • IntegerLiteral("" src/alloc-override-win.c:462:96) 95 | +Int<"int"> 96 | • IfStmt("" src/alloc-override-win.c:463:3) 97 | • BinaryOperator("" src/alloc-override-win.c:463:7) 98 | +Int<"int"> 99 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:463:7) 100 | +UInt<"unsigned int"> 101 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:463:7) 102 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 103 | • DeclRefExpr("apply" src/alloc-override-win.c:463:7) 104 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 105 | • ImplicitCastExpr!("PATCH_NONE" src/alloc-override-win.c:463:16) 106 | +UInt<"unsigned int"> 107 | • DeclRefExpr("PATCH_NONE" src/alloc-override-win.c:463:16) 108 | +Int<"int"> 109 | • CompoundStmt("" src/alloc-override-win.c:463:28) 110 | • CompoundStmt("" src/alloc-override-win.c:466:8) 111 | • DeclStmt("" src/alloc-override-win.c:467:5) 112 | • VarDecl("target" src/alloc-override-win.c:467:11) 113 | +Pointer<"void *" ->Void<"void">> 114 | • ParenExpr("" src/alloc-override-win.c:467:20) 115 | +Pointer<"void *" ->Void<"void">> 116 | • ConditionalOperator("" src/alloc-override-win.c:467:21) 117 | +Pointer<"void *" ->Void<"void">> 118 | • BinaryOperator("" src/alloc-override-win.c:467:21) 119 | +Int<"int"> 120 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:467:21) 121 | +UInt<"unsigned int"> 122 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:467:21) 123 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 124 | • DeclRefExpr("apply" src/alloc-override-win.c:467:21) 125 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 
126 | • ImplicitCastExpr!("PATCH_TARGET" src/alloc-override-win.c:467:30) 127 | +UInt<"unsigned int"> 128 | • DeclRefExpr("PATCH_TARGET" src/alloc-override-win.c:467:30) 129 | +Int<"int"> 130 | • ImplicitCastExpr!("target" src/alloc-override-win.c:467:52) 131 | +Pointer<"void *" ->Void<"void">> 132 | • MemberRefExpr("target" src/alloc-override-win.c:467:52) 133 | +Pointer<"void *" ->Void<"void">> 134 | • ImplicitCastExpr!("patch" src/alloc-override-win.c:467:45) 135 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 136 | • DeclRefExpr("patch" src/alloc-override-win.c:467:45) 137 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 138 | • ImplicitCastExpr!("target_term" src/alloc-override-win.c:467:68) 139 | +Pointer<"void *" ->Void<"void">> 140 | • MemberRefExpr("target_term" src/alloc-override-win.c:467:68) 141 | +Pointer<"void *" ->Void<"void">> 142 | • ImplicitCastExpr!("patch" src/alloc-override-win.c:467:61) 143 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 144 | • DeclRefExpr("patch" src/alloc-override-win.c:467:61) 145 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 146 | • NullStmt("" src/alloc-override-win.c:468:37) 147 | • BinaryOperator("" src/alloc-override-win.c:471:3) 148 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 149 | • MemberRefExpr("applied" src/alloc-override-win.c:471:10) 150 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 151 | • ImplicitCastExpr!("patch" src/alloc-override-win.c:471:3) 152 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 153 | • DeclRefExpr("patch" src/alloc-override-win.c:471:3) 154 | +Pointer<"mi_patch_t *" canon=Pointer<"struct mi_patch_s *" ->Record<"struct mi_patch_s" decl=StructDecl("mi_patch_s" src/alloc-override-win.c:378:16)>> ->Typedef<"mi_patch_t" decl=TypedefDecl("mi_patch_t" src/alloc-override-win.c:385:3)>> 155 | • ImplicitCastExpr!("apply" src/alloc-override-win.c:471:20) 156 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 157 | • DeclRefExpr("apply" src/alloc-override-win.c:471:20) 158 | +Typedef<"patch_apply_t" decl=TypedefDecl("patch_apply_t" src/alloc-override-win.c:376:3)> 159 | • ReturnStmt("" src/alloc-override-win.c:473:3) 160 | • ImplicitCastExpr!("" src/alloc-override-win.c:473:10) 161 | +Bool<"_Bool"> 162 | • IntegerLiteral("" src/alloc-override-win.c:473:10) 163 | +Int<"int"> 164 | 165 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/alloc-override.rs: 
-------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/alloc.rs: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | Copyright (c) 2018, Microsoft Research, Daan Leijen 3 | This is free software; you can redistribute it and/or modify it under the 4 | terms of the MIT license. A copy of the license can be found in the file 5 | "LICENSE" at the root of this distribution. 6 | -----------------------------------------------------------------------------*/ 7 | // memset 8 | // ------------------------------------------------------ 9 | // Allocation 10 | // ------------------------------------------------------ 11 | // Fast allocation in a page: just pop from the free list. 12 | // Fall back to generic allocation only if the list is empty. 13 | // slow path 14 | // pop from the free list 15 | pub static MI_DEBUG_UNINIT: c_int = 208; 16 | pub static MI_LARGE_SIZE_MAX: c_long = ((1 << (6 + (13 + 3))) / 8); 17 | #[no_mangle] 18 | pub unsafe extern "C" fn _mi_page_malloc(mut heap: &mut mi_heap_t, 19 | mut page: &mut mi_page_t, 20 | mut size: usize) -> *mut c_void { 21 | let mut block = page.free; 22 | if __builtin_expect((block.is_null()), 0) != 0 { 23 | return _mi_malloc_generic(heap, size); 24 | } 25 | page.free = mi_block_next(page, block); 26 | page.used += 1; 27 | memset(block as *mut _, MI_DEBUG_UNINIT, size); 28 | if size <= MI_LARGE_SIZE_MAX { 29 | _mi_stat_increase(&mut ((heap).tld.stats.normal[_mi_bin(size)]), 1); 30 | } 31 | return block as *mut _; 32 | } 33 | // allocate a small block 34 | pub static MI_SMALL_SIZE_MAX: c_long = 35 | 128 * std::mem::size_of::<*mut c_void>(); 36 | #[no_mangle] 37 | pub unsafe extern "C" fn mi_heap_malloc_small(mut heap: *mut mi_heap_t, 38 | mut size: usize) 39 | -> *mut c_void { 40 | if size <= MI_SMALL_SIZE_MAX { 41 | 0 42 | } else { 43 | _mi_assert_fail("size <= MI_SMALL_SIZE_MAX", "src/alloc.c", 48, 44 | "mi_heap_malloc_small") 45 | } 46 | let mut page = _mi_heap_get_free_small_page(heap, size); 47 | return _mi_page_malloc(heap, page, size); 48 | } 49 | #[no_mangle] 50 | pub unsafe extern "C" fn mi_malloc_small(mut size: usize) -> *mut c_void { 51 | return mi_heap_malloc_small(mi_get_default_heap(), size); 52 | } 53 | // zero initialized small block 54 | #[no_mangle] 55 | pub unsafe extern "C" fn mi_zalloc_small(mut size: usize) -> *mut c_void { 56 | let mut p = mi_malloc_small(size); 57 | if !p.is_null() { memset(p, 0, size); } 58 | return p; 59 | } 60 | // The main allocation function 61 | #[no_mangle] 62 | pub unsafe extern "C" fn mi_heap_malloc(mut heap: *mut mi_heap_t, 63 | mut size: usize) -> *mut c_void { 64 | if !heap.is_null() { 65 | 0 66 | } else { 67 | _mi_assert_fail("heap!=NULL", "src/alloc.c", 66, "mi_heap_malloc") 68 | } // heaps are thread local 69 | if heap.thread_id == 0 || heap.thread_id == _mi_thread_id() { 70 | 0 71 | } else { 72 | _mi_assert_fail("heap->thread_id == 0 || heap->thread_id == _mi_thread_id()", 73 | "src/alloc.c", 67, "mi_heap_malloc") 74 | } // overestimate for aligned sizes 75 | let mut p: *mut c_void; 76 | if __builtin_expect((size <= MI_SMALL_SIZE_MAX), 1) != 0 { 77 | p = mi_heap_malloc_small(heap, size); 78 | } else { p = _mi_malloc_generic(heap, size); } 79 | if !p.is_null() { 80 | if !mi_heap_is_initialized(heap) { heap = mi_get_default_heap(); } 81 | 
_mi_stat_increase(&mut ((heap).tld.stats.malloc), mi_good_size(size)); 82 | } 83 | return p; 84 | } 85 | #[no_mangle] 86 | pub unsafe extern "C" fn mi_malloc(mut size: usize) -> *mut c_void { 87 | return mi_heap_malloc(mi_get_default_heap(), size); 88 | } 89 | #[no_mangle] 90 | pub unsafe extern "C" fn _mi_heap_malloc_zero(mut heap: *mut mi_heap_t, 91 | mut size: usize, mut zero: bool) 92 | -> *mut c_void { 93 | let mut p = mi_heap_malloc(heap, size); 94 | if zero != 0 != 0 && !p.is_null() { memset(p, 0, size); } 95 | return p; 96 | } 97 | #[no_mangle] 98 | pub unsafe extern "C" fn mi_heap_zalloc(mut heap: *mut mi_heap_t, 99 | mut size: usize) -> *mut c_void { 100 | return _mi_heap_malloc_zero(heap, size, true); 101 | } 102 | #[no_mangle] 103 | pub unsafe extern "C" fn mi_zalloc(mut size: usize) -> *mut c_void { 104 | return mi_heap_zalloc(mi_get_default_heap(), size); 105 | } 106 | // ------------------------------------------------------ 107 | // Free 108 | // ------------------------------------------------------ 109 | // multi-threaded free 110 | // unlikely: this only happens on the first concurrent free in a page that is in the full list 111 | // usual: directly add to page thread_free list 112 | pub static MI_TF_PTR_SHIFT: c_int = 2; 113 | unsafe fn _mi_free_block_mt(mut page: &mut mi_page_t, 114 | mut block: *mut mi_block_t) { 115 | let mut tfree: 116 | mi_thread_free_t; // increment the thread free count and return 117 | let mut tfreex: mi_thread_free_t; 118 | let mut use_delayed: bool; 119 | loop { 120 | tfreex = { tfree = page.thread_free; tfree }; 121 | use_delayed = (tfree.delayed == MI_USE_DELAYED_FREE); 122 | if __builtin_expect((use_delayed) != 0, 0) != 0 { 123 | tfreex.delayed = MI_DELAYED_FREEING; 124 | } else { 125 | mi_block_set_next(page, block, 126 | (tfree.head << MI_TF_PTR_SHIFT) as 127 | *mut mi_block_t); 128 | tfreex.head = (block as usize) >> MI_TF_PTR_SHIFT; 129 | } 130 | if !!mi_atomic_compare_exchange(&mut page.thread_free as 131 | *mut volatile_uintptr_t, 132 | tfreex.value, tfree.value) { 133 | break 134 | }; 135 | } 136 | if __builtin_expect((use_delayed == 0), 1) != 0 { 137 | mi_atomic_increment(&mut page.thread_freed); 138 | } else { 139 | // racy read on `heap`, but ok because MI_DELAYED_FREEING is set (see `mi_heap_delete` and `mi_heap_collect_abandon`) 140 | let mut heap = page.heap; 141 | if !heap.is_null() { 142 | // add to the delayed free list of this heap. 
(do this atomically as the lock only protects heap memory validity) 143 | let mut dfree: 144 | *mut mi_block_t; // and reset the MI_DELAYED_FREEING flag 145 | loop { 146 | dfree = heap.thread_delayed_free as *mut mi_block_t; 147 | mi_block_set_nextx(heap.cookie, block, dfree); 148 | if !!mi_atomic_compare_exchange_ptr(&mut heap.thread_delayed_free 149 | as *mut *mut c_void, 150 | block as *mut _, 151 | dfree as *mut _) { 152 | break 153 | }; 154 | }; 155 | } 156 | loop { 157 | tfreex = { tfree = page.thread_free; tfree }; 158 | tfreex.delayed = MI_NO_DELAYED_FREE; 159 | if !!mi_atomic_compare_exchange(&mut page.thread_free as 160 | *mut volatile_uintptr_t, 161 | tfreex.value, tfree.value) { 162 | break 163 | }; 164 | }; 165 | }; 166 | } 167 | // regular free 168 | pub static MI_DEBUG_FREED: c_int = 223; 169 | unsafe fn _mi_free_block(mut page: &mut mi_page_t, mut local: bool, 170 | mut block: *mut mi_block_t) { 171 | memset(block as *mut _, MI_DEBUG_FREED, page.block_size); 172 | // and push it on the free list 173 | if __builtin_expect((local) != 0, 1) != 0 174 | { // owning thread can free a block directly 175 | mi_block_set_next(page, block, page.local_free); 176 | page.local_free = block; 177 | page.used -= 1; 178 | if __builtin_expect(mi_page_all_free(page), 0) != 0 { 179 | _mi_page_retire(page); 180 | } else if __builtin_expect((page.flags.in_full) != 0, 0) != 0 { 181 | _mi_page_unfull(page); 182 | }; 183 | } else { _mi_free_block_mt(page, block); }; 184 | } 185 | // Adjust a block that was allocated aligned, to the actual start of the block in the page. 186 | #[no_mangle] 187 | pub unsafe extern "C" fn _mi_page_ptr_unalign(mut segment: 188 | *const mi_segment_t, 189 | mut page: &mi_page_t, 190 | mut p: *mut c_void) 191 | -> *mut mi_block_t { 192 | let mut diff = 193 | (p as 194 | *mut u8).offset(-_mi_page_start(segment, page, ptr::null_mut())); 195 | let mut adjust = (diff % page.block_size); 196 | return ((p as usize) - adjust) as *mut mi_block_t; 197 | } 198 | unsafe fn mi_free_generic(mut segment: *const mi_segment_t, 199 | mut page: &mut mi_page_t, mut local: bool, 200 | mut p: *mut c_void) { 201 | let mut block = 202 | if page.flags.has_aligned != 0 != 0 { 203 | _mi_page_ptr_unalign(segment, page, p) 204 | } else { p as *mut mi_block_t }; 205 | _mi_free_block(page, local != 0, block); 206 | } 207 | // Free a block 208 | // optimize: merge null check with the segment masking (below) 209 | //if (p == NULL) return; 210 | pub static MI_INTPTR_SIZE: c_int = 1 << 3; 211 | #[no_mangle] 212 | pub unsafe extern "C" fn mi_free(mut p: *mut c_void) { 213 | if __builtin_expect((((p as usize) & (MI_INTPTR_SIZE - 1)) != 0), 0) != 0 214 | { 215 | _mi_error_message("trying to free an invalid (unaligned) pointer: %p\n", 216 | p); // checks for (p==NULL) 217 | return; 218 | } 219 | let segment = _mi_ptr_segment(p as *const c_void); 220 | if segment.is_null() { return; } 221 | let mut local = (_mi_thread_id() == segment.thread_id); 222 | // preload, note: putting the thread_id in the page->flags does not improve performance 223 | if __builtin_expect((_mi_ptr_cookie(segment as *const _) != 224 | segment.cookie), 0) != 0 { 225 | _mi_error_message("trying to mi_free a pointer that does not point to a valid heap space: %p\n", 226 | p); 227 | return; 228 | } 229 | let mut page = _mi_segment_page_of(segment, p as *const c_void); 230 | let mut heap = mi_heap_get_default(); 231 | _mi_stat_decrease(&mut ((heap).tld.stats.malloc), mi_usable_size(p)); 232 | if page.block_size <= MI_LARGE_SIZE_MAX { 233 | 
_mi_stat_decrease(&mut ((heap).tld.stats.normal[_mi_bin(page.block_size)]), 234 | 1); 235 | } 236 | // huge page stat is accounted for in `_mi_page_retire` 237 | // adjust if it might be an un-aligned block 238 | if __builtin_expect((page.flags.value == 0), 1) != 0 { 239 | // note: merging both tests (local | value) does not matter for performance 240 | let mut block = 241 | p as *mut mi_block_t; // owning thread can free a block directly 242 | if __builtin_expect((local) != 0, 1) != 0 { 243 | mi_block_set_next(page, block, page.local_free); 244 | // note: moving this write earlier does not matter for performance 245 | page.local_free = 246 | block; // use atomic operations for a multi-threaded free 247 | page.used -= 248 | 1; // aligned blocks, or a full page; use the more generic path 249 | if __builtin_expect(mi_page_all_free(page), 0) != 0 { 250 | _mi_page_retire(page); 251 | }; 252 | } else { _mi_free_block_mt(page, block); }; 253 | } else { mi_free_generic(segment, page, local != 0, p); }; 254 | } 255 | #[no_mangle] 256 | pub unsafe extern "C" fn _mi_free_delayed_block(mut block: *mut mi_block_t) { 257 | let mut segment = _mi_ptr_segment(block as *const _); 258 | let mut page = _mi_segment_page_of(segment, block as *const _); 259 | _mi_free_block(page, true, block); 260 | } 261 | // Bytes available in a block 262 | #[no_mangle] 263 | pub unsafe extern "C" fn mi_usable_size(mut p: *mut c_void) -> usize { 264 | if p.is_null() { return 0; } 265 | let mut segment = _mi_ptr_segment(p as *const c_void); 266 | let mut page = _mi_segment_page_of(segment, p as *const c_void); 267 | let mut size = page.block_size; 268 | if __builtin_expect((page.flags.has_aligned) != 0, 0) != 0 { 269 | let mut adjust = 270 | (p as 271 | *mut u8).offset(-(_mi_page_ptr_unalign(segment, page, p) as 272 | *mut u8)); 273 | return (size - adjust); 274 | } else { return size; }; 275 | } 276 | // ------------------------------------------------------ 277 | // ensure explicit external inline definitions are emitted! 
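A minimal sketch (not part of the port) of the owning-thread fast path that `_mi_free_block` above implements: freeing is an intrusive push onto the page-local free list. `Block` and `Page` are simplified stand-ins for `mi_block_t`/`mi_page_t`.

struct Block { next: *mut Block }
struct Page { local_free: *mut Block, used: usize }

// Owning-thread free: push the block onto the page-local free list.
unsafe fn free_block_local(page: &mut Page, block: *mut Block) {
    (*block).next = page.local_free; // link in front of the current head
    page.local_free = block;
    page.used -= 1; // the page becomes reclaimable once `used` reaches 0
}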
278 | // ------------------------------------------------------ 279 | // ------------------------------------------------------ 280 | // Allocation extensions 281 | // ------------------------------------------------------ 282 | #[no_mangle] 283 | pub unsafe extern "C" fn mi_heap_calloc(mut heap: *mut mi_heap_t, 284 | mut count: usize, mut size: usize) 285 | -> *mut c_void { 286 | let mut total: usize; 287 | if mi_mul_overflow(count, size, &mut total) { return ptr::null_mut(); } 288 | return mi_heap_zalloc(heap, total); 289 | } 290 | #[no_mangle] 291 | pub unsafe extern "C" fn mi_calloc(mut count: usize, mut size: usize) 292 | -> *mut c_void { 293 | return mi_heap_calloc(mi_get_default_heap(), count, size); 294 | } 295 | // Uninitialized `calloc` 296 | #[no_mangle] 297 | pub unsafe extern "C" fn mi_heap_mallocn(mut heap: *mut mi_heap_t, 298 | mut count: usize, mut size: usize) 299 | -> *mut c_void { 300 | let mut total: usize; 301 | if mi_mul_overflow(count, size, &mut total) { return ptr::null_mut(); } 302 | return mi_heap_malloc(heap, total); 303 | } 304 | #[no_mangle] 305 | pub unsafe extern "C" fn mi_mallocn(mut count: usize, mut size: usize) 306 | -> *mut c_void { 307 | return mi_heap_mallocn(mi_get_default_heap(), count, size); 308 | } 309 | // Expand in place or fail 310 | #[no_mangle] 311 | pub unsafe extern "C" fn mi_expand(mut p: *mut c_void, mut newsize: usize) 312 | -> *mut c_void { 313 | if p.is_null() { 314 | return ptr::null_mut(); // it fits 315 | } // reallocation still fits and not more than 50% waste 316 | let mut size = mi_usable_size(p); // maybe in another heap 317 | if newsize > size { return ptr::null_mut(); } 318 | return p; 319 | } 320 | #[no_mangle] 321 | pub unsafe extern "C" fn _mi_realloc_zero(mut p: *mut c_void, 322 | mut newsize: usize, mut zero: bool) 323 | -> *mut c_void { 324 | if p.is_null() { 325 | return _mi_heap_malloc_zero(mi_get_default_heap(), newsize, 326 | zero != 0); 327 | } 328 | let mut size = mi_usable_size(p); 329 | if newsize <= size && newsize >= (size / 2) { return p; } 330 | let mut newp = mi_malloc(newsize); 331 | if __builtin_expect((!newp.is_null()), 1) != 0 { 332 | if zero != 0 != 0 && newsize > size { 333 | // also set last word in the previous allocation to zero to ensure any padding is zero-initialized 334 | let mut start = 335 | if size >= std::mem::size_of::<isize>() { 336 | size - std::mem::size_of::<isize>() 337 | } else { 0 }; // only free if successful 338 | memset((newp as *mut u8).offset(start), 0, newsize - start); 339 | } 340 | memcpy(newp, p as *const c_void, 341 | if newsize > size { size } else { newsize }); 342 | mi_free(p); 343 | } 344 | return newp; 345 | } 346 | #[no_mangle] 347 | pub unsafe extern "C" fn mi_realloc(mut p: *mut c_void, mut newsize: usize) 348 | -> *mut c_void { 349 | return _mi_realloc_zero(p, newsize, false); 350 | } 351 | // Zero initialized reallocation 352 | #[no_mangle] 353 | pub unsafe extern "C" fn mi_rezalloc(mut p: *mut c_void, mut newsize: usize) 354 | -> *mut c_void { 355 | return _mi_realloc_zero(p, newsize, true); 356 | } 357 | #[no_mangle] 358 | pub unsafe extern "C" fn mi_recalloc(mut p: *mut c_void, mut count: usize, 359 | mut size: usize) -> *mut c_void { 360 | let mut total: usize; 361 | if mi_mul_overflow(count, size, &mut total) { return ptr::null_mut(); } 362 | return mi_rezalloc(p, total); 363 | } 364 | #[no_mangle] 365 | pub unsafe extern "C" fn mi_reallocn(mut p: *mut c_void, mut count: usize, 366 | mut size: usize) -> *mut c_void { 367 | let mut total: usize; 368 | if
mi_mul_overflow(count, size, &mut total) { return ptr::null_mut(); } 369 | return mi_realloc(p, total); 370 | } 371 | // Reallocate but free `p` on errors 372 | #[no_mangle] 373 | pub unsafe extern "C" fn mi_reallocf(mut p: *mut c_void, mut newsize: usize) 374 | -> *mut c_void { 375 | let mut newp = mi_realloc(p, newsize); 376 | if newp.is_null() && !p.is_null() { mi_free(p); } 377 | return newp; 378 | } 379 | // `strdup` using mi_malloc 380 | #[no_mangle] 381 | pub unsafe extern "C" fn mi_heap_strdup(mut heap: *mut mi_heap_t, 382 | mut s: *const i8) -> *mut i8 { 383 | if s.is_null() { return ptr::null_mut(); } 384 | let mut n = strlen(s); 385 | let mut t = mi_heap_malloc(heap, n + 1) as *mut i8; 386 | if !t.is_null() { memcpy(t as *mut _, s as *const _, n + 1); } 387 | return t; 388 | } 389 | #[no_mangle] 390 | pub unsafe extern "C" fn mi_strdup(mut s: *const i8) -> *mut i8 { 391 | return mi_heap_strdup(mi_get_default_heap(), s); 392 | } 393 | // `strndup` using mi_malloc 394 | #[no_mangle] 395 | pub unsafe extern "C" fn mi_heap_strndup(mut heap: *mut mi_heap_t, 396 | mut s: *const i8, mut n: usize) 397 | -> *mut i8 { 398 | if s.is_null() { return ptr::null_mut(); } 399 | let mut m = strlen(s); 400 | if n > m { n = m; } 401 | let mut t = mi_heap_malloc(heap, n + 1) as *mut i8; 402 | if t.is_null() { return ptr::null_mut(); } 403 | memcpy(t as *mut _, s as *const _, n); 404 | *t.offset(n) = 0i8; 405 | return t; 406 | } 407 | #[no_mangle] 408 | pub unsafe extern "C" fn mi_strndup(mut s: *const i8, mut n: usize) 409 | -> *mut i8 { 410 | return mi_heap_strndup(mi_get_default_heap(), s, n); 411 | } 412 | // `realpath` using mi_malloc 413 | // todo: use GetFullPathNameW to allow longer file names 414 | pub static PATH_MAX: usize = 4096; 415 | #[no_mangle] 416 | pub unsafe extern "C" fn mi_heap_realpath(mut heap: *mut mi_heap_t, 417 | mut fname: *const i8, 418 | mut resolved_name: *mut i8) 419 | -> *mut i8 { 420 | if !resolved_name.is_null() { 421 | return realpath(fname, resolved_name); // ok if `rname==NULL` 422 | } else { 423 | let mut buf: [i8; 4097]; 424 | let mut rname = realpath(fname, buf); 425 | return mi_heap_strndup(heap, rname, PATH_MAX); 426 | }; 427 | } 428 | #[no_mangle] 429 | pub unsafe extern "C" fn mi_realpath(mut fname: *const i8, 430 | mut resolved_name: *mut i8) -> *mut i8 { 431 | return mi_heap_realpath(mi_get_default_heap(), fname, resolved_name); 432 | } 433 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/heap.rs: -------------------------------------------------------------------------------- 1 | /*---------------------------------------------------------------------------- 2 | Copyright (c) 2018, Microsoft Research, Daan Leijen 3 | This is free software; you can redistribute it and/or modify it under the 4 | terms of the MIT license. A copy of the license can be found in the file 5 | "LICENSE" at the root of this distribution. 6 | -----------------------------------------------------------------------------*/ 7 | // memset, memcpy 8 | /* ----------------------------------------------------------- 9 | Helpers 10 | ----------------------------------------------------------- */ 11 | // return `true` if ok, `false` to break 12 | pub type heap_page_visitor_fun = unsafe extern "C" fn(*mut mi_heap_t, *mut mi_page_queue_t, *mut mi_page_t, *mut c_void, *mut c_void) -> bool; 13 | // Visit all pages in a heap; returns `false` if break was called.
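The visitor typedef above (its argument list is reconstructed from the call sites below) drives every whole-heap traversal in this file. Purely as an illustration, the same early-exit contract in safe Rust, with a closure standing in for the C function pointer:

// Visit items until the visitor returns `false`; report whether the walk
// ran to completion, mirroring the contract of `mi_heap_visit_pages`.
fn visit_all<T>(items: &[T], mut visitor: impl FnMut(&T) -> bool) -> bool {
    for item in items {
        if !visitor(item) {
            return false;
        }
    }
    true
}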
14 | // visit all pages 15 | pub static MI_BIN_FULL: c_long = (64 + 1); 16 | unsafe fn mi_heap_visit_pages(mut heap: *mut mi_heap_t, 17 | mut fn_: &mut heap_page_visitor_fun, 18 | mut arg1: *mut c_void, mut arg2: *mut c_void) 19 | -> bool { 20 | if heap.is_null() || heap.page_count == 0 { 21 | return 0 != 22 | 0; // save next in case the page gets removed from the queue 23 | } // and continue 24 | let mut count = 0; 25 | for mut i in 0..(MI_BIN_FULL + 1) { 26 | let mut pq = &mut heap.pages[i]; 27 | let mut page = pq.first; 28 | while !page.is_null() { 29 | let mut next = page.next; 30 | count += 1; 31 | if !fn_(heap, pq, page, arg1, arg2) { return false; } 32 | page = next; 33 | }; 34 | } 35 | return true; 36 | } 37 | /* ----------------------------------------------------------- 38 | "Collect" pages by migrating `local_free` and `thread_free` 39 | lists and freeing empty pages. This is done when a thread 40 | stops (and in that case abandons pages if there are still 41 | blocks alive) 42 | ----------------------------------------------------------- */ 43 | pub enum mi_collect_e { NORMAL, FORCE, ABANDON, } 44 | unsafe fn mi_heap_page_collect(mut heap: *mut mi_heap_t, 45 | mut pq: *mut mi_page_queue_t, 46 | mut page: *mut mi_page_t, 47 | mut arg_collect: *mut c_void, 48 | mut arg2: *mut c_void) -> bool { 49 | (arg2); 50 | (heap); 51 | let mut collect = arg_collect as mi_collect_t; 52 | _mi_page_free_collect(page); 53 | if mi_page_all_free(page) { 54 | // no more used blocks, free the page. TODO: should we retire here and be less aggressive? 55 | _mi_page_free(page, pq, 56 | collect != 57 | NORMAL); // still used blocks but the thread is done; abandon the page 58 | } else if collect == ABANDON { 59 | _mi_page_abandon(page, pq); // don't break 60 | } 61 | return true; 62 | } 63 | unsafe fn mi_heap_collect_ex(mut heap: &mut mi_heap_t, 64 | mut collect: mi_collect_t) { 65 | _mi_deferred_free(heap, collect > NORMAL); 66 | if !mi_heap_is_initialized(heap) { return; } 67 | // collect (some) abandoned pages 68 | if collect >= NORMAL && heap.no_reclaim == 0 { 69 | if collect == NORMAL 70 | { // this may free some segments (but also take ownership of abandoned pages) 71 | _mi_segment_try_reclaim_abandoned(heap, false, 72 | &mut heap.tld.segments); // the main thread is abandoned, try to free all abandoned segments. 73 | } else if collect == ABANDON && _mi_is_main_thread() != 0 && 74 | mi_heap_is_backing(heap) != 0 75 | { // if all memory is freed by now, all segments should be freed. 76 | _mi_segment_try_reclaim_abandoned(heap, true, 77 | &mut heap.tld.segments); 78 | }; 79 | } 80 | // if abandoning, mark all full pages to no longer add to delayed_free 81 | if collect == ABANDON { 82 | let mut page = (heap.pages[MI_BIN_FULL]).first; 83 | while !page.is_null() { 84 | _mi_page_use_delayed_free(page, false); 85 | page = page.next 86 | // set thread_free.delayed to MI_NO_DELAYED_FREE 87 | }; 88 | } 89 | // free thread delayed blocks. 90 | // (if abandoning, after this there are no more local references into the pages.) 
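`mi_heap_page_collect` and `mi_heap_collect_ex` compare `mi_collect_e` values with `!=`, `>` and `>=`, so the declaration order NORMAL < FORCE < ABANDON is load-bearing. A hypothetical Rust-native equivalent would derive that ordering explicitly:

// NORMAL < FORCE < ABANDON, so `collect >= Collect::Force` reads exactly
// like the C comparison `collect >= FORCE`.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
enum Collect { Normal, Force, Abandon }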
_mi_heap_delayed_free(heap); 92 | // collect all pages owned by this thread 93 | mi_heap_visit_pages(heap, &mut mi_heap_page_collect, 94 | (collect) as *mut c_void, ptr::null_mut()); 95 | // collect segment caches 96 | if collect >= FORCE { 97 | _mi_segment_thread_collect(&mut heap.tld.segments); 98 | }; 99 | } 100 | #[no_mangle] 101 | pub unsafe extern "C" fn _mi_heap_collect_abandon(mut heap: *mut mi_heap_t) { 102 | mi_heap_collect_ex(heap, ABANDON); 103 | } 104 | #[no_mangle] 105 | pub unsafe extern "C" fn mi_heap_collect(mut heap: *mut mi_heap_t, 106 | mut force: bool) { 107 | mi_heap_collect_ex(heap, if force != 0 != 0 { FORCE } else { NORMAL }); 108 | } 109 | #[no_mangle] 110 | pub unsafe extern "C" fn mi_collect(mut force: bool) { 111 | mi_heap_collect(mi_get_default_heap(), force != 0); 112 | } 113 | /* ----------------------------------------------------------- 114 | Heap new 115 | ----------------------------------------------------------- */ 116 | #[no_mangle] 117 | pub unsafe extern "C" fn mi_heap_get_default() -> *mut mi_heap_t { 118 | mi_thread_init(); // don't reclaim abandoned pages or otherwise destroy is unsafe 119 | return mi_get_default_heap(); 120 | } 121 | #[no_mangle] 122 | pub unsafe extern "C" fn mi_heap_get_backing() -> *mut mi_heap_t { 123 | let mut heap = mi_heap_get_default(); 124 | let mut bheap = heap.tld.heap_backing; 125 | return bheap; 126 | } 127 | #[no_mangle] 128 | pub unsafe extern "C" fn _mi_heap_random(mut heap: &mut mi_heap_t) -> usize { 129 | let mut r = heap.random; 130 | heap.random = _mi_random_shuffle(r); 131 | return r; 132 | } 133 | #[no_mangle] 134 | pub unsafe extern "C" fn mi_heap_new() -> *mut mi_heap_t { 135 | let mut bheap = mi_heap_get_backing(); 136 | let mut heap = 137 | (mi_heap_malloc(bheap, std::mem::size_of::<mi_heap_t>()) as 138 | *mut mi_heap_t); 139 | if heap.is_null() { return ptr::null_mut(); } 140 | memcpy(heap as *mut _, &_mi_heap_empty, std::mem::size_of::<mi_heap_t>()); 141 | heap.tld = bheap.tld; 142 | heap.thread_id = _mi_thread_id(); 143 | heap.cookie = ((heap as usize) ^ _mi_heap_random(bheap)) | 1; 144 | heap.random = _mi_heap_random(bheap); 145 | heap.no_reclaim = true; 146 | return heap; 147 | } 148 | // zero out the page queues 149 | unsafe fn mi_heap_reset_pages(mut heap: &mut mi_heap_t) { 150 | // TODO: copy full empty heap instead? 151 | memset(&mut heap.pages_free_direct, 0, 152 | std::mem::size_of::<[*mut mi_page_t; 130]>()); 153 | memcpy(&mut heap.pages, &_mi_heap_empty.pages, 154 | std::mem::size_of::<[mi_page_queue_t; 66]>()); 155 | heap.thread_delayed_free = ptr::null_mut(); 156 | heap.page_count = 0; 157 | } 158 | // called from `mi_heap_destroy` and `mi_heap_delete` to free the internal heap resources.
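A self-contained sketch of the cookie computation in `mi_heap_new` above (line 143): the heap address is XORed with per-heap randomness and the low bit is forced so that a valid cookie is never zero.

// Hypothetical helper; the port computes this inline.
fn heap_cookie(heap_addr: usize, random: usize) -> usize {
    (heap_addr ^ random) | 1
}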
159 | unsafe fn mi_heap_free(mut heap: &mut mi_heap_t) { 160 | if mi_heap_is_backing(heap) { 161 | return; // dont free the backing heap 162 | } 163 | // reset default 164 | if mi_heap_is_default(heap) { _mi_heap_default = heap.tld.heap_backing; } 165 | // and free the used memory 166 | mi_free(heap as *mut _); 167 | } 168 | /* ----------------------------------------------------------- 169 | Heap destroy 170 | ----------------------------------------------------------- */ 171 | // ensure no more thread_delayed_free will be added 172 | // stats 173 | pub static MI_LARGE_SIZE_MAX: c_long = ((1 << (6 + (13 + 3))) / 8); 174 | unsafe fn _mi_heap_page_destroy(mut heap: &mut mi_heap_t, 175 | mut pq: *mut mi_page_queue_t, 176 | mut page: &mut mi_page_t, 177 | mut arg1: *mut c_void, mut arg2: *mut c_void) 178 | -> bool { 179 | (arg1); // todo: off for aligned blocks... 180 | (arg2); 181 | (heap); 182 | (pq); 183 | _mi_page_use_delayed_free(page, false); 184 | if page.block_size > MI_LARGE_SIZE_MAX { 185 | _mi_stat_decrease(&mut ((heap).tld.stats.huge), page.block_size); 186 | } 187 | let mut inuse = page.used - page.thread_freed; 188 | if page.block_size <= MI_LARGE_SIZE_MAX { 189 | _mi_stat_decrease(&mut ((heap).tld.stats.normal[_mi_bin(page.block_size)]), 190 | inuse); 191 | } 192 | _mi_stat_decrease(&mut ((heap).tld.stats.malloc), 193 | page.block_size * inuse); 194 | // pretend it is all free now 195 | page.used = page.thread_freed as u16; 196 | // and free the page 197 | _mi_segment_page_free(page, false, /* no force? */ 198 | &mut heap.tld.segments); // keep going 199 | return true; // don't free in case it may contain reclaimed pages 200 | } 201 | #[no_mangle] 202 | pub unsafe extern "C" fn _mi_heap_destroy_pages(mut heap: *mut mi_heap_t) { 203 | mi_heap_visit_pages(heap, &mut _mi_heap_page_destroy, ptr::null_mut(), 204 | ptr::null_mut()); // free all pages 205 | mi_heap_reset_pages(heap); 206 | } 207 | #[no_mangle] 208 | pub unsafe extern "C" fn mi_heap_destroy(mut heap: &mut mi_heap_t) { 209 | if mi_heap_is_initialized(heap) != 0 { 210 | 0 211 | } else { 212 | _mi_assert_fail("mi_heap_is_initialized(heap)", "src/heap.c", 261, 213 | "mi_heap_destroy") 214 | } 215 | if (heap.no_reclaim) != 0 != 0 { 216 | 0 217 | } else { 218 | _mi_assert_fail("heap->no_reclaim", "src/heap.c", 262, 219 | "mi_heap_destroy") 220 | } 221 | if !mi_heap_is_initialized(heap) { return; } 222 | if heap.no_reclaim == 0 { 223 | mi_heap_delete(heap); 224 | } else { _mi_heap_destroy_pages(heap); mi_heap_free(heap); }; 225 | } 226 | /* ----------------------------------------------------------- 227 | Safe Heap delete 228 | ----------------------------------------------------------- */ 229 | // Tranfer the pages from one heap to the other 230 | unsafe fn mi_heap_absorb(mut heap: &mut mi_heap_t, mut from: *mut mi_heap_t) { 231 | if from.is_null() || from.page_count == 0 { return; } 232 | // unfull all full pages 233 | let mut page = (heap.pages[MI_BIN_FULL]).first; 234 | while !page.is_null() { 235 | let mut next = page.next; 236 | _mi_page_unfull(page); 237 | page = next; 238 | } 239 | // free outstanding thread delayed free blocks 240 | _mi_heap_delayed_free(from); 241 | // transfer all pages by appending the queues; this will set 242 | // a new heap field which is ok as all pages are unfull'd and thus 243 | // other threads won't access this field anymore (see `mi_free_block_mt`) 244 | for mut i in 0..MI_BIN_FULL { 245 | let mut pq = &mut heap.pages[i]; 246 | let mut append = &mut from.pages[i]; 247 | 
_mi_page_queue_append(heap, pq, append); 248 | } 249 | // and reset the `from` heap 250 | mi_heap_reset_pages(from); 251 | } 252 | // Safe delete a heap without freeing any still allocated blocks in that heap. 253 | #[no_mangle] 254 | pub unsafe extern "C" fn mi_heap_delete(mut heap: &mut mi_heap_t) { 255 | if mi_heap_is_initialized(heap) != 0 { 256 | 0 257 | } else { 258 | _mi_assert_fail("mi_heap_is_initialized(heap)", "src/heap.c", 316, 259 | "mi_heap_delete") 260 | } // tranfer still used pages to the backing heap 261 | if !mi_heap_is_initialized(heap) { 262 | return; // the backing heap abandons its pages 263 | } 264 | if !mi_heap_is_backing(heap) { 265 | mi_heap_absorb(heap.tld.heap_backing, heap); 266 | } else { _mi_heap_collect_abandon(heap); } 267 | mi_heap_free(heap); 268 | } 269 | #[no_mangle] 270 | pub unsafe extern "C" fn mi_heap_set_default(mut heap: *mut mi_heap_t) 271 | -> *mut mi_heap_t { 272 | if mi_heap_is_initialized(heap) != 0 { 273 | 0 274 | } else { 275 | _mi_assert_fail("mi_heap_is_initialized(heap)", "src/heap.c", 333, 276 | "mi_heap_set_default") 277 | } 278 | if !mi_heap_is_initialized(heap) { return ptr::null_mut(); } 279 | let mut old = _mi_heap_default; 280 | _mi_heap_default = heap; 281 | return old; 282 | } 283 | /* ----------------------------------------------------------- 284 | Analysis 285 | ----------------------------------------------------------- */ 286 | // static since it is not thread safe to access heaps from other threads. 287 | unsafe fn mi_heap_of_block(mut p: *const c_void) -> *mut mi_heap_t { 288 | if p.is_null() { 289 | return ptr::null_mut(); // continue if not found 290 | } // only aligned pointers 291 | let mut segment = _mi_ptr_segment(p); 292 | let mut valid = (_mi_ptr_cookie(segment as *const _) == segment.cookie); 293 | if __builtin_expect((valid == 0), 0) != 0 { return ptr::null_mut(); } 294 | return _mi_segment_page_of(segment, p).heap; 295 | } 296 | #[no_mangle] 297 | pub unsafe extern "C" fn mi_heap_contains_block(mut heap: *mut mi_heap_t, 298 | mut p: *const c_void) 299 | -> bool { 300 | if !heap.is_null() { 301 | 0 302 | } else { 303 | _mi_assert_fail("heap != NULL", "src/heap.c", 359, 304 | "mi_heap_contains_block") 305 | } 306 | if !mi_heap_is_initialized(heap) { return false; } 307 | return (heap == mi_heap_of_block(p)); 308 | } 309 | unsafe fn mi_heap_page_check_owned(mut heap: *mut mi_heap_t, 310 | mut pq: *mut mi_page_queue_t, 311 | mut page: &mut mi_page_t, 312 | mut p: *mut c_void, 313 | mut vfound: *mut c_void) -> bool { 314 | (heap); 315 | (pq); 316 | let mut found = vfound as *mut bool; 317 | let mut segment = _mi_page_segment(page); 318 | let mut start = _mi_page_start(segment, page, ptr::null_mut()); 319 | let mut end = 320 | (start as *mut u8).offset((page.capacity * page.block_size)); 321 | *found = (p >= start && p < end); 322 | return (*found == 0); 323 | } 324 | pub static MI_INTPTR_SIZE: c_int = 1 << 3; 325 | #[no_mangle] 326 | pub unsafe extern "C" fn mi_heap_check_owned(mut heap: *mut mi_heap_t, 327 | mut p: *const c_void) -> bool { 328 | if !heap.is_null() { 329 | 0 330 | } else { 331 | _mi_assert_fail("heap != NULL", "src/heap.c", 377, 332 | "mi_heap_check_owned") 333 | } 334 | if !mi_heap_is_initialized(heap) { return false; } 335 | if ((p as usize) & (MI_INTPTR_SIZE - 1)) != 0 { return false; } 336 | let mut found = false; 337 | mi_heap_visit_pages(heap, &mut mi_heap_page_check_owned, p as *mut c_void, 338 | &mut found); 339 | return found != 0; 340 | } 341 | #[no_mangle] 342 | pub unsafe extern "C" fn 
mi_check_owned(mut p: *const c_void) -> bool { 343 | return mi_heap_check_owned(mi_get_default_heap(), p); 344 | } 345 | /* ----------------------------------------------------------- 346 | Visit all heap blocks and areas 347 | Todo: enable visiting abandoned pages, and 348 | enable visiting all blocks of all heaps across threads 349 | ----------------------------------------------------------- */ 350 | // Separate struct to keep `mi_page_t` out of the public interface 351 | pub struct mi_heap_area_ex_s { 352 | pub area: mi_heap_area_t, 353 | pub page: *mut mi_page_t, 354 | } 355 | // optimize page with one block 356 | // create a bitmap of free blocks. 357 | // Todo: avoid division? 358 | // walk through all blocks skipping the free ones 359 | pub static UINTPTR_MAX: c_long = 18446744073709551615; 360 | unsafe fn mi_heap_area_visit_blocks(mut xarea: *const mi_heap_area_ex_t, 361 | mut visitor: &mut mi_block_visit_fun, 362 | mut arg: *mut c_void) -> bool { 363 | if !xarea.is_null() { 364 | 0 365 | } else { 366 | _mi_assert_fail("xarea != NULL", "src/heap.c", 402, 367 | "mi_heap_area_visit_blocks") 368 | } // skip a run of free blocks 369 | if xarea.is_null() { 370 | return true; // race is ok 371 | } 372 | let mut area = &xarea.area; 373 | let mut page = xarea.page; 374 | if !page.is_null() { 375 | 0 376 | } else { 377 | _mi_assert_fail("page != NULL", "src/heap.c", 406, 378 | "mi_heap_area_visit_blocks") 379 | } 380 | if page.is_null() { return true; } 381 | _mi_page_free_collect(page); 382 | if page.used == 0 { return true; } 383 | let mut psize: usize; 384 | let mut pstart = _mi_page_start(_mi_page_segment(page), page, &mut psize); 385 | if page.capacity == 1 { 386 | return visitor(page.heap, area, pstart as *mut _, page.block_size, 387 | arg); 388 | } 389 | let mut free_map: [usize; 1024]; 390 | memset(free_map as *mut _, 0, std::mem::size_of::<[usize; 1024]>()); 391 | let mut free_count = 0; 392 | let mut block = page.free; 393 | while !block.is_null() { 394 | free_count += 1; 395 | let mut offset = (block as *mut u8).offset(-pstart); 396 | let mut blockidx = offset / page.block_size; 397 | let mut bitidx = (blockidx / std::mem::size_of::<usize>()); 398 | let mut bit = blockidx - (bitidx * std::mem::size_of::<usize>()); 399 | free_map[bitidx] |= (1 << bit); 400 | block = mi_block_next(page, block) 401 | } 402 | let mut used_count = 0; 403 | for mut i in 0..page.capacity { 404 | let mut bitidx = (i / std::mem::size_of::<usize>()); 405 | let mut bit = i - (bitidx * std::mem::size_of::<usize>()); 406 | let mut m = free_map[bitidx]; 407 | if bit == 0 && m == UINTPTR_MAX { 408 | i += (std::mem::size_of::<usize>() - 1); 409 | } else if (m & (1 << bit)) == 0 { 410 | used_count += 1; 411 | let mut block = pstart.offset((i * page.block_size)); 412 | if !visitor(page.heap, area, block as *mut _, page.block_size, 413 | arg) { 414 | return false; 415 | }; 416 | }; 417 | } 418 | return true; 419 | } 420 | pub type mi_heap_area_visit_fun = unsafe extern "C" fn(*const mi_heap_t, *mut mi_heap_area_ex_t, *mut c_void) -> bool; 421 | unsafe fn mi_heap_visit_areas_page(mut heap: *mut mi_heap_t, 422 | mut pq: *mut mi_page_queue_t, 423 | mut page: &mut mi_page_t, 424 | mut vfun: *mut c_void, 425 | mut arg: *mut c_void) -> bool { 426 | (heap); 427 | (pq); 428 | let mut fun = vfun as *mut mi_heap_area_visit_fun; 429 | let mut xarea: mi_heap_area_ex_t; 430 | xarea.page = page; 431 | xarea.area.reserved = page.reserved * page.block_size; 432 | xarea.area.committed = page.capacity * page.block_size; 433 | xarea.area.blocks = 434 | _mi_page_start(_mi_page_segment(page), page, ptr::null_mut()); 435 |
xarea.area.used = page.used - page.thread_freed; 436 | xarea.area.block_size = page.block_size; 437 | return fun(heap, &mut xarea, arg); 438 | } 439 | // Visit all heap pages as areas 440 | unsafe fn mi_heap_visit_areas(mut heap: *const mi_heap_t, 441 | mut visitor: *mut mi_heap_area_visit_fun, 442 | mut arg: *mut c_void) -> bool { 443 | if visitor.is_null() { return false; } 444 | return mi_heap_visit_pages(heap as *mut mi_heap_t, 445 | &mut mi_heap_visit_areas_page, 446 | visitor as *mut _, arg); 447 | } 448 | // Just to pass arguments 449 | pub struct mi_visit_blocks_args_s { 450 | pub visit_blocks: bool, 451 | pub visitor: *mut mi_block_visit_fun, 452 | pub arg: *mut c_void, 453 | } 454 | unsafe fn mi_heap_area_visitor(mut heap: *const mi_heap_t, 455 | mut xarea: &mi_heap_area_ex_t, 456 | mut arg: *mut c_void) -> bool { 457 | let mut args = arg as *mut mi_visit_blocks_args_t; 458 | if !args.visitor(heap, &xarea.area, ptr::null(), xarea.area.block_size, 459 | arg) { 460 | return false; 461 | } 462 | if args.visit_blocks != 0 { 463 | return mi_heap_area_visit_blocks(xarea, args.visitor, args.arg); 464 | } else { return true; }; 465 | } 466 | // Visit all blocks in a heap 467 | #[no_mangle] 468 | pub unsafe extern "C" fn mi_heap_visit_blocks(mut heap: *const mi_heap_t, 469 | mut visit_blocks: bool, 470 | mut visitor: 471 | *mut mi_block_visit_fun, 472 | mut arg: *mut c_void) -> bool { 473 | let mut args = 474 | mi_visit_blocks_args_t{_0: visit_blocks != 0, _1: visitor, _2: arg,}; 475 | return mi_heap_visit_areas(heap, &mut mi_heap_area_visitor, &mut args); 476 | } 477 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/options.rs: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | Copyright (c) 2018, Microsoft Research, Daan Leijen 3 | This is free software; you can redistribute it and/or modify it under the 4 | terms of the MIT license. A copy of the license can be found in the file 5 | "LICENSE" at the root of this distribution. 6 | -----------------------------------------------------------------------------*/ 7 | // strcmp 8 | // toupper 9 | // -------------------------------------------------------- 10 | // Options 11 | // -------------------------------------------------------- 12 | pub enum mi_init_e { 13 | UNINIT, 14 | DEFAULTED, 15 | INITIALIZED, // not yet initialized 16 | // not found in the environment, use default value 17 | // found in environment or set explicitly 18 | } 19 | pub struct mi_option_desc_s { 20 | pub value: c_long, 21 | pub init: mi_init_t, 22 | pub name: *const i8, // the value 23 | // is it initialized yet? 
(from the environment) 24 | // option name without `mimalloc_` prefix 25 | } 26 | // in secure build the environment setting is ignored 27 | pub static MI_DEBUG: c_long = 1; 28 | pub static mut options: [mi_option_desc_t; 7] = 29 | [mi_option_desc_t{_0: 0, _1: UNINIT, _2: "page_reset",}, 30 | mi_option_desc_t{_0: 0, _1: UNINIT, _2: "cache_reset",}, 31 | mi_option_desc_t{_0: 0, _1: UNINIT, _2: "pool_commit",}, 32 | mi_option_desc_t{_0: 0, _1: UNINIT, _2: "secure",}, 33 | mi_option_desc_t{_0: 0, _1: UNINIT, _2: "show_stats",}, 34 | mi_option_desc_t{_0: MI_DEBUG, _1: UNINIT, _2: "show_errors",}, 35 | mi_option_desc_t{_0: MI_DEBUG, _1: UNINIT, _2: "verbose",}]; 36 | #[no_mangle] 37 | pub unsafe extern "C" fn mi_option_get(mut option: mi_option_t) -> c_long { 38 | if option >= 0 && option < _mi_option_last { 39 | 0 40 | } else { 41 | _mi_assert_fail("option >= 0 && option < _mi_option_last", 42 | "src/options.c", 47, "mi_option_get") 43 | } 44 | let mut desc = &mut options[option]; 45 | if desc.init == UNINIT { 46 | mi_option_init(desc); 47 | if option != mi_option_verbose { 48 | _mi_verbose_message("option \'%s\': %zd\n", desc.name, 49 | desc.value); 50 | }; 51 | } 52 | return desc.value; 53 | } 54 | #[no_mangle] 55 | pub unsafe extern "C" fn mi_option_set(mut option: mi_option_t, 56 | mut value: c_long) { 57 | if option >= 0 && option < _mi_option_last { 58 | 0 59 | } else { 60 | _mi_assert_fail("option >= 0 && option < _mi_option_last", 61 | "src/options.c", 59, "mi_option_set") 62 | } 63 | let mut desc = &mut options[option]; 64 | desc.value = value; 65 | desc.init = INITIALIZED; 66 | } 67 | #[no_mangle] 68 | pub unsafe extern "C" fn mi_option_set_default(mut option: mi_option_t, 69 | mut value: c_long) { 70 | if option >= 0 && option < _mi_option_last { 71 | 0 72 | } else { 73 | _mi_assert_fail("option >= 0 && option < _mi_option_last", 74 | "src/options.c", 66, "mi_option_set_default") 75 | } 76 | let mut desc = &mut options[option]; 77 | if desc.init != INITIALIZED { desc.value = value; }; 78 | } 79 | #[no_mangle] 80 | pub unsafe extern "C" fn mi_option_is_enabled(mut option: mi_option_t) 81 | -> bool { 82 | return (mi_option_get(option) != 0); 83 | } 84 | #[no_mangle] 85 | pub unsafe extern "C" fn mi_option_enable(mut option: mi_option_t, 86 | mut enable: bool) { 87 | mi_option_set(option, if enable != 0 != 0 { 1 } else { 0 }); 88 | } 89 | #[no_mangle] 90 | pub unsafe extern "C" fn mi_option_enable_default(mut option: mi_option_t, 91 | mut enable: bool) { 92 | mi_option_set_default(option, if enable != 0 != 0 { 1 } else { 0 }); 93 | } 94 | // -------------------------------------------------------- 95 | // Messages 96 | // -------------------------------------------------------- 97 | // Define our own limited `fprintf` that avoids memory allocation. 98 | // We do this using `snprintf` with a limited buffer. 99 | unsafe fn mi_vfprintf(mut out: *mut FILE, mut prefix: *const i8, 100 | mut fmt: *const i8, mut args: va_list) { 101 | let mut buf: [i8; 256]; 102 | if fmt.is_null() { return; } 103 | if out.is_null() { out = stdout; } 104 | vsnprintf(buf, std::mem::size_of_val(&buf) - 1, fmt, args); 105 | if !prefix.is_null() { fputs(prefix, out); } 106 | fputs(buf, out); 107 | } 108 | #[no_mangle] 109 | pub unsafe extern "C" fn _mi_fprintf(mut out: *mut FILE, 110 | mut fmt: *const i8, ...) 
{ 111 | let mut args: va_list; 112 | __builtin_va_start(args, fmt); 113 | mi_vfprintf(out, ptr::null_mut(), fmt, args); 114 | __builtin_va_end(args); 115 | } 116 | #[no_mangle] 117 | pub unsafe extern "C" fn _mi_verbose_message(mut fmt: *const i8, ...) { 118 | if !mi_option_is_enabled(mi_option_verbose) { return; } 119 | let mut args: va_list; 120 | __builtin_va_start(args, fmt); 121 | mi_vfprintf(stderr, "mimalloc: ", fmt, args); 122 | __builtin_va_end(args); 123 | } 124 | #[no_mangle] 125 | pub unsafe extern "C" fn _mi_error_message(mut fmt: *const i8, ...) { 126 | if !mi_option_is_enabled(mi_option_show_errors) && 127 | !mi_option_is_enabled(mi_option_verbose) { 128 | return; 129 | } 130 | let mut args: va_list; 131 | __builtin_va_start(args, fmt); 132 | mi_vfprintf(stderr, "mimalloc: error: ", fmt, args); 133 | __builtin_va_end(args); 134 | if (false) != 0 { 135 | 0 136 | } else { 137 | _mi_assert_fail("false", "src/options.c", 121, "_mi_error_message") 138 | }; 139 | } 140 | #[no_mangle] 141 | pub unsafe extern "C" fn _mi_warning_message(mut fmt: *const i8, ...) { 142 | if !mi_option_is_enabled(mi_option_show_errors) && 143 | !mi_option_is_enabled(mi_option_verbose) { 144 | return; 145 | } 146 | let mut args: va_list; 147 | __builtin_va_start(args, fmt); 148 | mi_vfprintf(stderr, "mimalloc: warning: ", fmt, args); 149 | __builtin_va_end(args); 150 | } 151 | #[no_mangle] 152 | pub unsafe extern "C" fn _mi_assert_fail(mut assertion: *const i8, 153 | mut fname: *const i8, 154 | mut line: c_uint, 155 | mut func: *const i8) { 156 | _mi_fprintf(stderr, 157 | "mimalloc: assertion failed: at \"%s\":%u, %s\n assertion: \"%s\"\n", 158 | fname, line, if func.is_null() { "" } else { func }, 159 | assertion); 160 | abort(); 161 | } 162 | // -------------------------------------------------------- 163 | // Initialize options by checking the environment 164 | // -------------------------------------------------------- 165 | unsafe fn mi_strlcpy(mut dest: *mut i8, mut src: *const i8, 166 | mut dest_size: usize) { 167 | dest[0] = 0i8; 168 | strncpy(dest, src, dest_size - 1); 169 | dest[dest_size - 1] = 0i8; 170 | } 171 | unsafe fn mi_strlcat(mut dest: *mut i8, mut src: *const i8, 172 | mut dest_size: usize) { 173 | strncat(dest, src, dest_size - 1); 174 | dest[dest_size - 1] = 0i8; 175 | } 176 | unsafe fn mi_option_init(mut desc: &mut mi_option_desc_t) { 177 | desc.init = DEFAULTED; 178 | // Read option value from the environment 179 | let mut buf: [i8; 32]; 180 | mi_strlcpy(buf, "mimalloc_", std::mem::size_of_val(&buf)); 181 | mi_strlcat(buf, desc.name, std::mem::size_of_val(&buf)); 182 | let mut s = getenv(buf); 183 | if s.is_null() { 184 | for mut i in 0..strlen(buf) { buf[i] = toupper(buf[i]); } 185 | s = getenv(buf); 186 | } 187 | if !s.is_null() { 188 | mi_strlcpy(buf, s, std::mem::size_of_val(&buf)); 189 | for mut i in 0..strlen(buf) { buf[i] = toupper(buf[i]); } 190 | if buf[0] == 0 || !strstr("1;TRUE;YES;ON", buf).is_null() { 191 | desc.value = 1; 192 | desc.init = INITIALIZED; 193 | } else if !strstr("0;FALSE;NO;OFF", buf).is_null() { 194 | desc.value = 0; 195 | desc.init = INITIALIZED; 196 | } else { 197 | let mut end = buf; 198 | let mut value = strtol(buf, &mut end, 10); 199 | if *end == 0 { 200 | desc.value = value; 201 | desc.init = INITIALIZED; 202 | } else { 203 | _mi_warning_message("environment option mimalloc_%s has an invalid value: %s\n", 204 | desc.name, buf); 205 | }; 206 | }; 207 | }; 208 | } 209 | 
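A standalone sketch of the environment lookup `mi_option_init` performs above, written against std for brevity (the port calls `getenv`/`strtol` directly); `read_env_option` is a hypothetical helper, not part of the crate:

// Try `mimalloc_<name>`, then the upper-cased variant; accept boolean
// words or a full decimal value, like the strstr/strtol logic above.
fn read_env_option(name: &str) -> Option<i64> {
    let key = format!("mimalloc_{}", name);
    let raw = std::env::var(&key)
        .or_else(|_| std::env::var(key.to_uppercase()))
        .ok()?;
    let val = raw.to_uppercase();
    match val.as_str() {
        "" | "1" | "TRUE" | "YES" | "ON" => Some(1),
        "0" | "FALSE" | "NO" | "OFF" => Some(0),
        other => other.parse::<i64>().ok(), // None mirrors the warning path
    }
}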
-------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/os.rs: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | Copyright (c) 2018, Microsoft Research, Daan Leijen 3 | This is free software; you can redistribute it and/or modify it under the 4 | terms of the MIT license. A copy of the license can be found in the file 5 | "LICENSE" at the root of this distribution. 6 | -----------------------------------------------------------------------------*/ 7 | // ensure mmap flags are defined 8 | // memset 9 | // debug fprintf 10 | /* ----------------------------------------------------------- 11 | Raw allocation on Windows (VirtualAlloc) and Unix's (mmap). 12 | Defines a portable `mmap`, `munmap` and `mmap_trim`. 13 | ----------------------------------------------------------- */ 14 | // mmap 15 | // sysconf 16 | // Comment out functions ported to Rust 17 | // uintptr_t _mi_align_up(uintptr_t sz, size_t alignment) { 18 | // uintptr_t x = (sz / alignment) * alignment; 19 | // if (x < sz) x += alignment; 20 | // if (x < sz) return 0; // overflow 21 | // return x; 22 | // } 23 | unsafe fn mi_align_up_ptr(mut p: *mut c_void, mut alignment: usize) 24 | -> *mut c_void { 25 | return _mi_align_up_rs(p as usize, alignment) as *mut c_void; 26 | } 27 | unsafe fn _mi_align_down(mut sz: usize, mut alignment: usize) -> usize { 28 | return (sz / alignment) * alignment; 29 | } 30 | unsafe fn mi_align_down_ptr(mut p: *mut c_void, mut alignment: usize) 31 | -> *mut c_void { 32 | return _mi_align_down(p as usize, alignment) as *mut c_void; 33 | } 34 | // cached OS page size 35 | pub static _SC_PAGESIZE: c_int = _SC_PAGESIZE; 36 | #[no_mangle] 37 | pub unsafe extern "C" fn _mi_os_page_size() -> usize { 38 | let mut page_size = 0; // BSD 39 | if page_size == 0 { 40 | let mut result = sysconf(_SC_PAGESIZE); // Linux 41 | page_size = if result > 0 { result as usize } else { 4096 }; 42 | } 43 | return page_size; 44 | } 45 | pub static errno: c_int = (*__errno_location()); 46 | unsafe fn mi_munmap(mut addr: *mut c_void, mut size: usize) { 47 | if addr.is_null() || size == 0 { return; } 48 | let mut err = false; 49 | err = (munmap(addr, size) == -1); 50 | if err != 0 { 51 | _mi_warning_message("munmap failed: %s, addr 0x%8li, size %lu\n", 52 | strerror(errno), addr as usize, size); 53 | }; 54 | } 55 | pub static MAP_PRIVATE: c_int = 2; 56 | pub static MAP_ANONYMOUS: c_int = 32; 57 | pub static MAP_FIXED: c_int = 16; 58 | pub static PROT_READ: c_int = 1; 59 | pub static PROT_WRITE: c_int = 2; 60 | pub static MAP_FAILED: *mut c_void = -1 as *mut c_void; 61 | unsafe fn mi_mmap(mut addr: *mut c_void, mut size: usize, 62 | mut extra_flags: c_int, mut stats: &mut mi_stats_t) 63 | -> *mut c_void { 64 | (stats); 65 | if size == 0 { return ptr::null_mut(); } 66 | let mut flags = MAP_PRIVATE | MAP_ANONYMOUS | extra_flags; 67 | if !addr.is_null() { flags |= MAP_FIXED; } 68 | let mut p = mmap(addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0); 69 | if p == MAP_FAILED { p = ptr::null_mut(); } 70 | if !addr.is_null() && p != addr { 71 | mi_munmap(p, size); 72 | p = ptr::null_mut(); 73 | } 74 | if p.is_null() || (addr.is_null() && p != addr) || 75 | (!addr.is_null() && p == addr) { 76 | 0 77 | } else { 78 | _mi_assert_fail("p == NULL || (addr == NULL && p != addr) || (addr != NULL && p == addr)", 79 | "src/os.c", 113, "mi_mmap") 80 | } 81 | if !p.is_null() { 
_mi_stat_increase(&mut (stats.mmap_calls), 1); } 82 | return p; 83 | } 84 | unsafe fn mi_os_page_align_region(mut addr: *mut c_void, mut size: usize, 85 | mut newsize: *mut usize) -> *mut c_void { 86 | if !addr.is_null() && size > 0 { 87 | 0 88 | } else { 89 | _mi_assert_fail("addr != NULL && size > 0", "src/os.c", 120, 90 | "mi_os_page_align_region") 91 | } 92 | if !newsize.is_null() { *newsize = 0; } 93 | if size == 0 || addr.is_null() { return ptr::null_mut(); } 94 | // page align conservatively within the range 95 | let mut start = mi_align_up_ptr(addr, _mi_os_page_size()); 96 | let mut end = 97 | mi_align_down_ptr((addr as *mut u8).offset(size), _mi_os_page_size()); 98 | let mut diff = (end as *mut u8).offset(-(start as *mut u8)); 99 | if diff <= 0 { return ptr::null_mut(); } 100 | if !newsize.is_null() { *newsize = diff as usize; } 101 | return start; 102 | } 103 | // Signal to the OS that the address range is no longer in use 104 | // but may be used later again. This will release physical memory 105 | // pages and reduce swapping while keeping the memory committed. 106 | // We page align to a conservative area inside the range to reset. 107 | // page align conservatively within the range 108 | pub static MADV_FREE: c_int = 8; 109 | pub static EINVAL: c_int = 22; 110 | // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on 111 | pub static MADV_DONTNEED: c_int = 4; 112 | #[no_mangle] 113 | pub unsafe extern "C" fn _mi_os_reset(mut addr: *mut c_void, mut size: usize) 114 | -> bool { 115 | let mut csize: usize; 116 | let mut start = mi_os_page_align_region(addr, size, &mut csize); 117 | if csize == 0 { return true; } 118 | let mut advice = MADV_FREE; 119 | let mut err = madvise(start, csize, advice); 120 | if err != 0 && errno == EINVAL && advice == MADV_FREE { 121 | advice = MADV_DONTNEED; 122 | err = madvise(start, csize, advice); 123 | } 124 | if err != 0 { 125 | _mi_warning_message("madvise reset error: start: 0x%8p, csize: 0x%8zux, errno: %i\n", 126 | start, csize, errno); 127 | } 128 | //mi_assert(err == 0); 129 | return (err == 0); 130 | } 131 | // Protect a region in memory to be not accessible. 
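The conservative page rounding used by `mi_os_page_align_region` and `_mi_os_reset` above, restated on raw addresses as a sketch (`page` is the nonzero OS page size):

// Round the start up and the end down so the result never covers bytes
// outside [addr, addr + size); `None` corresponds to the `diff <= 0` case.
fn page_align_conservative(addr: usize, size: usize, page: usize) -> Option<(usize, usize)> {
    let start = addr.checked_add(page - 1)? / page * page;
    let end = (addr + size) / page * page;
    if end > start { Some((start, end - start)) } else { None }
}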
132 | // page align conservatively within the range 133 | pub static PROT_NONE: c_int = 0; 134 | unsafe fn mi_os_protectx(mut addr: *mut c_void, mut size: usize, 135 | mut protect: bool) -> bool { 136 | let mut csize = 0; 137 | let mut start = mi_os_page_align_region(addr, size, &mut csize); 138 | if csize == 0 { return false; } 139 | let mut err = 0; 140 | err = 141 | mprotect(start, csize, 142 | if protect != 0 != 0 { 143 | PROT_NONE 144 | } else { PROT_READ | PROT_WRITE }); 145 | if err != 0 { 146 | _mi_warning_message("mprotect error: start: 0x%8p, csize: 0x%8zux, errno: %i\n", 147 | start, csize, errno); 148 | } 149 | return (err == 0); 150 | } 151 | #[no_mangle] 152 | pub unsafe extern "C" fn _mi_os_protect(mut addr: *mut c_void, 153 | mut size: usize) -> bool { 154 | return mi_os_protectx(addr, size, true); 155 | } 156 | #[no_mangle] 157 | pub unsafe extern "C" fn _mi_os_unprotect(mut addr: *mut c_void, 158 | mut size: usize) -> bool { 159 | return mi_os_protectx(addr, size, false); 160 | } 161 | /* ----------------------------------------------------------- 162 | OS allocation using mmap/munmap 163 | ----------------------------------------------------------- */ 164 | #[no_mangle] 165 | pub unsafe extern "C" fn _mi_os_alloc(mut size: usize, 166 | mut stats: &mut mi_stats_t) 167 | -> *mut c_void { 168 | if size == 0 { return ptr::null_mut(); } 169 | let mut p = mi_mmap(ptr::null_mut(), size, 0, stats); 170 | if !p.is_null() { 171 | 0 172 | } else { _mi_assert_fail("p!=NULL", "src/os.c", 205, "_mi_os_alloc") } 173 | if !p.is_null() { _mi_stat_increase(&mut (stats.reserved), size); } 174 | return p; 175 | } 176 | #[no_mangle] 177 | pub unsafe extern "C" fn _mi_os_free(mut p: *mut c_void, mut size: usize, 178 | mut stats: &mut mi_stats_t) { 179 | (stats); 180 | mi_munmap(p, size); 181 | _mi_stat_decrease(&mut (stats.reserved), size); 182 | } 183 | // Slow but guaranteed way to allocated aligned memory 184 | // by over-allocating and then reallocating at a fixed aligned 185 | // address that should be available then. 186 | unsafe fn mi_os_alloc_aligned_ensured(mut size: usize, mut alignment: usize, 187 | mut trie: usize, 188 | mut stats: *mut mi_stats_t) 189 | -> *mut c_void { 190 | if trie >= 3 { 191 | return ptr::null_mut(); // stop recursion (only on Windows) 192 | } // overflow? 193 | let mut alloc_size = size + alignment; 194 | if alloc_size >= size { 195 | 0 196 | } else { 197 | _mi_assert_fail("alloc_size >= size", "src/os.c", 223, 198 | "mi_os_alloc_aligned_ensured") 199 | } 200 | if alloc_size < size { return ptr::null_mut(); } 201 | // allocate a chunk that includes the alignment 202 | let mut p = mi_mmap(ptr::null_mut(), alloc_size, 0, stats); 203 | if p.is_null() { return ptr::null_mut(); } 204 | // create an aligned pointer in the allocated area 205 | let mut aligned_p = mi_align_up_ptr(p, alignment); 206 | if !aligned_p.is_null() { 207 | 0 208 | } else { 209 | _mi_assert_fail("aligned_p != NULL", "src/os.c", 231, 210 | "mi_os_alloc_aligned_ensured") 211 | } 212 | // free it and try to allocate `size` at exactly `aligned_p` 213 | // note: this may fail in case another thread happens to VirtualAlloc 214 | // concurrently at that spot. We try up to 3 times to mitigate this. 215 | // we selectively unmap parts around the over-allocated area. 
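A sketch of the head/mid/tail split that `mi_os_alloc_aligned_ensured` computes above, assuming `base` (the mmap result) and `alignment` are multiples of the page size:

// Over-allocate size + alignment bytes at `base`, align inside the block,
// keep the page-rounded `mid` span mapped, and trim `pre` and `post`.
fn split_over_allocation(base: usize, size: usize, alignment: usize, page: usize)
    -> (usize, usize, usize, usize) {
    let aligned = (base + alignment - 1) / alignment * alignment;
    let pre = aligned - base;                  // unmapped head
    let mid = (size + page - 1) / page * page; // _mi_align_up(size, page size)
    let post = (size + alignment) - pre - mid; // unmapped tail
    (pre, aligned, mid, post)
}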
216 | let mut pre_size = (aligned_p as *mut u8).offset(-(p as *mut u8)); 217 | let mut mid_size = _mi_align_up_rs(size, _mi_os_page_size()); 218 | let mut post_size = alloc_size - pre_size - mid_size; 219 | if pre_size > 0 { mi_munmap(p, pre_size); } 220 | if post_size > 0 { 221 | mi_munmap((aligned_p as *mut u8).offset(mid_size), post_size); 222 | } 223 | if (aligned_p as usize) % alignment == 0 { 224 | 0 225 | } else { 226 | _mi_assert_fail("((uintptr_t)aligned_p) % alignment == 0", "src/os.c", 227 | 251, "mi_os_alloc_aligned_ensured") 228 | } 229 | return aligned_p; 230 | } 231 | // Allocate an aligned block. 232 | // Since `mi_mmap` is relatively slow we try to allocate directly at first and 233 | // hope to get an aligned address; only when that fails we fall back 234 | // to a guaranteed method by overallocating at first and adjusting. 235 | // TODO: use VirtualAlloc2 with alignment on Windows 10 / Windows Server 2016. 236 | // on BSD, use the aligned mmap api 237 | // alignment is a power of 2 and >= 4096 238 | // use the NetBSD/freeBSD aligned flags 239 | // if the next probable address is aligned, 240 | // then try to just allocate `size` and hope it is aligned... 241 | //fprintf(stderr, "segment address guess: %s, p=%lxu, guess:%lxu\n", (p != NULL && (uintptr_t)p % alignment ==0 ? "correct" : "incorrect"), (uintptr_t)p, next_probable); 242 | // if `p` is not yet aligned after all, free the block and use a slower 243 | // but guaranteed way to allocate an aligned block 244 | //fprintf(stderr, "mimalloc: slow mmap 0x%lx\n", _mi_thread_id()); 245 | // next probable address is the page-aligned address just after the newly allocated area. 246 | // Windows allocates 64kb aligned 247 | // page size on other OS's 248 | pub static MI_SEGMENT_SIZE: usize = (1 << (6 + (13 + 3))); 249 | #[no_mangle] 250 | pub unsafe extern "C" fn _mi_os_alloc_aligned(mut size: usize, 251 | mut alignment: usize, 252 | mut tld: &mut mi_os_tld_t) 253 | -> *mut c_void { 254 | if size == 0 { 255 | return ptr::null_mut(); // Linux tends to allocate downward 256 | } 257 | if alignment < 1024 { return _mi_os_alloc(size, tld.stats); } 258 | let mut p = os_pool_alloc(size, alignment, tld); 259 | if !p.is_null() { return p; } 260 | let mut suggest = ptr::null_mut(); 261 | if p.is_null() && (tld.mmap_next_probable % alignment) == 0 { 262 | p = mi_mmap(suggest, size, 0, tld.stats); 263 | if p.is_null() { return ptr::null_mut(); } 264 | if ((p as usize) % alignment) == 0 { 265 | _mi_stat_increase(&mut (tld.stats.mmap_right_align), 1); 266 | }; 267 | } 268 | if p.is_null() || ((p as usize) % alignment) != 0 { 269 | if !p.is_null() { mi_munmap(p, size); } 270 | _mi_stat_increase(&mut (tld.stats.mmap_ensure_aligned), 1); 271 | p = mi_os_alloc_aligned_ensured(size, alignment, 0, tld.stats); 272 | } 273 | if !p.is_null() { 274 | _mi_stat_increase(&mut (tld.stats.reserved), size); 275 | let alloc_align = _mi_os_page_size(); 276 | let mut probable_size = MI_SEGMENT_SIZE; 277 | if tld.mmap_previous > p { 278 | tld.mmap_next_probable = 279 | _mi_align_down((p as usize) - probable_size, alloc_align); 280 | // ((uintptr_t)previous - (uintptr_t)p); 281 | } else { // Otherwise, guess the next address is page aligned `size` from current pointer 282 | tld.mmap_next_probable = 283 | _mi_align_up_rs((p as usize) + probable_size, alloc_align); 284 | } 285 | tld.mmap_previous = p; 286 | } 287 | return p; 288 | } 289 | // Pooled allocation: on 64-bit systems with plenty 290 | // of virtual addresses, we allocate 10 segments at the 291 | // 
287 | // time to minimize `mmap` calls and increase aligned 288 | // allocations. This is only good on systems that 289 | // do overcommit, so we put it behind the `MIMALLOC_POOL_COMMIT` option. 290 | // For now, we disable it on Windows as VirtualFree must 291 | // be called on the original allocation and cannot be called 292 | // for individual fragments. 293 | pub static MI_POOL_ALIGNMENT: usize = (1 << (6 + (13 + 3))); 294 | pub static MI_POOL_SIZE: usize = (10 * (1 << (6 + (13 + 3)))); 295 | unsafe fn os_pool_alloc(mut size: usize, mut alignment: usize, 296 | mut tld: &mut mi_os_tld_t) -> *mut c_void { 297 | if !mi_option_is_enabled(mi_option_pool_commit) { 298 | return ptr::null_mut(); 299 | } 300 | if alignment != MI_POOL_ALIGNMENT { return ptr::null_mut(); } 301 | size = _mi_align_up_rs(size, MI_POOL_ALIGNMENT); 302 | if size > MI_POOL_SIZE { return ptr::null_mut(); } 303 | if tld.pool_available == 0 { 304 | tld.pool = 305 | mi_os_alloc_aligned_ensured(MI_POOL_SIZE, MI_POOL_ALIGNMENT, 0, 306 | tld.stats) as *mut u8; 307 | if tld.pool.is_null() { return ptr::null_mut(); } 308 | tld.pool_available += MI_POOL_SIZE; 309 | } 310 | if size > tld.pool_available { return ptr::null_mut(); } 311 | let mut p = tld.pool as *mut _; 312 | tld.pool_available -= size; 313 | tld.pool = tld.pool.add(size); 314 | return p; 315 | }
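The pool is a plain bump allocator over one big pre-reserved region. The discipline in isolation, as a sketch with illustrative names (Pool and its fields stand in for tld.pool/pool_available; refill is left to the caller, as mi_os_alloc_aligned_ensured does above):

// Minimal sketch of the bump-pointer pool: carve aligned chunks off a
// pre-reserved region until it runs out.
struct Pool {
    cursor: usize,    // current allocation cursor (like tld.pool)
    available: usize, // bytes left (like tld.pool_available)
}

impl Pool {
    fn alloc(&mut self, size: usize, align: usize) -> Option<usize> {
        let size = (size + align - 1) & !(align - 1); // round up to pool alignment
        if size > self.available {
            return None; // caller falls back to a fresh mmap
        }
        let p = self.cursor;
        self.cursor += size;
        self.available -= size;
        Some(p)
    }
}

fn main() {
    // pretend a 10-segment pool was reserved at a 4 MiB aligned base
    let mut pool = Pool { cursor: 10 << 22, available: 10 << 22 };
    let a = pool.alloc(100, 4 << 20).unwrap();
    let b = pool.alloc(100, 4 << 20).unwrap();
    assert_eq!(b - a, 4 << 20); // each allocation consumed one aligned slot
}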
-------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/page-queue.rs: -------------------------------------------------------------------------------- 1 | failed function body: 2 | • CompoundStmt("" src/page-queue.c:84:30) 3 | • IfStmt("" src/page-queue.c:85:3) 4 | • OpaqueValueExpr!("" builtin definitions) 5 | +Bool<"_Bool"> 6 | • ReturnStmt("" src/page-queue.c:85:15) 7 | • IntegerLiteral("" src/page-queue.c:85:22) 8 | +Int<"int"> 9 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/page.rs: -------------------------------------------------------------------------------- 1 | /*---------------------------------------------------------------------------- 2 | Copyright (c) 2018, Microsoft Research, Daan Leijen 3 | This is free software; you can redistribute it and/or modify it under the 4 | terms of the MIT license. A copy of the license can be found in the file 5 | "LICENSE" at the root of this distribution. 6 | -----------------------------------------------------------------------------*/ 7 | /* ----------------------------------------------------------- 8 | The core of the allocator. Every segment contains 9 | pages of a certain block size. The main function 10 | exported is `mi_malloc_generic`. 11 | ----------------------------------------------------------- */ 12 | // memset, memcpy 13 | /* ----------------------------------------------------------- 14 | Definition of page queues for each block size 15 | ----------------------------------------------------------- */ 16 | /* ----------------------------------------------------------- 17 | Page helpers 18 | ----------------------------------------------------------- */ 19 | // Index a block in a page 20 | unsafe fn mi_page_block_at(mut page: &mi_page_t, mut page_start: *mut c_void, 21 | mut i: usize) -> *mut mi_block_t { 22 | return (page_start as *mut u8).add(i * page.block_size) as 23 | *mut mi_block_t; 24 | } 25 | // Start of the page available memory 26 | //mi_assert_internal(start + page->capacity*page->block_size == page->top); 27 | #[no_mangle] 28 | pub unsafe extern "C" fn _mi_page_use_delayed_free(mut page: &mut mi_page_t, 29 | mut enable: bool) { 30 | let mut tfree: mi_thread_free_t; 31 | let mut tfreex: mi_thread_free_t; 32 | loop { 33 | tfreex = 34 | { 35 | tfree = page.thread_free; 36 | tfree 37 | }; 38 | tfreex.delayed = 39 | if enable { 40 | MI_USE_DELAYED_FREE 41 | } else { MI_NO_DELAYED_FREE }; 42 | // delay until outstanding MI_DELAYED_FREEING are done, then try again 43 | if __builtin_expect((tfree.delayed == MI_DELAYED_FREEING), 0) != 0 { 44 | mi_atomic_yield(); 45 | continue ; 46 | } 47 | // avoid the atomic operation if already equal 48 | if !(tfreex.delayed != tfree.delayed && 49 | !mi_atomic_compare_exchange(&mut page.thread_free as 50 | *mut volatile_uintptr_t, 51 | tfreex.value, tfree.value)) { 52 | break 53 | }; 54 | }; 55 | }
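The routine above flips a 2-bit state tag packed into the low bits of the thread-free word (MI_TF_PTR_SHIFT below reserves those bits). The retry pattern in isolation, as a standalone sketch using std atomics; the names and tag values are illustrative, not the crate's API:

use std::sync::atomic::{AtomicUsize, Ordering};

const DELAYED_MASK: usize = 0b11; // low two bits hold the "delayed" state
const NO_DELAYED_FREE: usize = 0;
const USE_DELAYED_FREE: usize = 1;
const DELAYED_FREEING: usize = 2; // a free is in flight; spin until done

// Flip the delayed-free state of a packed (pointer | tag) word, retrying on
// contention, and spinning while another thread is mid-DELAYED_FREEING.
fn set_delayed(tfree: &AtomicUsize, enable: bool) {
    loop {
        let old = tfree.load(Ordering::Acquire);
        if old & DELAYED_MASK == DELAYED_FREEING {
            std::hint::spin_loop(); // like mi_atomic_yield() above
            continue;
        }
        let new = (old & !DELAYED_MASK)
            | if enable { USE_DELAYED_FREE } else { NO_DELAYED_FREE };
        if new == old { return; } // avoid the CAS if already equal
        if tfree
            .compare_exchange_weak(old, new, Ordering::AcqRel, Ordering::Acquire)
            .is_ok()
        {
            return;
        }
    }
}

fn main() {
    let tfree = AtomicUsize::new(0x1000 | NO_DELAYED_FREE);
    set_delayed(&tfree, true);
    assert_eq!(tfree.load(Ordering::Relaxed) & DELAYED_MASK, USE_DELAYED_FREE);
}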
56 | /* ----------------------------------------------------------- 57 | Page collect the `local_free` and `thread_free` lists 58 | ----------------------------------------------------------- */ 59 | // Collect the local `thread_free` list using an atomic exchange. 60 | // Note: The exchange must be done atomically as this is used right after 61 | // moving to the full list in `mi_page_collect_ex` and we need to 62 | // ensure that there was no race where the page became unfull just before the move. 63 | pub static MI_TF_PTR_SHIFT: c_int = 2; 64 | unsafe fn mi_page_thread_free_collect(mut page: &mut mi_page_t) { 65 | let mut head: *mut mi_block_t; 66 | let mut tfree: mi_thread_free_t; 67 | let mut tfreex: mi_thread_free_t; 68 | loop { 69 | tfreex = { tfree = page.thread_free; tfree }; 70 | head = (tfree.head << MI_TF_PTR_SHIFT) as *mut mi_block_t; 71 | tfreex.head = 0; 72 | if mi_atomic_compare_exchange(&mut page.thread_free as 73 | *mut volatile_uintptr_t, 74 | tfreex.value, tfree.value) { 75 | break 76 | }; 77 | } 78 | // return if the list is empty 79 | if head.is_null() { return; } 80 | // find the tail 81 | let mut count = 1; 82 | let mut tail = head; 83 | let mut next: *mut mi_block_t; 84 | while !{ next = mi_block_next(page, tail); next }.is_null() { 85 | count += 1; 86 | tail = next; 87 | } 88 | // and prepend to the free list 89 | mi_block_set_next(page, tail, page.free); 90 | page.free = head; 91 | // update counts now 92 | mi_atomic_subtract(&mut page.thread_freed, count); 93 | page.used -= count; 94 | } 95 | #[no_mangle] 96 | pub unsafe extern "C" fn _mi_page_free_collect(mut page: &mut mi_page_t) { 97 | //if (page->free != NULL) return; // avoid expensive append 98 | // free the local free list 99 | if !page.local_free.is_null() { 100 | if __builtin_expect((page.free.is_null()), 1) != 0 { // usual case 101 | page.free = page.local_free; 102 | } else { 103 | let mut tail = page.free; 104 | let mut next: *mut mi_block_t; 105 | while !{ next = mi_block_next(page, tail); next }.is_null() { 106 | tail = next; 107 | } 108 | mi_block_set_next(page, tail, page.local_free); 109 | } 110 | page.local_free = ptr::null_mut(); 111 | } 112 | // and the thread free list 113 | if page.thread_free.head != 0 { // quick test to avoid an atomic operation 114 | mi_page_thread_free_collect(page); 115 | }; 116 | } 117 | /* ----------------------------------------------------------- 118 | Page fresh and retire 119 | ----------------------------------------------------------- */ 120 | // called from segments when reclaiming abandoned pages 121 | #[no_mangle] 122 | pub unsafe extern "C" fn _mi_page_reclaim(mut heap: *mut mi_heap_t, 123 | mut page: &mut mi_page_t) { 124 | _mi_page_free_collect(page); 125 | let mut pq = mi_page_queue(heap, page.block_size); 126 | mi_page_queue_push(heap, pq, page); 127 | } 128 | // allocate a fresh page from a segment 129 | unsafe fn mi_page_fresh_alloc(mut heap: &mut mi_heap_t, 130 | mut pq: *mut mi_page_queue_t, 131 | mut block_size: usize) -> *mut mi_page_t { 132 | let mut page = 133 | _mi_segment_page_alloc(block_size, &mut heap.tld.segments, 134 | &mut heap.tld.os); 135 | if page.is_null() { return ptr::null_mut(); } 136 | mi_page_init(heap, page, block_size, &mut heap.tld.stats); 137 | _mi_stat_increase(&mut ((heap).tld.stats.pages), 1); 138 | mi_page_queue_push(heap, pq, page); 139 | return page; 140 | } 141 | // Get a fresh page to use 142 | unsafe fn mi_page_fresh(mut heap: &mut mi_heap_t, 143 | mut pq: &mut mi_page_queue_t) -> *mut mi_page_t { 144 | // try to reclaim an abandoned page first 145 | let mut page = pq.first; 146 | if heap.no_reclaim == 0 && 147 | _mi_segment_try_reclaim_abandoned(heap, false, 148 | &mut heap.tld.segments) != 0 && 149 | page != pq.first { 150 | // we reclaimed, and we got lucky with a reclaimed page in our queue 151 | page = pq.first; 152 | if !page.free.is_null() { return page; }; 153 | } 154 | // otherwise allocate the page 155 | page = mi_page_fresh_alloc(heap, pq, pq.block_size); 156 | if page.is_null() { return
ptr::null_mut(); } 157 | return page; 158 | } 159 | /* ----------------------------------------------------------- 160 | Do any delayed frees 161 | (put there by other threads if they deallocated in a full page) 162 | ----------------------------------------------------------- */ 163 | #[no_mangle] 164 | pub unsafe extern "C" fn _mi_heap_delayed_free(mut heap: &mut mi_heap_t) { 165 | // take over the list 166 | let mut block: *mut mi_block_t; 167 | loop { 168 | block = heap.thread_delayed_free as *mut mi_block_t; 169 | if !(!block.is_null() && 170 | !mi_atomic_compare_exchange_ptr(&mut heap.thread_delayed_free 171 | as *mut *mut c_void, 172 | ptr::null(), 173 | block as *mut _)) { 174 | break 175 | }; 176 | } 177 | // and free them all 178 | while !block.is_null() { 179 | let mut next = 180 | mi_block_nextx(heap.cookie, 181 | block); // use internal free instead of regular one to keep stats etc correct 182 | _mi_free_delayed_block(block); 183 | block = next; 184 | }; 185 | } 186 | /* ----------------------------------------------------------- 187 | Unfull, abandon, free and retire 188 | ----------------------------------------------------------- */ 189 | // Move a page from the full list back to a regular list 190 | pub static MI_BIN_FULL: c_uint = 64 + 1; 191 | #[no_mangle] 192 | pub unsafe extern "C" fn _mi_page_unfull(mut page: &mut mi_page_t) { 193 | _mi_page_use_delayed_free(page, false); // to get the right queue 194 | if page.flags.in_full == 0 { return; } 195 | let mut heap = page.heap; 196 | let mut pqfull = &mut heap.pages[MI_BIN_FULL]; 197 | page.flags.in_full = false; 198 | let mut pq = mi_heap_page_queue_of(heap, page); 199 | page.flags.in_full = true; 200 | mi_page_queue_enqueue_from(pq, pqfull, page); 201 | } 202 | unsafe fn mi_page_to_full(mut page: &mut mi_page_t, 203 | mut pq: *mut mi_page_queue_t) { 204 | _mi_page_use_delayed_free(page, true); 205 | if page.flags.in_full != 0 { return; } 206 | mi_page_queue_enqueue_from(&mut page.heap.pages[MI_BIN_FULL], pq, page); 207 | mi_page_thread_free_collect(page); 208 | // try to collect right away in case another thread freed just before MI_USE_DELAYED_FREE was set 209 | } 210 | // Abandon a page with used blocks at the end of a thread. 211 | // Note: only call if it is ensured that no references exist from 212 | // the `page->heap->thread_delayed_free` into this page. 213 | // Currently only called through `mi_heap_collect_ex` which ensures this. 214 | #[no_mangle] 215 | pub unsafe extern "C" fn _mi_page_abandon(mut page: &mut mi_page_t, 216 | mut pq: *mut mi_page_queue_t) { 217 | // check there are no references left.. 
218 | // and then remove from our page list 219 | let mut segments_tld = &mut page.heap.tld.segments; 220 | mi_page_queue_remove(pq, page); 221 | // and abandon it 222 | _mi_segment_page_abandon(page, segments_tld); 223 | } 224 | // Free a page with no more free blocks 225 | // (huge pages are accounted for here as well) 226 | pub static MI_LARGE_SIZE_MAX: c_long = ((1 << (6 + (13 + 3))) / 8); 227 | #[no_mangle] 228 | pub unsafe extern "C" fn _mi_page_free(mut page: &mut mi_page_t, 229 | mut pq: *mut mi_page_queue_t, 230 | mut force: bool) { 231 | page.flags.has_aligned = false; 232 | if page.block_size > MI_LARGE_SIZE_MAX { 233 | _mi_stat_decrease(&mut ((page.heap).tld.stats.huge), page.block_size); 234 | } 235 | // remove from the page list 236 | // (no need to do _mi_heap_delayed_free first as all blocks are already free) 237 | let mut segments_tld = &mut page.heap.tld.segments; 238 | mi_page_queue_remove(pq, page); 239 | // and free it 240 | _mi_segment_page_free(page, force, segments_tld); 241 | } 242 | // Retire a page with no more used blocks. 243 | // Important to not retire too quickly though, as new 244 | // allocations might be coming. 245 | // Note: called from `mi_free`, and benchmarks often 246 | // trigger this due to freeing everything and then 247 | // allocating again, so be careful when changing this. 248 | #[no_mangle] 249 | pub unsafe extern "C" fn _mi_page_retire(mut page: &mut mi_page_t) { 250 | page.flags.has_aligned = false; 251 | // don't retire too often.. 252 | // (or we end up retiring and re-allocating most of the time) 253 | // NOTE: refine this more: we should not retire if this 254 | // is the only page left with free blocks. It is not clear 255 | // how to check this efficiently though... for now we just check 256 | // if its neighbours are almost fully used. 257 | if __builtin_expect((page.block_size <= MI_LARGE_SIZE_MAX), 1) != 0 { 258 | if mi_page_mostly_used(page.prev) != 0 && 259 | mi_page_mostly_used(page.next) != 0 { 260 | return; // don't retire after all 261 | }; 262 | } 263 | _mi_page_free(page, mi_page_queue_of(page), false); 264 | } 265 | /* ----------------------------------------------------------- 266 | Initialize the initial free list in a page. 267 | In secure mode we initialize a randomized list by 268 | alternating between slices. 269 | ----------------------------------------------------------- */ 270 | pub static MI_MIN_SLICES: c_long = 2; 271 | // at most 64 slices 272 | pub static MI_MAX_SLICE_SHIFT: usize = 6; 273 | // `mi_page_free_list_extend` below either initializes a sequential free 274 | // list, or, in secure mode, a randomized one: it sets up `slice_count` 275 | // slices to alternate between; each slice tracks the current start and the 276 | // available objects of its part of the list, and the final slice holds the 277 | // modulus too (todo: distribute evenly?)
278 | // The randomized free list is built by randomly threading through the 279 | // slices: set up the first element, then iterate through the rest, calling 280 | // random_shuffle only every MI_INTPTR_SIZE rounds. 284 | pub static MI_INTPTR_SIZE: c_long = (1 << 3); 285 | unsafe fn mi_page_free_list_extend(mut heap: &mut mi_heap_t, 286 | mut page: &mut mi_page_t, 287 | mut extend: usize, 288 | mut stats: &mut mi_stats_t) { 289 | let mut page_area = 290 | _mi_page_start(_mi_page_segment(page), page, ptr::null_mut()); 291 | let mut bsize = page.block_size; 292 | let mut start = 293 | mi_page_block_at(page, page_area, page.capacity); 294 | if extend < MI_MIN_SLICES || !mi_option_is_enabled(mi_option_secure) { 295 | // initialize a sequential free list 296 | let mut end = 297 | mi_page_block_at(page, page_area, page.capacity + extend - 1); 298 | let mut block = start; 299 | for mut i in 0..extend { 300 | let mut next = 301 | (block as *mut u8).add(bsize) as *mut mi_block_t; 302 | mi_block_set_next(page, block, next); 303 | block = next; 304 | } 305 | mi_block_set_next(page, end, ptr::null_mut()); 306 | page.free = start; 307 | } else { 308 | // initialize a randomized free list 309 | let mut shift = MI_MAX_SLICE_SHIFT; 310 | while (extend >> shift) == 0 { shift -= 1; } 311 | let mut slice_count = 1 << shift; 312 | let mut slice_extend = extend / slice_count; 313 | let mut blocks: [*mut mi_block_t; 64] = [ptr::null_mut(); 64]; 314 | let mut counts: [usize; 64] = [0; 64]; 315 | for mut i in 0..slice_count { 316 | blocks[i] = 317 | mi_page_block_at(page, page_area, 318 | page.capacity + i * slice_extend); 319 | counts[i] = slice_extend; 320 | } 321 | counts[slice_count - 1] += (extend % slice_count); // final slice holds the modulus 322 | let mut current = _mi_heap_random(heap) % slice_count; // set up the first element 323 | counts[current] -= 1; 324 | page.free = blocks[current]; 325 | let mut rnd = heap.random; 326 | for mut i in 1..extend { // and iterate through the rest 327 | let mut round = i % MI_INTPTR_SIZE; 328 | if round == 0 { rnd = _mi_random_shuffle(rnd); } 329 | // select a random next slice index 330 | let mut next = ((rnd >> 8 * round) & (slice_count - 1)); 331 | while counts[next] == 0 { // ensure it still has space 332 | next += 1; 333 | if next == slice_count { next = 0; }; 334 | } 335 | counts[next] -= 1; 336 | let mut block = blocks[current]; 337 | blocks[current] = 338 | (block as *mut u8).add(bsize) as *mut mi_block_t; // bump to the following block 339 | mi_block_set_next(page, block, blocks[next]); // and link the current block to it 340 | // and set next; note: we may have `current == next` 341 | current = next; 342 | } 343 | mi_block_set_next(page, blocks[current], ptr::null_mut()); // end of the list 344 | heap.random = _mi_random_shuffle(rnd); 345 | } 346 | // enable the new free list 347 | page.capacity += extend as u16; 348 | _mi_stat_increase(&mut (stats.committed), extend * page.block_size); 349 | } 350 | /* ----------------------------------------------------------- 351 | Page initialize and extend the capacity 352 | ----------------------------------------------------------- */ 353 | pub static MI_MAX_EXTEND_SIZE: c_long = (4 * 1024); // heuristic, one OS page seems to work well 354 | // Extend the capacity (up to reserved) by initializing a free list. 355 | // We do at most `MI_MAX_EXTEND` to avoid touching too much memory. 356 | // Note: we also experimented with "bump" allocation on the first 357 | // allocations but this did not speed up any benchmark (due to an 358 | // extra test in malloc? or cache effects?)
363 | pub static MI_MIN_EXTEND: c_long = 1; // extend at least by this many; the `lean` benchmark tests this (going from 1 to 8 increases rss by 50%) 364 | unsafe fn mi_page_extend_free(mut heap: *mut mi_heap_t, 365 | mut page: &mut mi_page_t, 366 | mut stats: &mut mi_stats_t) { 367 | if page.free.is_null() { 368 | 0 369 | } else { 370 | _mi_assert_fail("page->free == NULL", "src/page.c", 477, 371 | "mi_page_extend_free") 372 | } 373 | if page.local_free.is_null() { 374 | 0 375 | } else { 376 | _mi_assert_fail("page->local_free == NULL", "src/page.c", 478, 377 | "mi_page_extend_free") 378 | } 379 | if !page.free.is_null() { return; } 380 | if page.capacity >= page.reserved { return; } 381 | let mut page_size: usize; 382 | _mi_page_start(_mi_page_segment(page), page, &mut page_size); 383 | if page.is_reset != 0 { 384 | page.is_reset = false; 385 | _mi_stat_decrease(&mut (stats.reset), page_size); 386 | } 387 | _mi_stat_increase(&mut (stats.pages_extended), 1); 388 | // calculate the extend count: ensure we don't touch memory beyond the 389 | // page, to reduce page commit 390 | let mut extend = page.reserved - page.capacity; 391 | let mut max_extend = MI_MAX_EXTEND_SIZE / page.block_size; 392 | if max_extend < MI_MIN_EXTEND { max_extend = MI_MIN_EXTEND; } 393 | if extend > max_extend { 394 | extend = if max_extend == 0 { 1 } else { max_extend }; 395 | } 396 | // and append the extension to the free list 397 | mi_page_free_list_extend(heap, page, extend, stats); 398 | } 399 | // Initialize a fresh page 400 | unsafe fn mi_page_init(mut heap: *mut mi_heap_t, mut page: *mut mi_page_t, 401 | mut block_size: usize, mut stats: *mut mi_stats_t) { 402 | if !page.is_null() { 403 | 0 404 | } else { 405 | _mi_assert_fail("page != NULL", "src/page.c", 514, "mi_page_init") 406 | } 407 | let mut segment = _mi_page_segment(page); 408 | if !segment.is_null() { 409 | 0 410 | } else { 411 | _mi_assert_fail("segment != NULL", "src/page.c", 516, "mi_page_init") 412 | } 413 | // set fields 414 | let mut page_size: usize; 415 | _mi_segment_page_start(segment, page, &mut page_size); 416 | page.block_size = block_size; 417 | page.reserved = (page_size / block_size) as u16; 418 | page.cookie = _mi_heap_random(heap) | 1; 419 | // initialize an initial free list 420 | mi_page_extend_free(heap, page, stats); 421 | if mi_page_immediate_available(page) != 0 { 422 | 0 423 | } else { 424 | _mi_assert_fail("mi_page_immediate_available(page)", "src/page.c", 425 | 539, "mi_page_init") 426 | }; 427 | }
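The extend heuristic just shown, condensed into plain arithmetic (constants from the listing above; a sketch only, with an illustrative function name):

// Capacity-extension heuristic of `mi_page_extend_free`: touch at most about
// MI_MAX_EXTEND_SIZE bytes per call, but always make progress.
const MI_MAX_EXTEND_SIZE: usize = 4 * 1024; // one OS page works well (see above)
const MI_MIN_EXTEND: usize = 1;

fn extend_count(reserved: usize, capacity: usize, block_size: usize) -> usize {
    let room = reserved - capacity; // blocks left in the page
    let max_extend = (MI_MAX_EXTEND_SIZE / block_size).max(MI_MIN_EXTEND);
    room.min(max_extend)
}

fn main() {
    assert_eq!(extend_count(1024, 0, 8), 512);  // 4096/8 blocks per call
    assert_eq!(extend_count(1024, 1020, 8), 4); // capped by remaining room
    assert_eq!(extend_count(16, 0, 8192), 1);   // large blocks: still extend by one
}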
428 | /* ----------------------------------------------------------- 429 | Find pages with free blocks 430 | -------------------------------------------------------------*/ 431 | // Find a page with free blocks of `page->block_size`. 432 | unsafe fn mi_page_queue_find_free_ex(mut heap: &mut mi_heap_t, 433 | mut pq: &mut mi_page_queue_t) 434 | -> *mut mi_page_t { 435 | // search through the pages in "next fit" order 436 | let mut rpage = ptr::null_mut(); 437 | let mut count = 0; 438 | let mut page_free_count = 0; 439 | let mut page = pq.first; 440 | while !page.is_null() { // for each page 441 | let mut next = page.next; // remember next 442 | count += 1; 443 | // 0. collect freed blocks by us and other threads 444 | _mi_page_free_collect(page); 445 | // 1. if the page contains free blocks, we are done 446 | if mi_page_immediate_available(page) { 447 | // If all blocks are free, we might retire this page instead; do 448 | // this at most 8 times to bound allocation time. (Note: this can 449 | // happen if a page was earlier not retired due to having 450 | // neighbours that were mostly full, or due to concurrent frees.) 451 | if page_free_count < 8 && mi_page_all_free(page) != 0 { 452 | page_free_count += 1; 453 | if !rpage.is_null() { _mi_page_free(rpage, pq, false); } 454 | rpage = page; // pick this one, and keep looking 455 | page = next; 456 | continue ; 457 | } else { break ; }; 458 | } 459 | // 2. Try to extend 460 | if page.capacity < page.reserved { 461 | mi_page_extend_free(heap, page, &mut heap.tld.stats); 462 | break ; 463 | } 464 | // 3. If the page is completely full, move it to the `mi_pages_full` 465 | // queue so we don't visit long-lived pages too often. 466 | mi_page_to_full(page, pq); 467 | page = next; 468 | } 469 | _mi_stat_counter_increase(&mut (heap.tld.stats.searches), count); 470 | if page.is_null() { page = rpage; rpage = ptr::null_mut(); } 471 | if !rpage.is_null() { _mi_page_free(rpage, pq, false); } 472 | if page.is_null() { 473 | page = mi_page_fresh(heap, pq); 474 | } else { 475 | if pq.first == page { 476 | 0 477 | } else { 478 | _mi_assert_fail("pq->first == page", "src/page.c", 609, 479 | "mi_page_queue_find_free_ex") 480 | }; 481 | } 482 | return page; 483 | } 484 | // Find a page with free blocks of `size`. 485 | unsafe fn mi_find_free_page(mut heap: &mut mi_heap_t, mut size: usize) 486 | -> *mut mi_page_t { 487 | _mi_heap_delayed_free(heap); 488 | let mut pq = mi_page_queue(heap, size); 489 | let mut page = pq.first; // fast path 490 | if !page.is_null() { 491 | // in secure mode, we extend half the time to increase randomness 492 | if mi_option_get(mi_option_secure) >= 3 && 493 | page.capacity < page.reserved && 494 | ((_mi_heap_random(heap) & 1) == 1) { 495 | mi_page_extend_free(heap, page, &mut heap.tld.stats); 496 | } else { _mi_page_free_collect(page); } 497 | if mi_page_immediate_available(page) { return page; }; 498 | } 499 | return mi_page_queue_find_free_ex(heap, pq); 500 | }
501 | /* ----------------------------------------------------------- 502 | Users can register a deferred free function called 503 | when the `free` list is empty. Since the `local_free` list 504 | is separate, this is deterministically called after 505 | a certain number of allocations. 506 | ----------------------------------------------------------- */ 507 | pub static mut deferred_free: *mut mi_deferred_free_fun = ptr::null_mut(); 508 | #[no_mangle] 509 | pub unsafe extern "C" fn _mi_deferred_free(mut heap: &mut mi_heap_t, 510 | mut force: bool) { 511 | heap.tld.heartbeat += 1; 512 | if !deferred_free.is_null() { 513 | deferred_free(force, heap.tld.heartbeat); 514 | }; 515 | } 516 | #[no_mangle] 517 | pub unsafe extern "C" fn mi_register_deferred_free(mut fn_: 518 | *mut mi_deferred_free_fun) { 519 | deferred_free = fn_; 520 | } 521 | /* ----------------------------------------------------------- 522 | General allocation 523 | ----------------------------------------------------------- */ 524 | // A huge page is allocated directly without being in a queue 525 | unsafe fn mi_huge_page_alloc(mut heap: &mut mi_heap_t, mut size: usize) 526 | -> *mut mi_page_t { 527 | let mut block_size = 528 | _mi_wsize_from_size(size) * std::mem::size_of::<usize>(); 529 | let mut pq = mi_page_queue(heap, block_size); 530 | let mut page = mi_page_fresh_alloc(heap, pq, block_size); 531 | if !page.is_null() { 532 | _mi_stat_increase(&mut ((heap).tld.stats.huge), block_size); 533 | } 534 | return page; 535 | } 536 | // Generic allocation routine if the fast path (`alloc.c:mi_page_malloc`) does not succeed. 537 | #[no_mangle] 538 | pub unsafe extern "C" fn _mi_malloc_generic(mut heap: *mut mi_heap_t, 539 | mut size: usize) -> *mut c_void { 540 | // initialize if necessary 541 | if __builtin_expect((!mi_heap_is_initialized(heap)), 0) != 0 { 542 | mi_thread_init(); // calls `_mi_heap_init` in turn 543 | heap = mi_get_default_heap(); 544 | } 545 | // call potential deferred free routines 546 | _mi_deferred_free(heap, false); 547 | // huge allocation? 548 | let mut page: *mut mi_page_t; 549 | if __builtin_expect((size > MI_LARGE_SIZE_MAX), 0) != 0 { 550 | page = mi_huge_page_alloc(heap, size); 551 | } else { 552 | // otherwise find a page with free blocks in our size segregated queues 553 | page = mi_find_free_page(heap, size); 554 | } 555 | if page.is_null() { return ptr::null_mut(); } // out of memory 556 | // and try again, this time succeeding! (i.e. this should never recurse) 557 | return _mi_page_malloc(heap, page, size); 558 | }
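The registration mechanism above stores a raw function pointer in a `static mut`, mirroring the C. In safer Rust one might store the callback atomically; a sketch under that assumption, with illustrative names (DEFERRED_FREE, DeferredFreeFun are not the crate's API):

use std::sync::atomic::{AtomicUsize, Ordering};

type DeferredFreeFun = fn(force: bool, heartbeat: u64);

// Store the callback as a usize so registration is a single atomic store
// (0 means "no callback registered").
static DEFERRED_FREE: AtomicUsize = AtomicUsize::new(0);

fn register_deferred_free(f: DeferredFreeFun) {
    DEFERRED_FREE.store(f as usize, Ordering::Release);
}

fn deferred_free(force: bool, heartbeat: u64) {
    let raw = DEFERRED_FREE.load(Ordering::Acquire);
    if raw != 0 {
        // Safety: only values written by register_deferred_free are stored,
        // and a fn pointer round-trips through usize on supported targets.
        let f: DeferredFreeFun = unsafe { std::mem::transmute(raw) };
        f(force, heartbeat);
    }
}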
-------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/segment.rs: -------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | Copyright (c) 2018, Microsoft Research, Daan Leijen 3 | This is free software; you can redistribute it and/or modify it under the 4 | terms of the MIT license. A copy of the license can be found in the file 5 | "LICENSE" at the root of this distribution. 6 | -----------------------------------------------------------------------------*/ 7 | // memset 8 | /* ----------------------------------------------------------- 9 | Segment allocation 10 | We allocate pages inside big OS allocated "segments" 11 | (2mb on 64-bit). This is to avoid splitting VMAs on Linux 12 | and reduce fragmentation on other OSes. Each thread 13 | owns its own segments. 14 | 15 | Currently we have: 16 | - small pages (64kb), 32 in one segment 17 | - large pages (2mb), 1 in one segment 18 | - huge blocks > MI_LARGE_SIZE_MAX (256kb) are directly allocated by the OS 19 | 20 | It might be good to have "medium" pages too (of, say, 256kb) 21 | to reduce pressure on the virtual address space on 32-bit systems, 22 | but for now we choose the simpler implementation since this 23 | will only be a problem if multiple threads allocate many 24 | differently sized objects between 8kb and 2mb, which is not common. 25 | 26 | In any case the memory for a segment is virtual and only 27 | committed on demand (i.e. we are careful to not touch the memory 28 | until we actually allocate a block there). 29 | 30 | If a thread ends, it "abandons" pages with used blocks 31 | and there is an abandoned segment list whose segments can 32 | be reclaimed by still running threads, much like work-stealing. 33 | ----------------------------------------------------------- */ 34 | /* ----------------------------------------------------------- 35 | Queue of segments containing free pages 36 | ----------------------------------------------------------- */ 37 | // quick test to see if a segment is in the free pages queue 38 | unsafe fn mi_segment_is_in_free_queue(mut segment: &mut mi_segment_t, 39 | mut tld: &mut mi_segments_tld_t) 40 | -> bool { 41 | let mut in_queue = 42 | (!segment.next.is_null() || !segment.prev.is_null() || 43 | tld.small_free.first == segment); 44 | if in_queue { 45 | // for now we only support small pages 46 | if segment.page_kind == MI_PAGE_SMALL { 47 | 0 48 | } else { 49 | _mi_assert_fail("segment->page_kind == MI_PAGE_SMALL", 50 | "src/segment.c", 82, 51 | "mi_segment_is_in_free_queue") 52 | }; 53 | } 54 | return in_queue; 55 | } 56 | unsafe fn mi_segment_queue_is_empty(mut queue: &mi_segment_queue_t) -> bool { 57 | return (queue.first.is_null()); 58 | } 59 | unsafe fn mi_segment_queue_remove(mut queue: &mut mi_segment_queue_t, 60 | mut segment: &mut mi_segment_t) { 61 | if !segment.prev.is_null() { segment.prev.next = segment.next; } 62 | if !segment.next.is_null() { segment.next.prev = segment.prev; } 63 | if segment == queue.first { queue.first = segment.next; } 64 | if segment == queue.last { queue.last = segment.prev; } 65 | segment.next = ptr::null_mut(); 66 | segment.prev = ptr::null_mut(); 67 | } 68 | unsafe fn mi_segment_enqueue(mut queue: &mut mi_segment_queue_t, 69 | mut segment: &mut mi_segment_t) { 70 | segment.next = ptr::null_mut(); 71 | segment.prev = queue.last; 72 | if !queue.last.is_null() { 73 | queue.last.next = segment; 74 | queue.last = segment; 75 | } else { queue.last = { queue.first = segment; queue.first }; }; 76 | } 77 | // Start of the page available memory 78 | #[no_mangle] 79 | pub unsafe extern "C" fn _mi_segment_page_start(mut segment: 80 | *const mi_segment_t, 81 | mut page: &mi_page_t, 82 | mut page_size: *mut usize) 83 | -> *mut u8 { 84 | let mut psize = 85 | if segment.page_kind == MI_PAGE_HUGE { 86 | segment.segment_size 87 | } else { 88 | 1 << segment.page_shift 89 | }; 90 | let mut p = (segment as *mut u8).add(page.segment_idx * psize); 91 | // the first page starts after the segment info (and possible guard page) 92 | if page.segment_idx == 0 { 93 | p = p.add(segment.segment_info_size); 94 | psize -= segment.segment_info_size; 95 | } 96 | let mut secure = mi_option_get(mi_option_secure); 97 | // secure == 1: the last page has an os guard page at the end; 98 | // secure > 1: every page has an os guard page 99 | if secure > 1 || (secure == 1 && page.segment_idx == segment.capacity - 1) 100 | { 101 | psize -= _mi_os_page_size(); 102 | } 103 | if !page_size.is_null() { *page_size = psize; } 104 | return p; 105 | }
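Because segments are allocated MI_SEGMENT_SIZE-aligned, the inverse of the computation above is cheap: masking a block address yields its segment, and the offset's high bits yield the page index. A sketch of that math, using the shift constants that appear later in this file (illustrative stand-alone functions, not the transpiled ones):

const MI_SEGMENT_SHIFT: usize = 6 + 13 + 3;
const MI_SEGMENT_SIZE: usize = 1 << MI_SEGMENT_SHIFT;
const MI_SMALL_PAGE_SHIFT: usize = 13 + 3; // 64 KiB small pages

// Masking a block address gives its segment; the offset's high bits give the
// page index inside the segment.
fn segment_of(p: usize) -> usize {
    p & !(MI_SEGMENT_SIZE - 1)
}

fn page_index_of(p: usize) -> usize {
    (p & (MI_SEGMENT_SIZE - 1)) >> MI_SMALL_PAGE_SHIFT
}

fn main() {
    let seg = 0x7f3a_c000_0000usize; // some segment-aligned base
    let block = seg + 3 * (1 << MI_SMALL_PAGE_SHIFT) + 0x120;
    assert_eq!(segment_of(block), seg);
    assert_eq!(page_index_of(block), 3);
}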
106 | /* 107 | if (mi_option_is_enabled(mi_option_secure)) { 108 | // always reserve maximally so the protection falls on 109 | // the same address area, as we need to reuse them from the caches interchangeably. 110 | capacity = MI_SMALL_PAGES_PER_SEGMENT; 111 | } 112 | */ 113 | pub static MI_MAX_ALIGN_SIZE: c_int = 16; 114 | // In secure mode, we set up a protected page in between the segment info 115 | // and the page data (and one at the end of the segment); normally there 116 | // are no guard pages. 117 | pub static MI_SEGMENT_SIZE: c_long = (1 << (6 + (13 + 3))); 118 | unsafe fn mi_segment_size(mut capacity: usize, mut required: usize, 119 | mut pre_size: *mut usize, mut info_size: *mut usize) 120 | -> usize { 121 | let mut minsize = 122 | std::mem::size_of::<mi_segment_t>() + 123 | ((capacity - 1) * std::mem::size_of::<mi_page_t>()) + 16; /* padding */ 124 | let mut guardsize = 0; 125 | let mut isize = 0; 126 | if !mi_option_is_enabled(mi_option_secure) { 127 | isize = 128 | _mi_align_up_rs(minsize, 129 | if 16 > MI_MAX_ALIGN_SIZE { 130 | 16 131 | } else { MI_MAX_ALIGN_SIZE }); 132 | } else { 133 | let mut page_size = _mi_os_page_size(); 134 | isize = _mi_align_up_rs(minsize, page_size); 135 | guardsize = page_size; 136 | required = _mi_align_up_rs(required, page_size); 137 | } 138 | if !info_size.is_null() { *info_size = isize; } 139 | if !pre_size.is_null() { *pre_size = isize + guardsize; } 140 | return if required == 0 { 141 | MI_SEGMENT_SIZE 142 | } else { required + isize + 2 * guardsize }; 143 | } 144 | /* ----------------------------------------------------------- 145 | Segment caches 146 | We keep a small segment cache per thread to avoid repeated allocation 147 | and free in the OS if a program allocates memory and then frees 148 | all again repeatedly. (We tried a one-element cache but that 149 | proved too small for certain workloads.) 150 | ----------------------------------------------------------- */
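The admission rule the cache uses (see mi_segment_cache_full below) condenses to a two-clause predicate; a sketch for illustration, with the constants from the listing that follows:

const MI_SEGMENT_CACHE_MAX: usize = 16;
const MI_SEGMENT_CACHE_FRACTION: usize = 6;

// A freed segment is cached only while the cache is below an absolute cap
// and below 1/MI_SEGMENT_CACHE_FRACTION of the peak segment count.
fn cache_has_room(cache_count: usize, peak: usize) -> bool {
    cache_count < MI_SEGMENT_CACHE_MAX
        && cache_count * MI_SEGMENT_CACHE_FRACTION < peak
}

fn main() {
    assert!(cache_has_room(0, 1));     // always room for the first segment
    assert!(!cache_has_room(2, 12));   // 2*6 >= 12: full relative to the peak
    assert!(!cache_has_room(16, 600)); // absolute cap reached
}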
151 | unsafe fn mi_segments_count_add(mut inc: c_long, 152 | mut tld: &mut mi_segments_tld_t) { 153 | if inc >= 0 { 154 | _mi_stat_increase(&mut (tld.stats.segments), inc); 155 | } else { _mi_stat_decrease(&mut (tld.stats.segments), -inc); } 156 | tld.count += inc; 157 | if tld.count > tld.peak { tld.peak = tld.count; }; 158 | } 159 | unsafe fn mi_segments_peak(mut tld: &mut mi_segments_tld_t) -> usize { 160 | return tld.peak; 161 | } 162 | unsafe fn mi_segment_os_free(mut segment: *mut mi_segment_t, 163 | mut segment_size: usize, 164 | mut tld: &mut mi_segments_tld_t) { 165 | mi_segments_count_add(-1, tld); 166 | _mi_os_free(segment as *mut _, segment_size, tld.stats); 167 | } 168 | // The segment cache is limited to at most MI_SEGMENT_CACHE_MAX entries, and 169 | // to at most 1/MI_SEGMENT_CACHE_FRACTION of the peak number of segments in use 170 | unsafe fn mi_segment_cache_pop(mut tld: &mut mi_segments_tld_t) 171 | -> *mut mi_segment_t { 172 | let mut segment = tld.cache; 173 | if segment.is_null() { return ptr::null_mut(); } 174 | tld.cache_count -= 1; 175 | tld.cache = segment.next; 176 | segment.next = ptr::null_mut(); 177 | return segment; 178 | } 179 | pub static MI_SEGMENT_CACHE_MAX: c_long = 16; 180 | pub static MI_SEGMENT_CACHE_FRACTION: c_long = 6; 181 | unsafe fn mi_segment_cache_full(mut tld: &mut mi_segments_tld_t) -> bool { 182 | if tld.cache_count < MI_SEGMENT_CACHE_MAX && 183 | tld.cache_count * MI_SEGMENT_CACHE_FRACTION < mi_segments_peak(tld) 184 | { 185 | return false; 186 | } 187 | // take the opportunity to reduce the segment cache if it is too large (now) 188 | while tld.cache_count * MI_SEGMENT_CACHE_FRACTION >= 189 | mi_segments_peak(tld) + 1 { 190 | let mut segment = mi_segment_cache_pop(tld); 191 | if !segment.is_null() { 192 | mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld); 193 | }; 194 | } 195 | return true; 196 | } 197 | unsafe fn mi_segment_cache_push(mut segment: *mut mi_segment_t, 198 | mut tld: &mut mi_segments_tld_t) -> bool { 199 | if mi_segment_cache_full(tld) { return false; } 200 | if mi_option_is_enabled(mi_option_cache_reset) && 201 | !mi_option_is_enabled(mi_option_page_reset) { 202 | _mi_os_reset((segment as *mut u8).add(segment.segment_info_size), 203 | segment.segment_size - segment.segment_info_size); 204 | } 205 | segment.next = tld.cache; 206 | tld.cache = segment; 207 | tld.cache_count += 1; 208 | return true; 209 | } 210 | // called by ending threads to free cached segments 211 | #[no_mangle] 212 | pub unsafe extern "C" fn _mi_segment_thread_collect(mut tld: 213 | *mut mi_segments_tld_t) { 214 | let mut segment: *mut mi_segment_t; 215 | while !{ segment = mi_segment_cache_pop(tld); segment }.is_null() { 216 | mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld); 217 | }; 218 | } 219 | /* ----------------------------------------------------------- 220 | Segment allocation 221 | ----------------------------------------------------------- */ 222 | // Allocate a segment from the OS aligned to `MI_SEGMENT_SIZE`.
226 | unsafe fn mi_segment_alloc(mut required: usize, mut page_kind: mi_page_kind_t, 227 | mut page_shift: usize, 228 | mut tld: &mut mi_segments_tld_t, 229 | mut os_tld: *mut mi_os_tld_t) 230 | -> *mut mi_segment_t { 231 | // calculate needed sizes first 232 | let mut capacity: usize; 233 | if page_kind == MI_PAGE_HUGE { 234 | capacity = 1; 235 | } else { 236 | let mut page_size = 1 << page_shift; 237 | capacity = MI_SEGMENT_SIZE / page_size; 238 | } 239 | let mut info_size: usize; 240 | let mut pre_size: usize; 241 | let mut segment_size = 242 | mi_segment_size(capacity, required, &mut pre_size, &mut info_size); 243 | let mut page_size = 244 | if page_kind == MI_PAGE_HUGE { 245 | segment_size 246 | } else { 1 << page_shift }; 247 | // Allocate the segment 248 | let mut segment = ptr::null_mut(); 249 | // try to get it from our caches 250 | if segment_size == MI_SEGMENT_SIZE { 251 | segment = mi_segment_cache_pop(tld); 252 | if !segment.is_null() && mi_option_is_enabled(mi_option_secure) != 0 253 | && segment.page_kind != page_kind { 254 | _mi_os_unprotect(segment as *mut _, segment.segment_size); 255 | }; 256 | } 257 | // and otherwise allocate it from the OS 258 | if segment.is_null() { 259 | segment = 260 | _mi_os_alloc_aligned(segment_size, MI_SEGMENT_SIZE, os_tld) as 261 | *mut mi_segment_t; // in secure mode, we set up a protected page in between the segment info 262 | if segment.is_null() { 263 | return ptr::null_mut(); // and the page data 264 | } // and protect the last page too 265 | mi_segments_count_add(1, tld); // protect every page 266 | } 267 | memset(segment as *mut _, 0, info_size); 268 | if mi_option_is_enabled(mi_option_secure) { 269 | _mi_os_protect((segment as *mut u8).offset(info_size), 270 | (pre_size - info_size)); 271 | let mut os_page_size = _mi_os_page_size(); 272 | if mi_option_get(mi_option_secure) <= 1 { 273 | _mi_os_protect((segment as 274 | *mut u8).offset(segment_size).offset(-os_page_size), 275 | os_page_size); 276 | } else { 277 | for mut i in 0..capacity { 278 | _mi_os_protect((segment as 279 | *mut u8).offset((i + 1) * 280 | page_size).offset(-os_page_size), 281 | os_page_size); 282 | }; 283 | }; 284 | } 285 | segment.page_kind = page_kind; 286 | segment.capacity = capacity; 287 | segment.page_shift = page_shift; 288 | segment.segment_size = segment_size; 289 | segment.segment_info_size = pre_size; 290 | segment.thread_id = _mi_thread_id(); 291 | segment.cookie = _mi_ptr_cookie(segment as *const _); 292 | for mut i in 0..segment.capacity { (segment.pages[i]).segment_idx = i; } 293 | _mi_stat_increase(&mut (tld.stats.committed), segment.segment_info_size); 294 | //fprintf(stderr,"mimalloc: alloc segment at %p\n", (void*)segment); 295 | return segment; 296 | } 297 | // Available memory in a page 298 | unsafe fn mi_page_size(mut page: *const mi_page_t) -> usize { 299 | let mut psize: usize; 300 | _mi_segment_page_start(_mi_page_segment(page), page, &mut psize); 301 | return psize; 302 | } 303 | unsafe fn mi_segment_free(mut segment: *mut mi_segment_t, mut force: bool, 304 | mut tld: &mut mi_segments_tld_t) { 305 | //fprintf(stderr,"mimalloc: free segment at %p\n", (void*)segment); 306 | if !segment.is_null() { 307 | 0 308 | } else { 309 | _mi_assert_fail("segment != NULL", "src/segment.c", 343, 310 | "mi_segment_free") 311 | } 312 | if mi_segment_is_in_free_queue(segment, tld) { 313 | if segment.page_kind != MI_PAGE_SMALL { 314 | fprintf(stderr, 315 | "mimalloc: expecting small segment: %i, %p, %p, %p\n", 316 | segment.page_kind, segment.prev, 
segment.next, 317 | tld.small_free.first); 318 | fflush(stderr); 319 | } else { 320 | // for now we only support small pages 321 | mi_segment_queue_remove(&mut tld.small_free, segment); 322 | }; 323 | } 324 | if segment.next.is_null() { 325 | 0 326 | } else { 327 | _mi_assert_fail("segment->next == NULL", "src/segment.c", 356, 328 | "mi_segment_free") 329 | } 330 | if segment.prev.is_null() { 331 | 0 332 | } else { 333 | _mi_assert_fail("segment->prev == NULL", "src/segment.c", 357, 334 | "mi_segment_free") 335 | } 336 | _mi_stat_decrease(&mut (tld.stats.committed), segment.segment_info_size); 337 | segment.thread_id = 0; 338 | // update reset memory statistics 339 | for mut i in 0..segment.capacity { 340 | let mut page = &mut segment.pages[i]; // it is put in our cache 341 | if page.is_reset != 0 { 342 | page.is_reset = false; // otherwise return it to the OS 343 | _mi_stat_decrease(&mut (tld.stats.reset), mi_page_size(page)); 344 | }; 345 | } 346 | if segment.page_kind == MI_PAGE_HUGE { 347 | mi_segment_os_free(segment, segment.segment_size, tld); 348 | } else if force == 0 && mi_segment_cache_push(segment, tld) != 0 { 349 | } else { mi_segment_os_free(segment, MI_SEGMENT_SIZE, tld); }; 350 | } 351 | /* ----------------------------------------------------------- 352 | Free page management inside a segment 353 | ----------------------------------------------------------- */ 354 | unsafe fn mi_segment_has_free(mut segment: &mi_segment_t) -> bool { 355 | return (segment.used < segment.capacity); 356 | } 357 | unsafe fn mi_segment_find_free(mut segment: &mut mi_segment_t) 358 | -> *mut mi_page_t { 359 | for mut i in 0..segment.capacity { 360 | let mut page = &mut segment.pages[i]; 361 | if page.segment_in_use == 0 { return page; }; 362 | } 363 | if (false) != 0 { 364 | 0 365 | } else { 366 | _mi_assert_fail("false", "src/segment.c", 403, "mi_segment_find_free") 367 | } 368 | return ptr::null_mut(); 369 | } 370 | /* ----------------------------------------------------------- 371 | Free 372 | ----------------------------------------------------------- */ 373 | unsafe fn mi_segment_page_clear(mut segment: &mut mi_segment_t, 374 | mut page: &mut mi_page_t, 375 | mut stats: &mut mi_stats_t) { 376 | (stats); 377 | let mut inuse = page.capacity * page.block_size; 378 | _mi_stat_decrease(&mut (stats.committed), inuse); 379 | _mi_stat_decrease(&mut (stats.pages), 1); 380 | // reset the page memory to reduce memory pressure? 
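An aside before the listing continues: `_mi_os_reset`, used just below, is defined in os.rs/os.c rather than in this excerpt. On POSIX systems such a reset is typically an `madvise` call that tells the kernel the pages' contents are no longer needed while keeping the mapping. A hypothetical sketch, assuming the libc crate; the real implementation may differ (e.g. use MADV_FREE where available):

// Hypothetical sketch of what an `_mi_os_reset` could look like on POSIX.
unsafe fn os_reset(addr: *mut libc::c_void, size: usize) -> bool {
    // drop the page contents but keep the virtual mapping intact
    libc::madvise(addr, size, libc::MADV_DONTNEED) == 0
}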
381 | if page.is_reset == 0 && mi_option_is_enabled(mi_option_page_reset) != 0 { 382 | let mut psize: usize; // for stats we assume resetting the full page 383 | let mut start = _mi_segment_page_start(segment, page, &mut psize); 384 | _mi_stat_increase(&mut (stats.reset), psize); 385 | page.is_reset = true; 386 | if inuse > 0 { _mi_os_reset(start as *mut _, inuse); }; 387 | } 388 | // zero the page data 389 | let mut idx = page.segment_idx; // don't clear the index 390 | let mut is_reset = page.is_reset != 0; // don't clear the reset flag 391 | memset(page as *mut _, 0, std::mem::size_of::()); 392 | page.segment_idx = idx; 393 | page.segment_in_use = false; 394 | page.is_reset = is_reset != 0; 395 | segment.used -= 1; 396 | } 397 | #[no_mangle] 398 | pub unsafe extern "C" fn _mi_segment_page_free(mut page: *mut mi_page_t, 399 | mut force: bool, 400 | mut tld: 401 | &mut mi_segments_tld_t) { 402 | if !page.is_null() { 403 | 0 404 | } else { 405 | _mi_assert_fail("page != NULL", "src/segment.c", 445, 406 | "_mi_segment_page_free") 407 | } 408 | let mut segment = _mi_page_segment(page); 409 | // mark it as free now 410 | mi_segment_page_clear(segment, page, 411 | tld.stats); // no more used pages; remove from the free list and free the segment 412 | if segment.used == 0 { 413 | mi_segment_free(segment, force != 0, 414 | tld); // only abandoned pages; remove from free list and abandon 415 | } else { 416 | if segment.used == segment.abandoned { 417 | mi_segment_abandon(segment, tld); 418 | } else if segment.used + 1 == segment.capacity { 419 | // for now we only support small pages 420 | // move back to segments small pages free list 421 | mi_segment_enqueue(&mut tld.small_free, segment); 422 | }; 423 | }; 424 | } 425 | /* ----------------------------------------------------------- 426 | Abandonment 427 | ----------------------------------------------------------- */ 428 | // When threads terminate, they can leave segments with 429 | // live blocks (reached through other threads). 
Such segments 430 | // are "abandoned" and will be reclaimed by other threads to 431 | // reuse their pages and/or free them eventually 432 | pub static mut abandoned: *mut volatile_mi_segment_t = ptr::null_mut(); 433 | pub static mut abandoned_count: volatile_uintptr_t = 0; 434 | unsafe fn mi_segment_abandon(mut segment: &mut mi_segment_t, 435 | mut tld: &mut mi_segments_tld_t) { 436 | // remove the segment from the free page queue if needed 437 | if mi_segment_is_in_free_queue(segment, tld) { 438 | if segment.page_kind == MI_PAGE_SMALL { 439 | 0 440 | } else { 441 | _mi_assert_fail("segment->page_kind == MI_PAGE_SMALL", 442 | "src/segment.c", 488, "mi_segment_abandon") 443 | } // for now we only support small pages 444 | mi_segment_queue_remove(&mut tld.small_free, segment); 445 | } 446 | // all pages in the segment are abandoned; add it to the abandoned list 447 | segment.thread_id = 448 | 0; // all pages are abandoned, abandon the entire segment 449 | loop { 450 | segment.abandoned_next = 451 | abandoned as *mut mi_segment_t; // close enough 452 | if !!mi_atomic_compare_exchange_ptr(&mut abandoned as 453 | *mut *mut c_void, 454 | segment as *mut _, 455 | segment.abandoned_next as *mut _) 456 | { 457 | break 458 | }; // at most 1/8th of all outstanding (estimated) 459 | } // but at least 8 460 | mi_atomic_increment(&mut abandoned_count); 461 | _mi_stat_increase(&mut (tld.stats.segments_abandoned), 1); 462 | } 463 | #[no_mangle] 464 | pub unsafe extern "C" fn _mi_segment_page_abandon(mut page: *mut mi_page_t, 465 | mut tld: 466 | &mut mi_segments_tld_t) { 467 | if !page.is_null() { 468 | 0 469 | } else { 470 | _mi_assert_fail("page != NULL", "src/segment.c", 503, 471 | "_mi_segment_page_abandon") 472 | } 473 | let mut segment = _mi_page_segment(page); 474 | segment.abandoned += 1; 475 | _mi_stat_increase(&mut (tld.stats.pages_abandoned), 1); 476 | if segment.used == segment.abandoned { 477 | mi_segment_abandon(segment, tld); 478 | }; 479 | } 480 | #[no_mangle] 481 | pub unsafe extern "C" fn _mi_segment_try_reclaim_abandoned(mut heap: 482 | *mut mi_heap_t, 483 | mut try_all: bool, 484 | mut tld: 485 | &mut mi_segments_tld_t) 486 | -> bool { 487 | let mut reclaimed = 0; 488 | let mut atmost: usize; 489 | if try_all != 0 { 490 | atmost = abandoned_count + 16; 491 | } else { atmost = abandoned_count / 8; if atmost < 8 { atmost = 8; }; } 492 | // for `atmost` `reclaimed` abandoned segments... 493 | while atmost > reclaimed 494 | { // try to claim the head of the abandoned segments 495 | let mut segment: 496 | *mut mi_segment_t; // stop early if no more segments available 497 | loop { 498 | segment = abandoned as *mut mi_segment_t; // got it. 
499 | if !(!segment.is_null() && 500 | !mi_atomic_compare_exchange_ptr(&mut abandoned as 501 | *mut *mut c_void, 502 | segment.abandoned_next as 503 | *mut _, 504 | segment as *mut _)) { 505 | break 506 | }; 507 | } 508 | if segment.is_null() { 509 | break ; 510 | } 511 | // otherwise reclaim it 512 | mi_atomic_decrement(&mut abandoned_count); 513 | segment.thread_id = _mi_thread_id(); 514 | segment.abandoned_next = ptr::null_mut(); 515 | mi_segments_count_add(1, tld); 516 | _mi_stat_decrease(&mut (tld.stats.segments_abandoned), 1); 517 | // add its free pages to the current thread's small free queue 518 | if segment.page_kind == MI_PAGE_SMALL && 519 | mi_segment_has_free(segment) != 0 { 520 | mi_segment_enqueue(&mut tld.small_free, segment); 521 | } 522 | if segment.abandoned == segment.used { 523 | 0 524 | } else { 525 | _mi_assert_fail("segment->abandoned == segment->used", 526 | "src/segment.c", 548, 527 | "_mi_segment_try_reclaim_abandoned") 528 | } 529 | // add its abandoned pages to the current thread 530 | for mut i in 0..segment.capacity { 531 | let mut page = &mut segment.pages[i]; 532 | if page.segment_in_use != 0 { 533 | segment.abandoned -= 1; 534 | if page.next.is_null() { 535 | 0 536 | } else { 537 | _mi_assert_fail("page->next == NULL", "src/segment.c", 538 | 553, "_mi_segment_try_reclaim_abandoned") 539 | } 540 | _mi_stat_decrease(&mut (tld.stats.pages_abandoned), 1); 541 | // if everything is free by now, clear the page; 542 | // otherwise reclaim it into our heap 543 | if mi_page_all_free(page) { 544 | mi_segment_page_clear(segment, page, tld.stats); 545 | } else { _mi_page_reclaim(heap, page); }; 546 | }; 547 | } 548 | if segment.abandoned == 0 { 549 | 0 550 | } else { 551 | _mi_assert_fail("segment->abandoned == 0", "src/segment.c", 565, 552 | "_mi_segment_try_reclaim_abandoned") 553 | } 554 | if segment.used == 0 { 555 | mi_segment_free(segment, false, tld); 556 | } else { reclaimed += 1; }; 557 | } 558 | return (reclaimed > 0); 559 | } 560 | /* ----------------------------------------------------------- 561 | Small page allocation 562 | ----------------------------------------------------------- */ 563 | // Allocate a small page inside a segment.
559 | // Requires that the page has free pages 560 | unsafe fn mi_segment_small_page_alloc_in(mut segment: &mut mi_segment_t, 561 | mut tld: &mut mi_segments_tld_t) 562 | -> *mut mi_page_t { 563 | let mut page = 564 | mi_segment_find_free(segment); // if no more free pages, remove from the queue 565 | page.segment_in_use = true; 566 | segment.used += 1; 567 | if segment.used == segment.capacity { 568 | mi_segment_queue_remove(&mut tld.small_free, segment); 569 | } 570 | return page; 571 | } 572 | pub static MI_SMALL_PAGE_SHIFT: usize = (13 + 3); 573 | unsafe fn mi_segment_small_page_alloc(mut tld: &mut mi_segments_tld_t, 574 | mut os_tld: *mut mi_os_tld_t) 575 | -> *mut mi_page_t { 576 | if mi_segment_queue_is_empty(&mut tld.small_free) { 577 | let mut segment = 578 | mi_segment_alloc(0, MI_PAGE_SMALL, MI_SMALL_PAGE_SHIFT, tld, 579 | os_tld); 580 | if segment.is_null() { return ptr::null_mut(); } 581 | mi_segment_enqueue(&mut tld.small_free, segment); 582 | } 583 | return mi_segment_small_page_alloc_in(tld.small_free.first, tld); 584 | } 585 | /* ----------------------------------------------------------- 586 | large page allocation 587 | ----------------------------------------------------------- */ 588 | pub static MI_LARGE_PAGE_SHIFT: usize = (6 + (13 + 3)); 589 | unsafe fn mi_segment_large_page_alloc(mut tld: *mut mi_segments_tld_t, 590 | mut os_tld: *mut mi_os_tld_t) 591 | -> *mut mi_page_t { 592 | let mut segment = 593 | mi_segment_alloc(0, MI_PAGE_LARGE, MI_LARGE_PAGE_SHIFT, tld, os_tld); 594 | if segment.is_null() { return ptr::null_mut(); } 595 | segment.used = 1; 596 | let mut page = &mut segment.pages[0]; 597 | page.segment_in_use = true; 598 | return page; 599 | } 600 | pub static MI_SEGMENT_SHIFT: usize = (6 + (13 + 3)); 601 | unsafe fn mi_segment_huge_page_alloc(mut size: usize, 602 | mut tld: *mut mi_segments_tld_t, 603 | mut os_tld: *mut mi_os_tld_t) 604 | -> *mut mi_page_t { 605 | let mut segment = 606 | mi_segment_alloc(size, MI_PAGE_HUGE, MI_SEGMENT_SHIFT, tld, os_tld); 607 | if segment.is_null() { return ptr::null_mut(); } 608 | segment.used = 1; 609 | let mut page = &mut segment.pages[0]; 610 | page.segment_in_use = true; 611 | return page; 612 | } 613 | /* ----------------------------------------------------------- 614 | Page allocation and free 615 | ----------------------------------------------------------- */ 616 | pub static MI_SMALL_PAGE_SIZE: c_int = 1 << (13 + 3); 617 | // smaller blocks than 8kb (assuming MI_SMALL_PAGE_SIZE == 64kb) 618 | pub static MI_LARGE_SIZE_MAX: c_long = ((1 << (6 + (13 + 3))) / 8); 619 | #[no_mangle] 620 | pub unsafe extern "C" fn _mi_segment_page_alloc(mut block_size: usize, 621 | mut tld: 622 | *mut mi_segments_tld_t, 623 | mut os_tld: *mut mi_os_tld_t) 624 | -> *mut mi_page_t { 625 | let mut page: *mut mi_page_t; 626 | if block_size < MI_SMALL_PAGE_SIZE / 8 { 627 | page = mi_segment_small_page_alloc(tld, os_tld); 628 | } else if block_size < 629 | (MI_LARGE_SIZE_MAX - std::mem::size_of::()) { 630 | page = mi_segment_large_page_alloc(tld, os_tld); 631 | } else { page = mi_segment_huge_page_alloc(block_size, tld, os_tld); } 632 | return page; 633 | } 634 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/static.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /mimalloc-sys/rust_impl/citrus/stats.rs: 
-------------------------------------------------------------------------------- 1 | /* ---------------------------------------------------------------------------- 2 | Copyright (c) 2018, Microsoft Research, Daan Leijen 3 | This is free software; you can redistribute it and/or modify it under the 4 | terms of the MIT license. A copy of the license can be found in the file 5 | "LICENSE" at the root of this distribution. 6 | -----------------------------------------------------------------------------*/ 7 | // memset 8 | /* ----------------------------------------------------------- 9 | Merge thread statistics with the main one. 10 | ----------------------------------------------------------- */ 11 | #[no_mangle] 12 | pub unsafe extern "C" fn _mi_stats_done(mut stats: *mut mi_stats_t) { 13 | if stats == &mut _mi_stats_main { return; } 14 | mi_stats_add(&mut _mi_stats_main, stats); 15 | memset(stats as *mut _, 0, std::mem::size_of::()); 16 | } 17 | /* ----------------------------------------------------------- 18 | Statistics operations 19 | ----------------------------------------------------------- */ 20 | unsafe fn mi_stat_update(mut stat: &mut mi_stat_count_t, mut amount: i64) { 21 | if amount == 0 { 22 | return; // add atomically (for abandoned pages) 23 | } // racing.. it's ok 24 | let mut in_main = 25 | ((stat as *mut u8) >= (&mut _mi_stats_main as *mut u8) && 26 | (stat as *mut u8) < 27 | (&mut _mi_stats_main as 28 | *mut u8).offset(std::mem::size_of::())); // add thread local 29 | if in_main != 0 { 30 | let mut current = mi_atomic_add(&mut stat.current, amount); 31 | if current > stat.peak { stat.peak = stat.current; } 32 | if amount > 0 { 33 | mi_atomic_add(&mut stat.allocated, amount); 34 | } else { mi_atomic_add(&mut stat.freed, -amount); }; 35 | } else { 36 | stat.current += amount; 37 | if stat.current > stat.peak { stat.peak = stat.current; } 38 | if amount > 0 { 39 | stat.allocated += amount; 40 | } else { stat.freed += -amount; }; 41 | }; 42 | } 43 | #[no_mangle] 44 | pub unsafe extern "C" fn _mi_stat_counter_increase(mut stat: 45 | &mut mi_stat_counter_t, 46 | mut amount: usize) { 47 | // TODO: add thread safe code 48 | stat.count += 1; 49 | stat.total += amount; 50 | } 51 | #[no_mangle] 52 | pub unsafe extern "C" fn _mi_stat_increase(mut stat: *mut mi_stat_count_t, 53 | mut amount: usize) { 54 | mi_stat_update(stat, amount as i64); 55 | } 56 | #[no_mangle] 57 | pub unsafe extern "C" fn _mi_stat_decrease(mut stat: *mut mi_stat_count_t, 58 | mut amount: usize) { 59 | mi_stat_update(stat, -(amount as i64)); 60 | } 61 | // must be thread safe as it is called from stats_merge 62 | unsafe fn mi_stat_add(mut stat: &mut mi_stat_count_t, 63 | mut src: &mi_stat_count_t, mut unit: i64) { 64 | if stat == src { return; } 65 | mi_atomic_add(&mut stat.allocated, src.allocated * unit); 66 | mi_atomic_add(&mut stat.current, src.current * unit); 67 | mi_atomic_add(&mut stat.freed, src.freed * unit); 68 | mi_atomic_add(&mut stat.peak, src.peak * unit); 69 | } 70 | unsafe fn mi_stat_counter_add(mut stat: &mut mi_stat_counter_t, 71 | mut src: &mi_stat_counter_t, mut unit: i64) { 72 | if stat == src { return; } 73 | mi_atomic_add(&mut stat.total, src.total * unit); 74 | mi_atomic_add(&mut stat.count, src.count * unit); 75 | } 76 | // must be thread safe as it is called from stats_merge 77 | pub static MI_BIN_HUGE: c_long = 64; 78 | unsafe fn mi_stats_add(mut stats: &mut mi_stats_t, mut src: &mi_stats_t) { 79 | if stats == src { return; } 80 | mi_stat_add(&mut stats.segments, &src.segments, 1); 
81 | mi_stat_add(&mut stats.pages, &src.pages, 1); 82 | mi_stat_add(&mut stats.reserved, &src.reserved, 1); 83 | mi_stat_add(&mut stats.committed, &src.committed, 1); 84 | mi_stat_add(&mut stats.reset, &src.reset, 1); 85 | mi_stat_add(&mut stats.pages_abandoned, &src.pages_abandoned, 1); 86 | mi_stat_add(&mut stats.segments_abandoned, &src.segments_abandoned, 1); 87 | mi_stat_add(&mut stats.mmap_calls, &src.mmap_calls, 1); 88 | mi_stat_add(&mut stats.mmap_ensure_aligned, &src.mmap_ensure_aligned, 1); 89 | mi_stat_add(&mut stats.mmap_right_align, &src.mmap_right_align, 1); 90 | mi_stat_add(&mut stats.threads, &src.threads, 1); 91 | mi_stat_add(&mut stats.pages_extended, &src.pages_extended, 1); 92 | mi_stat_add(&mut stats.malloc, &src.malloc, 1); 93 | mi_stat_add(&mut stats.huge, &src.huge, 1); 94 | mi_stat_counter_add(&mut stats.searches, &src.searches, 1); 95 | for mut i in 0..(MI_BIN_HUGE + 1) { 96 | if (src.normal[i]).allocated > 0 || (src.normal[i]).freed > 0 { 97 | mi_stat_add(&mut stats.normal[i], &src.normal[i], 1); 98 | }; 99 | }; 100 | } 101 | /* ----------------------------------------------------------- 102 | Display statistics 103 | ----------------------------------------------------------- */ 104 | unsafe fn mi_printf_amount(mut n: i64, mut unit: i64, mut out: *mut FILE, 105 | mut fmt: *const i8) { 106 | let mut buf: [i8; 32]; 107 | let mut len = 32; 108 | let mut suffix = if unit <= 0 { " " } else { "b" }; 109 | let mut base = if unit == 0 { 1000f32 } else { 1024f32 }; 110 | if unit > 0 { n *= unit; } 111 | let mut pos = if n < 0 { -n } else { n } as f64; 112 | if pos < base { 113 | snprintf(buf, len, "%d %s ", n as c_int, suffix); 114 | } else if pos < base * base { 115 | snprintf(buf, len, "%.1f k%s", (n as f64) / base, suffix); 116 | } else if pos < base * base * base { 117 | snprintf(buf, len, "%.1f m%s", (n as f64) / (base * base), suffix); 118 | } else { 119 | snprintf(buf, len, "%.1f g%s", (n as f64) / (base * base * base), 120 | suffix); 121 | } 122 | _mi_fprintf(out, if fmt.is_null() { "%11s" } else { fmt }, buf); 123 | } 124 | unsafe fn mi_print_amount(mut n: i64, mut unit: i64, mut out: *mut FILE) { 125 | mi_printf_amount(n, unit, out, ptr::null_mut()); 126 | } 127 | unsafe fn mi_print_count(mut n: i64, mut unit: i64, mut out: *mut FILE) { 128 | if unit == 1 { 129 | _mi_fprintf(out, "%11s", " "); 130 | } else { mi_print_amount(n, 0, out); }; 131 | } 132 | unsafe fn mi_stat_print(mut stat: &mi_stat_count_t, mut msg: *const i8, 133 | mut unit: i64, mut out: *mut FILE) { 134 | _mi_fprintf(out, "%10s:", msg); 135 | mi_print_amount(stat.peak, unit, out); 136 | if unit != 0 { 137 | mi_print_amount(stat.allocated, unit, out); 138 | mi_print_amount(stat.freed, unit, out); 139 | } 140 | if unit > 0 { 141 | mi_print_amount(unit, if unit == 0 { 0 } else { 1 }, out); 142 | mi_print_count(stat.allocated, unit, out); 143 | if stat.allocated > stat.freed { 144 | _mi_fprintf(out, " not all freed!\n"); 145 | } else { _mi_fprintf(out, " ok\n"); }; 146 | } else { _mi_fprintf(out, "\n"); }; 147 | } 148 | unsafe fn mi_stat_counter_print(mut stat: &mi_stat_counter_t, 149 | mut msg: *const i8, mut out: *mut FILE) { 150 | let mut avg = 151 | if stat.count == 0 { 152 | 0f32 153 | } else { (stat.total as f64) / (stat.count as f64) }; 154 | _mi_fprintf(out, "%10s: %7.1f avg\n", msg, avg); 155 | } 156 | unsafe fn mi_print_header(mut out: *mut FILE) { 157 | _mi_fprintf(out, "%10s: %10s %10s %10s %10s %10s\n", "heap stats", 158 | "peak ", "total ", "freed ", "unit ", "count "); 159 | } 160 | 
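The amount printer that follows picks a suffix by repeated division: base 1000 for plain counts, base 1024 for byte quantities. For reference, the same unit logic in safe, self-contained Rust (an illustrative sketch, not the crate's API):

// Safe-Rust rendering of mi_printf_amount's unit logic: unit <= 0 formats a
// count (base 1000), unit > 0 formats n*unit bytes (base 1024).
fn format_amount(mut n: i64, unit: i64) -> String {
    let (suffix, base) = if unit <= 0 { (" ", 1000.0) } else { ("b", 1024.0) };
    if unit > 0 {
        n *= unit;
    }
    let pos = n.unsigned_abs() as f64;
    if pos < base {
        format!("{n} {suffix}")
    } else if pos < base * base {
        format!("{:.1} k{suffix}", n as f64 / base)
    } else if pos < base * base * base {
        format!("{:.1} m{suffix}", n as f64 / (base * base))
    } else {
        format!("{:.1} g{suffix}", n as f64 / (base * base * base))
    }
}

fn main() {
    assert_eq!(format_amount(1536, 1), "1.5 kb");
    assert_eq!(format_amount(42, 0), "42  ");
}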
unsafe fn mi_stats_print_bins(all: *mut mi_stat_count_t,
                              bins: *const mi_stat_count_t, max: usize,
                              fmt: *const i8, out: *mut FILE) {
    let mut found = false;
    let mut buf = [0i8; 64];
    for i in 0..(max + 1) {
        let bin = &*bins.add(i);
        if bin.allocated > 0 {
            found = true;
            let unit = _mi_bin_size(i as u8) as i64;
            snprintf(buf.as_mut_ptr(), 64,
                     b"%s %3zd\0".as_ptr() as *const i8, fmt, i);
            mi_stat_add(&mut *all, bin, unit);
            mi_stat_print(bin, buf.as_ptr(), unit, out);
        }
    }
    //snprintf(buf, 64, "%s all", fmt);
    //mi_stat_print(all, buf, 1);
    if found {
        _mi_fprintf(out, b"\n\0".as_ptr() as *const i8);
        mi_print_header(out);
    }
}

unsafe fn _mi_stats_print(stats: &mut mi_stats_t, secs: f64,
                          mut out: *mut FILE) {
    if out.is_null() { out = stderr; }
    mi_print_header(out);
    //_mi_fprintf(out,"(mimalloc built without statistics)\n");
    let mut normal: mi_stat_count_t = std::mem::zeroed();
    mi_stats_print_bins(&mut normal, stats.normal.as_ptr(), MI_BIN_HUGE,
                        b"normal\0".as_ptr() as *const i8, out);
    mi_stat_print(&normal, b"normal\0".as_ptr() as *const i8, 1, out);
    mi_stat_print(&stats.huge, b"huge\0".as_ptr() as *const i8, 1, out);
    let mut total: mi_stat_count_t = std::mem::zeroed();
    mi_stat_add(&mut total, &normal, 1);
    mi_stat_add(&mut total, &stats.huge, 1);
    mi_stat_print(&total, b"total\0".as_ptr() as *const i8, 1, out);
    _mi_fprintf(out, b"malloc requested: \0".as_ptr() as *const i8);
    mi_print_amount(stats.malloc.allocated, 1, out);
    _mi_fprintf(out, b"\n\n\0".as_ptr() as *const i8);
    mi_stat_print(&stats.committed, b"committed\0".as_ptr() as *const i8,
                  1, out);
    mi_stat_print(&stats.reserved, b"reserved\0".as_ptr() as *const i8,
                  1, out);
    mi_stat_print(&stats.reset, b"reset\0".as_ptr() as *const i8, -1, out);
    mi_stat_print(&stats.segments, b"segments\0".as_ptr() as *const i8,
                  -1, out);
    mi_stat_print(&stats.segments_abandoned,
                  b"-abandoned\0".as_ptr() as *const i8, -1, out);
    mi_stat_print(&stats.pages, b"pages\0".as_ptr() as *const i8, -1, out);
    mi_stat_print(&stats.pages_abandoned,
                  b"-abandoned\0".as_ptr() as *const i8, -1, out);
    mi_stat_print(&stats.pages_extended,
                  b"-extended\0".as_ptr() as *const i8, 0, out);
    mi_stat_print(&stats.mmap_calls, b"mmaps\0".as_ptr() as *const i8,
                  0, out);
    mi_stat_print(&stats.mmap_right_align,
                  b"mmap fast\0".as_ptr() as *const i8, 0, out);
    mi_stat_print(&stats.mmap_ensure_aligned,
                  b"mmap slow\0".as_ptr() as *const i8, 0, out);
    mi_stat_print(&stats.threads, b"threads\0".as_ptr() as *const i8,
                  0, out);
    mi_stat_counter_print(&stats.searches,
                          b"searches\0".as_ptr() as *const i8, out);
    if secs >= 0.0 {
        _mi_fprintf(out, b"%10s: %9.3f s\n\0".as_ptr() as *const i8,
                    b"elapsed\0".as_ptr() as *const i8, secs);
    }
    let mut user_time = 0.0;
    let mut sys_time = 0.0;
    let mut peak_rss = 0usize;
    let mut page_faults = 0usize;
    let mut page_reclaim = 0usize;
    mi_process_info(&mut user_time, &mut sys_time, &mut peak_rss,
                    &mut page_faults, &mut page_reclaim);
    _mi_fprintf(out,
                b"%10s: user: %.3f s, system: %.3f s, faults: %lu, reclaims: %lu, rss: \0".as_ptr() as *const i8,
                b"process\0".as_ptr() as *const i8, user_time, sys_time,
                page_faults as c_long, page_reclaim as c_long);
    mi_printf_amount(peak_rss as i64, 1, out,
                     b"%s\0".as_ptr() as *const i8);
    _mi_fprintf(out, b"\n\0".as_ptr() as *const i8);
}

pub static mut mi_time_start: f64 = 0.0;

unsafe fn mi_stats_get_default() -> *mut mi_stats_t {
    let heap = mi_heap_get_default();
    // heap->tld->stats in the C original
    &mut (*(*heap).tld).stats
}

#[no_mangle]
pub unsafe extern "C" fn mi_stats_reset() {
    let stats = mi_stats_get_default();
    if stats != &mut _mi_stats_main as *mut mi_stats_t {
        memset(stats as *mut _, 0, std::mem::size_of::<mi_stats_t>());
    }
    memset(&mut _mi_stats_main as *mut mi_stats_t as *mut _, 0,
           std::mem::size_of::<mi_stats_t>());
    mi_time_start = mi_clock_start();
}

unsafe fn mi_stats_print_ex(stats: *mut mi_stats_t, secs: f64,
                            out: *mut FILE) {
    if stats != &mut _mi_stats_main as *mut mi_stats_t {
        mi_stats_add(&mut _mi_stats_main, &*stats);
        memset(stats as *mut _, 0, std::mem::size_of::<mi_stats_t>());
    }
    _mi_stats_print(&mut _mi_stats_main, secs, out);
}

#[no_mangle]
pub unsafe extern "C" fn mi_stats_print(out: *mut FILE) {
    mi_stats_print_ex(mi_stats_get_default(), mi_clock_end(mi_time_start),
                      out);
}

#[no_mangle]
pub unsafe extern "C" fn mi_thread_stats_print(out: *mut FILE) {
    _mi_stats_print(&mut *mi_stats_get_default(),
                    mi_clock_end(mi_time_start), out);
}

// --------------------------------------------------------
// Basic timer for convenience
// --------------------------------------------------------
pub static CLOCK_REALTIME: c_int = 0;

// low resolution timer
unsafe fn mi_clock_now() -> f64 {
    let mut t: timespec = std::mem::zeroed();
    clock_gettime(CLOCK_REALTIME, &mut t);
    (t.tv_sec as f64) + 1e-9 * (t.tv_nsec as f64)
}

pub static mut mi_clock_diff: f64 = 0.0;

unsafe fn mi_clock_start() -> f64 {
    if mi_clock_diff == 0.0 {
        // Calibrate once: the time between two back-to-back reads
        // approximates the overhead of the clock itself.
        let t0 = mi_clock_now();
        mi_clock_diff = mi_clock_now() - t0;
    }
    mi_clock_now()
}

unsafe fn mi_clock_end(start: f64) -> f64 {
    let end = mi_clock_now();
    end - start - mi_clock_diff
}
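// ----------------------------------------------------------------------------
// Editor's note (a sketch, not part of the original file): `mi_clock_start`
// measures the cost of one clock read on first use and `mi_clock_end`
// subtracts it, so short intervals are not inflated by timer overhead.
// Intended usage:
#[allow(dead_code)]
unsafe fn clock_usage_sketch() {
    let start = mi_clock_start(); // first call also calibrates mi_clock_diff
    // ... work being timed ...
    let elapsed = mi_clock_end(start); // seconds, minus measured overhead
    let _ = elapsed;
}
// ----------------------------------------------------------------------------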
// --------------------------------------------------------
// Basic process statistics
// --------------------------------------------------------
// (the Windows path of the C original, where FILETIME is in 100 ns units,
// is not translated here)
unsafe fn timeval_secs(tv: &timeval) -> f64 {
    (tv.tv_sec as f64) + (tv.tv_usec as f64) * 1e-6
}

// RUSAGE_SELF comes from <sys/resource.h>; it is 0 on Linux and macOS.
pub static RUSAGE_SELF: c_int = 0;

unsafe fn mi_process_info(utime: &mut f64, stime: &mut f64,
                          peak_rss: &mut usize, page_faults: &mut usize,
                          page_reclaim: &mut usize) {
    let mut rusage: rusage = std::mem::zeroed();
    getrusage(RUSAGE_SELF, &mut rusage);
    // ru_maxrss is reported in KiB on Linux
    *peak_rss = (rusage.ru_maxrss as usize) * 1024;
    *page_faults = rusage.ru_majflt as usize;
    *page_reclaim = rusage.ru_minflt as usize;
    *utime = timeval_secs(&rusage.ru_utime);
    *stime = timeval_secs(&rusage.ru_stime);
}

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/alloc.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/alloc_aligned.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/alloc_override.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/alloc_override_osx.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/alloc_override_win.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/heap.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/init.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/lib.rs:
--------------------------------------------------------------------------------
/*
#![no_std]

use core::panic::PanicInfo;

use libc;
use libc_print::println;

#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    unsafe {
        libc::exit(1)
    }
}
*/

mod alloc_aligned;
mod alloc_override;
mod alloc;
mod init;
mod options;
mod page_queue;
mod segment;
mod stats;
mod alloc_override_osx;
mod alloc_override_win;
mod heap;
mod os;
mod page;
mod static_;

pub use crate::alloc_aligned::*;
pub use crate::alloc_override::*;
pub use crate::alloc::*;
pub use crate::init::*;
pub use crate::options::*;
pub use crate::page_queue::*;
pub use crate::segment::*;
pub use crate::stats::*;
pub use crate::alloc_override_osx::*;
pub use crate::alloc_override_win::*;
pub use crate::heap::*;
pub use crate::os::*;
pub use crate::page::*;
pub use crate::static_::*;

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/options.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/os.rs:
--------------------------------------------------------------------------------
use libc::*;

/// Rounds `sz` up to the nearest multiple of `alignment`, returning 0 if
/// the round-up would overflow the address space.
#[no_mangle]
pub extern "C" fn _mi_align_up_rs(sz: uintptr_t, alignment: size_t) -> uintptr_t {
    // Round down to a multiple of `alignment`...
    let mut x = sz.wrapping_div(alignment).wrapping_mul(alignment);
    // ...and bump up one step if that lost anything.
    if x < sz { x = x.wrapping_add(alignment) };
    // A wrapped result signals overflow; report it as 0.
    if x < sz { 0 } else { x }
}
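// Editor's note — a hypothetical unit test (not a file in this repository)
// exercising `_mi_align_up_rs` from os.rs above: it rounds `sz` up to a
// multiple of `alignment`, returning 0 if the round-up overflows.
#[test]
fn align_up_sketch() {
    assert_eq!(_mi_align_up_rs(0, 8), 0);
    assert_eq!(_mi_align_up_rs(1, 8), 8);  // round up
    assert_eq!(_mi_align_up_rs(8, 8), 8);  // already aligned
    assert_eq!(_mi_align_up_rs(9, 8), 16);
    assert_eq!(_mi_align_up_rs(usize::max_value() - 1, 8), 0); // overflow -> 0
}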
--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/page.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/page_queue.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/segment.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/static_.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/rust_impl/src/stats.rs:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/mimalloc-sys/src/lib.rs:
--------------------------------------------------------------------------------
//! Raw FFI wrapper over the mimalloc memory allocator
#![no_std]
use libc::{c_void, size_t};

extern "C" {
    pub fn mi_malloc(size: size_t) -> *mut c_void;
    pub fn mi_malloc_aligned(size: size_t, alignment: size_t) -> *mut c_void;

    pub fn mi_zalloc(size: size_t) -> *mut c_void;
    pub fn mi_zalloc_aligned(size: size_t, alignment: size_t) -> *mut c_void;

    pub fn mi_realloc(p: *mut c_void, newsize: size_t) -> *mut c_void;
    pub fn mi_realloc_aligned(p: *mut c_void, newsize: size_t, alignment: size_t) -> *mut c_void;

    pub fn mi_free(p: *mut c_void);
}

--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
max_width = 79

--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
#![no_std]
use core::alloc::{GlobalAlloc, Layout};

// Copied from https://github.com/rust-lang/rust/blob/master/src/libstd/sys_common/alloc.rs
#[cfg(all(any(
    target_arch = "x86",
    target_arch = "arm",
    target_arch = "mips",
    target_arch = "powerpc",
    target_arch = "powerpc64",
    target_arch = "asmjs",
    target_arch = "wasm32"
)))]
const MIN_ALIGN: usize = 8;
#[cfg(all(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "mips64",
    target_arch = "s390x",
    target_arch = "sparc64"
)))]
const MIN_ALIGN: usize = 16;

#[derive(Copy, Clone, Default, Debug)]
pub struct Mimalloc;

unsafe impl GlobalAlloc for Mimalloc {
    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ptr = if layout.align() <= MIN_ALIGN
            && layout.align() <= layout.size()
        {
            mimalloc_sys::mi_malloc(layout.size() as _)
        } else {
            mimalloc_sys::mi_malloc_aligned(
                layout.size() as _,
                layout.align() as _,
            )
        };

        ptr as *mut u8
    }

    #[inline]
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        let ptr = if layout.align() <= MIN_ALIGN
            && layout.align() <= layout.size()
        {
            mimalloc_sys::mi_zalloc(layout.size() as _)
        } else {
            mimalloc_sys::mi_zalloc_aligned(
                layout.size() as _,
                layout.align() as _,
            )
        };

        ptr as *mut u8
    }

    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        mimalloc_sys::mi_free(ptr as *mut _);
    }

    #[inline]
    unsafe fn realloc(
        &self,
        ptr: *mut u8,
        layout: Layout,
        new_size: usize,
    ) -> *mut u8 {
        let ptr = if layout.align() <= MIN_ALIGN
            && layout.align() <= layout.size()
        {
            mimalloc_sys::mi_realloc(ptr as *mut _, new_size)
        } else {
            mimalloc_sys::mi_realloc_aligned(
                ptr as *mut _,
                new_size,
                layout.align() as _,
            )
        };

        ptr as *mut u8
    }
}
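// Editor's note — a hypothetical helper mirroring the branch in `alloc`
// above (the names `takes_fast_path` and the hard-coded `MIN_ALIGN = 16`
// assume an x86_64 target); it shows which layouts use plain mi_malloc
// versus mi_malloc_aligned. The `align <= size` check matters because
// mi_malloc only guarantees alignment up to the allocation size.
#[cfg(test)]
mod fast_path_sketch {
    use core::alloc::Layout;
    const MIN_ALIGN: usize = 16; // x86_64 value from src/lib.rs

    fn takes_fast_path(layout: Layout) -> bool {
        layout.align() <= MIN_ALIGN && layout.align() <= layout.size()
    }

    #[test]
    fn over_aligned_layouts_use_aligned_entry_points() {
        assert!(takes_fast_path(Layout::from_size_align(32, 8).unwrap()));
        // align > size: must go through mi_malloc_aligned
        assert!(!takes_fast_path(Layout::from_size_align(8, 16).unwrap()));
        // align > MIN_ALIGN: likewise
        assert!(!takes_fast_path(Layout::from_size_align(64, 32).unwrap()));
    }
}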
--------------------------------------------------------------------------------
/tests/smoke.rs:
--------------------------------------------------------------------------------
use mimallocator::Mimalloc;
use std::alloc::{GlobalAlloc, Layout};

#[global_allocator]
static A: Mimalloc = Mimalloc;

#[test]
fn smoke_vec() {
    let mut a = Vec::new();
    a.push(3);
}

/// https://github.com/rust-lang/rust/issues/45955
#[test]
fn overaligned() {
    let size = 8;
    let align = 16; // greater than size
    let iterations = 100;
    unsafe {
        let pointers: Vec<_> = (0..iterations)
            .map(|_| {
                let ptr = Mimalloc
                    .alloc(Layout::from_size_align(size, align).unwrap());
                assert!(!ptr.is_null());
                ptr
            })
            .collect();
        for &ptr in &pointers {
            assert_eq!(
                (ptr as usize) % align,
                0,
                "Got a pointer less aligned than requested"
            )
        }

        // Clean up
        for &ptr in &pointers {
            Mimalloc
                .dealloc(ptr, Layout::from_size_align(size, align).unwrap())
        }
    }
}

#[test]
fn smoke_ffi() {
    unsafe {
        let ptr = mimalloc_sys::mi_malloc(4);
        *(ptr as *mut u32) = 0xDECADE;
        assert_eq!(*(ptr as *mut u32), 0xDECADE);
        mimalloc_sys::mi_free(ptr);
    }
}
--------------------------------------------------------------------------------
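// Editor's note — a possible additional smoke test (a sketch, not part of
// the repository) covering the realloc path, which tests/smoke.rs above
// does not exercise:
#[test]
fn smoke_ffi_realloc_sketch() {
    unsafe {
        let p = mimalloc_sys::mi_malloc(4);
        *(p as *mut u32) = 0xC0FFEE;
        // Grow the allocation; mi_realloc preserves the old contents.
        let q = mimalloc_sys::mi_realloc(p, 8);
        assert!(!q.is_null());
        assert_eq!(*(q as *mut u32), 0xC0FFEE);
        mimalloc_sys::mi_free(q);
    }
}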