├── .gitattributes ├── .github ├── bors.toml └── workflows │ ├── build.yml │ ├── changelog.yml │ └── properties │ └── build.properties.json ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── build.rs ├── cfail ├── Cargo.toml ├── src │ └── main.rs └── ui │ ├── freeze.rs │ └── freeze.stderr ├── rustfmt.toml ├── src ├── binary_heap.rs ├── bytes.rs ├── c_string.rs ├── de.rs ├── defmt.rs ├── deque.rs ├── history_buf.rs ├── index_map.rs ├── index_set.rs ├── len_type.rs ├── lib.rs ├── linear_map.rs ├── mpmc.rs ├── pool.rs ├── pool │ ├── arc.rs │ ├── boxed.rs │ ├── object.rs │ ├── treiber.rs │ └── treiber │ │ ├── cas.rs │ │ └── llsc.rs ├── ser.rs ├── slice.rs ├── sorted_linked_list.rs ├── spsc.rs ├── storage.rs ├── string │ ├── drain.rs │ └── mod.rs ├── test_helpers.rs ├── ufmt.rs └── vec │ ├── drain.rs │ └── mod.rs ├── suppressions.txt └── tests ├── cpass.rs └── tsan.rs /.gitattributes: -------------------------------------------------------------------------------- 1 | CHANGELOG.md merge=union 2 | 3 | -------------------------------------------------------------------------------- /.github/bors.toml: -------------------------------------------------------------------------------- 1 | block_labels = ["S-blocked"] 2 | delete_merged_branches = true 3 | status = ["ci"] 4 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | on: 3 | merge_group: 4 | pull_request: 5 | branches: [main] 6 | push: 7 | branches: [staging, trying] 8 | workflow_dispatch: 9 | 10 | env: 11 | CARGO_TERM_COLOR: always 12 | RUSTFLAGS: "-D warnings" 13 | 14 | jobs: 15 | # Run MIRI tests on nightly 16 | # NOTE first because it takes the longest to complete 17 | testmiri: 18 | name: testmiri 19 | runs-on: ubuntu-latest 20 | steps: 21 | - name: Checkout 22 | uses: 
actions/checkout@v4 23 | 24 | - name: Cache cargo dependencies 25 | uses: actions/cache@v3 26 | with: 27 | path: | 28 | ~/.cargo/bin/ 29 | ~/.cargo/registry/index/ 30 | ~/.cargo/registry/cache/ 31 | ~/.cargo/git/db/ 32 | key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 33 | restore-keys: | 34 | ${{ runner.OS }}-cargo- 35 | 36 | - name: Cache build output dependencies 37 | uses: actions/cache@v3 38 | with: 39 | path: target 40 | key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 41 | restore-keys: | 42 | ${{ runner.OS }}-build- 43 | 44 | - name: Install Rust 45 | uses: dtolnay/rust-toolchain@master 46 | with: 47 | toolchain: nightly 48 | components: miri 49 | 50 | - name: Run miri 51 | run: MIRIFLAGS=-Zmiri-ignore-leaks cargo miri test 52 | 53 | # Run cargo test 54 | test: 55 | name: test 56 | runs-on: ubuntu-latest 57 | steps: 58 | - name: Checkout 59 | uses: actions/checkout@v4 60 | 61 | - name: Cache cargo dependencies 62 | uses: actions/cache@v3 63 | with: 64 | path: | 65 | ~/.cargo/bin/ 66 | ~/.cargo/registry/index/ 67 | ~/.cargo/registry/cache/ 68 | ~/.cargo/git/db/ 69 | key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 70 | restore-keys: | 71 | ${{ runner.OS }}-cargo- 72 | 73 | - name: Cache build output dependencies 74 | uses: actions/cache@v3 75 | with: 76 | path: target 77 | key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 78 | restore-keys: | 79 | ${{ runner.OS }}-build- 80 | 81 | - name: Install Rust 82 | uses: dtolnay/rust-toolchain@master 83 | with: 84 | toolchain: stable 85 | 86 | - name: Run cargo test 87 | run: cargo test 88 | 89 | # Run cargo fmt --check 90 | style: 91 | name: style 92 | runs-on: ubuntu-latest 93 | steps: 94 | - name: Checkout 95 | uses: actions/checkout@v4 96 | 97 | - name: Install Rust 98 | uses: dtolnay/rust-toolchain@nightly 99 | with: 100 | components: rustfmt 101 | 102 | - name: cargo fmt --check 103 | run: cargo fmt --all -- --check 104 | 105 | clippy: 106 |
runs-on: ubuntu-latest 107 | steps: 108 | - name: Checkout 109 | uses: actions/checkout@v4 110 | - name: Install Rust 111 | uses: dtolnay/rust-toolchain@stable 112 | with: 113 | components: clippy 114 | targets: i686-unknown-linux-musl 115 | - run: cargo clippy --all --target i686-unknown-linux-musl --all-targets 116 | 117 | # Compilation check 118 | check: 119 | name: check 120 | runs-on: ubuntu-latest 121 | strategy: 122 | matrix: 123 | target: 124 | - x86_64-unknown-linux-gnu 125 | - i686-unknown-linux-musl 126 | - riscv32imc-unknown-none-elf 127 | - armv7r-none-eabi 128 | - thumbv6m-none-eabi 129 | - thumbv7m-none-eabi 130 | - thumbv8m.base-none-eabi 131 | - thumbv8m.main-none-eabi 132 | steps: 133 | - name: Checkout 134 | uses: actions/checkout@v4 135 | 136 | - name: Cache cargo dependencies 137 | uses: actions/cache@v3 138 | with: 139 | path: | 140 | - ~/.cargo/bin/ 141 | - ~/.cargo/registry/index/ 142 | - ~/.cargo/registry/cache/ 143 | - ~/.cargo/git/db/ 144 | key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 145 | restore-keys: | 146 | ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 147 | ${{ runner.OS }}-cargo- 148 | 149 | - name: Cache build output dependencies 150 | uses: actions/cache@v3 151 | with: 152 | path: target 153 | key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 154 | restore-keys: | 155 | ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 156 | ${{ runner.OS }}-build- 157 | 158 | - name: Install Rust with target (${{ matrix.target }}) 159 | uses: dtolnay/rust-toolchain@master 160 | with: 161 | toolchain: stable 162 | targets: ${{ matrix.target }} 163 | 164 | - name: cargo check 165 | run: | 166 | cargo check --target="${target}" 167 | cargo check --target="${target}" --features="alloc" 168 | 169 | # Needs native atomics, since `bytes` doesn't support `portable-atomic`. 
170 | if [ "${target}" != "riscv32imc-unknown-none-elf" ] && [ "${target}" != "thumbv6m-none-eabi" ]; then 171 | cargo check --target="${target}" --features="bytes" 172 | fi 173 | 174 | cargo check --target="${target}" --features="defmt" 175 | cargo check --target="${target}" --features="mpmc_large" 176 | cargo check --target="${target}" --features="portable-atomic-critical-section" 177 | cargo check --target="${target}" --features="serde" 178 | cargo check --target="${target}" --features="ufmt" 179 | env: 180 | target: ${{ matrix.target }} 181 | 182 | doc: 183 | name: doc 184 | runs-on: ubuntu-latest 185 | steps: 186 | - name: Checkout 187 | uses: actions/checkout@v4 188 | 189 | - name: Cache cargo dependencies 190 | uses: actions/cache@v3 191 | with: 192 | path: | 193 | - ~/.cargo/bin/ 194 | - ~/.cargo/registry/index/ 195 | - ~/.cargo/registry/cache/ 196 | - ~/.cargo/git/db/ 197 | key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 198 | restore-keys: | 199 | ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 200 | ${{ runner.OS }}-cargo- 201 | 202 | - name: Cache build output dependencies 203 | uses: actions/cache@v3 204 | with: 205 | path: target 206 | key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 207 | restore-keys: | 208 | ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 209 | ${{ runner.OS }}-build- 210 | 211 | - name: Get metadata 212 | id: metadata 213 | run: | 214 | set -euo pipefail 215 | 216 | docsrs_metadata="$(cargo metadata --format-version 1 | jq '.packages[] | select(.name == "heapless") | .metadata.docs.rs')" 217 | features=($(jq --raw-output '.features[]' <<< "${docsrs_metadata}")) 218 | rustdocflags=(-D warnings --cfg docsrs $(jq --raw-output '.["rustdoc-args"][]' <<< "${docsrs_metadata}")) 219 | targets=($(jq --raw-output '.targets[]' <<< "${docsrs_metadata}")) 220 | 221 | echo "features=${features[*]}" >> "${GITHUB_OUTPUT}" 222 | echo "rustdocflags=${rustdocflags[*]}" >> "${GITHUB_OUTPUT}" 223 | echo 
"targets=${targets[*]}" >> "${GITHUB_OUTPUT}" 224 | 225 | - name: Install nightly Rust with targets (${{ steps.metadata.outputs.targets }}) 226 | uses: dtolnay/rust-toolchain@nightly 227 | with: 228 | targets: ${{ steps.metadata.outputs.targets }} 229 | 230 | - name: cargo rustdoc 231 | run: | 232 | set -euo pipefail 233 | 234 | targets=(${targets}) 235 | 236 | for target in "${targets[@]}"; do 237 | set -x 238 | cargo rustdoc --target "${target}" --features "${features}" 239 | set +x 240 | done 241 | env: 242 | features: ${{ steps.metadata.outputs.features }} 243 | RUSTDOCFLAGS: ${{ steps.metadata.outputs.rustdocflags }} 244 | targets: ${{ steps.metadata.outputs.targets }} 245 | 246 | # Run cpass tests 247 | testcpass: 248 | name: testcpass 249 | runs-on: ubuntu-latest 250 | strategy: 251 | matrix: 252 | target: 253 | - x86_64-unknown-linux-gnu 254 | - i686-unknown-linux-musl 255 | buildtype: 256 | - "" 257 | - "--release" 258 | steps: 259 | - name: Checkout 260 | uses: actions/checkout@v4 261 | 262 | - name: Cache cargo dependencies 263 | uses: actions/cache@v3 264 | with: 265 | path: | 266 | - ~/.cargo/bin/ 267 | - ~/.cargo/registry/index/ 268 | - ~/.cargo/registry/cache/ 269 | - ~/.cargo/git/db/ 270 | key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 271 | restore-keys: | 272 | ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 273 | ${{ runner.OS }}-cargo- 274 | 275 | - name: Cache build output dependencies 276 | uses: actions/cache@v3 277 | with: 278 | path: target 279 | key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 280 | restore-keys: | 281 | ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 282 | ${{ runner.OS }}-build- 283 | 284 | - name: Install Rust with target (${{ matrix.target }}) 285 | uses: dtolnay/rust-toolchain@master 286 | with: 287 | toolchain: stable 288 | targets: ${{ matrix.target }} 289 | 290 | - name: cargo test 291 | run: cargo test --test cpass --target=${{ matrix.target }} ${{ matrix.buildtype }} 
292 | 293 | # Run thread sanitizer (TSan) tests 294 | testtsan: 295 | name: testtsan 296 | runs-on: ubuntu-latest 297 | strategy: 298 | matrix: 299 | target: 300 | - x86_64-unknown-linux-gnu 301 | buildtype: 302 | - "" 303 | - "--release" 304 | steps: 305 | - name: Checkout 306 | uses: actions/checkout@v4 307 | 308 | - name: Cache cargo dependencies 309 | uses: actions/cache@v3 310 | with: 311 | path: | 312 | ~/.cargo/bin/ 313 | ~/.cargo/registry/index/ 314 | ~/.cargo/registry/cache/ 315 | ~/.cargo/git/db/ 316 | key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 317 | restore-keys: | 318 | ${{ runner.OS }}-cargo- 319 | 320 | - name: Cache build output dependencies 321 | uses: actions/cache@v3 322 | with: 323 | path: target 324 | key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 325 | restore-keys: | 326 | ${{ runner.OS }}-build- 327 | 328 | - name: Install Rust nightly with target (${{ matrix.target }}) 329 | uses: dtolnay/rust-toolchain@master 330 | with: 331 | toolchain: nightly 332 | target: ${{ matrix.target }} 333 | components: rust-src 334 | 335 | - name: Export variables 336 | run: | 337 | echo RUSTFLAGS="-Z sanitizer=thread" >> $GITHUB_ENV 338 | echo TSAN_OPTIONS="suppressions=$(pwd)/suppressions.txt" >> $GITHUB_ENV 339 | echo $GITHUB_ENV 340 | 341 | - name: cargo test 342 | run: cargo test -Zbuild-std --test tsan --target=${{ matrix.target }} ${{ matrix.buildtype }} -- --test-threads=1 343 | 344 | # Run cfail tests on MSRV 345 | testcfail: 346 | name: testcfail 347 | runs-on: ubuntu-latest 348 | defaults: 349 | run: 350 | working-directory: cfail 351 | 352 | steps: 353 | - name: Checkout 354 | uses: actions/checkout@v4 355 | 356 | - name: Cache cargo dependencies 357 | uses: actions/cache@v3 358 | with: 359 | path: | 360 | ~/.cargo/bin/ 361 | ~/.cargo/registry/index/ 362 | ~/.cargo/registry/cache/ 363 | ~/.cargo/git/db/ 364 | key: ${{ runner.OS }}-cargo-${{ hashFiles('**/Cargo.lock') }} 365 |
restore-keys: | 366 | ${{ runner.OS }}-cargo- 367 | 368 | - name: Cache build output dependencies 369 | uses: actions/cache@v3 370 | with: 371 | path: target 372 | key: ${{ runner.OS }}-build-${{ hashFiles('**/Cargo.lock') }} 373 | restore-keys: | 374 | ${{ runner.OS }}-build- 375 | 376 | - name: Install Rust 377 | uses: dtolnay/rust-toolchain@stable 378 | 379 | - name: Run cargo 380 | run: cargo run 381 | -------------------------------------------------------------------------------- /.github/workflows/changelog.yml: -------------------------------------------------------------------------------- 1 | # Check that the changelog is updated for all changes. 2 | # 3 | # This is only run for PRs. 4 | 5 | on: 6 | pull_request: 7 | # opened, reopened, synchronize are the default types for pull_request. 8 | # labeled, unlabeled ensure this check is also run if a label is added or removed. 9 | types: [opened, reopened, labeled, unlabeled, synchronize] 10 | 11 | name: Changelog 12 | 13 | jobs: 14 | changelog: 15 | name: Changelog 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout sources 19 | uses: actions/checkout@v4 20 | 21 | - name: Check that changelog updated 22 | uses: dangoslen/changelog-enforcer@v3 23 | with: 24 | changeLogPath: CHANGELOG.md 25 | skipLabels: 'needs-changelog, skip-changelog' 26 | missingUpdateErrorMessage: 'Please add a changelog entry in the CHANGELOG.md file.' 
27 | env: 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/properties/build.properties.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "Build", 3 | "description": "Heapless Test Suite", 4 | "iconName": "rust", 5 | "categories": ["Rust"] 6 | } 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/*.rs.bk 2 | .#* 3 | Cargo.lock 4 | target/ 5 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # The Rust Code of Conduct 2 | 3 | ## Conduct 4 | 5 | **Contact**: [Libs team](https://github.com/rust-embedded/wg#the-libs-team) 6 | 7 | * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. 8 | * On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. 9 | * Please be kind and courteous. There's no need to be mean or rude. 10 | * Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. 11 | * Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. 12 | * We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. 
We interpret the term "harassment" as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups. 13 | * Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Libs team][team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back. 14 | * Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome. 15 | 16 | ## Moderation 17 | 18 | These are the policies for upholding our community's standards of conduct. 19 | 20 | 1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) 21 | 2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed. 22 | 3. Moderators will first respond to such remarks with a warning. 23 | 4. If the warning is unheeded, the user will be "kicked," i.e., kicked out of the communication channel to cool off. 24 | 5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded. 25 | 6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology. 26 | 7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed. 27 | 8. 
Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others. 28 | 29 | In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely. 30 | 31 | And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust. 32 | 33 | The enforcement policies listed above apply to all official embedded WG venues; including official IRC channels (#rust-embedded); GitHub repositories under rust-embedded; and all forums under rust-embedded.org (forum.rust-embedded.org). 
34 | 35 | *Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).* 36 | 37 | [team]: https://github.com/rust-embedded/wg#the-libs-team 38 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = [ 3 | "Jorge Aparicio ", 4 | "Per Lindgren ", 5 | "Emil Fresk ", 6 | ] 7 | categories = ["data-structures", "no-std"] 8 | description = "`static` friendly data structures that don't require dynamic memory allocation" 9 | documentation = "https://docs.rs/heapless" 10 | edition = "2021" 11 | keywords = ["static", "no-heap"] 12 | license = "MIT OR Apache-2.0" 13 | name = "heapless" 14 | repository = "https://github.com/rust-embedded/heapless" 15 | version = "0.9.0" 16 | 17 | [features] 18 | bytes = ["dep:bytes"] 19 | 20 | # Enable polyfilling of atomics via `portable-atomic`. 21 | # `portable-atomic` polyfills some functionality by default, but to get full atomics you must 22 | # enable one of its features to tell it how to do it. See `portable-atomic` documentation for details. 23 | portable-atomic = ["dep:portable-atomic"] 24 | 25 | # Enable polyfilling of atomics via portable-atomic, using critical section for locking 26 | portable-atomic-critical-section = ["dep:portable-atomic", "portable-atomic", "portable-atomic?/critical-section"] 27 | 28 | # Enable polyfilling of atomics via portable-atomic, using disabling interrupts for locking. 29 | # WARNING: this is only sound for single-core bare-metal privileged-mode targets! 30 | portable-atomic-unsafe-assume-single-core = ["dep:portable-atomic", "portable-atomic", "portable-atomic?/unsafe-assume-single-core"] 31 | 32 | # implement serde traits. 33 | serde = ["dep:serde"] 34 | 35 | # implement ufmt traits. 
36 | ufmt = ["dep:ufmt", "dep:ufmt-write"] 37 | 38 | # Implement `defmt::Format`. 39 | defmt = ["dep:defmt"] 40 | 41 | # Enable larger MPMC sizes. 42 | mpmc_large = [] 43 | 44 | # Implement some alloc Vec interoperability 45 | alloc = [] 46 | 47 | nightly = [] 48 | 49 | [dependencies] 50 | bytes = { version = "1", default-features = false, optional = true } 51 | portable-atomic = { version = "1.0", optional = true } 52 | hash32 = "0.3.0" 53 | serde = { version = "1", optional = true, default-features = false } 54 | ufmt = { version = "0.2", optional = true } 55 | ufmt-write = { version = "0.1", optional = true } 56 | defmt = { version = "1.0.1", optional = true } 57 | 58 | # for the pool module 59 | [target.'cfg(any(target_arch = "arm", target_pointer_width = "32", target_pointer_width = "64"))'.dependencies] 60 | stable_deref_trait = { version = "1", default-features = false } 61 | 62 | [dev-dependencies] 63 | critical-section = { version = "1.1", features = ["std"] } 64 | static_assertions = "1.1.0" 65 | 66 | [package.metadata.docs.rs] 67 | features = [ 68 | "bytes", 69 | "ufmt", 70 | "serde", 71 | "defmt", 72 | "mpmc_large", 73 | "portable-atomic-critical-section", 74 | "alloc", 75 | ] 76 | # for the pool module 77 | targets = ["i686-unknown-linux-gnu"] 78 | rustdoc-args = ["--cfg", "docsrs"] 79 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 Jorge Aparicio 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![crates.io](https://img.shields.io/crates/v/heapless.svg)](https://crates.io/crates/heapless) 2 | [![crates.io](https://img.shields.io/crates/d/heapless.svg)](https://crates.io/crates/heapless) 3 | 4 | # `heapless` 5 | 6 | > `static` friendly data structures that don't require dynamic memory allocation 7 | 8 | This project is developed and maintained by the [libs team]. 9 | 10 | ## [Documentation](https://docs.rs/heapless/latest/heapless) 11 | 12 | ## [Change log](CHANGELOG.md) 13 | 14 | ## Tests 15 | 16 | ``` console 17 | $ # run all 18 | $ cargo test --features serde 19 | 20 | $ # run only for example histbuf tests 21 | $ cargo test histbuf --features serde 22 | ``` 23 | 24 | ## License 25 | 26 | Licensed under either of 27 | 28 | - Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or 29 | http://www.apache.org/licenses/LICENSE-2.0) 30 | 31 | - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) 32 | 33 | at your option. 34 | 35 | ## Contribution 36 | 37 | Unless you explicitly state otherwise, any contribution intentionally submitted 38 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be 39 | dual licensed as above, without any additional terms or conditions. 
40 | 41 | [libs team]: https://github.com/rust-embedded/wg#the-libs-team 42 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | env, 3 | error::Error, 4 | fs, 5 | path::Path, 6 | process::{Command, ExitStatus, Stdio}, 7 | }; 8 | 9 | fn main() -> Result<(), Box> { 10 | println!("cargo::rustc-check-cfg=cfg(arm_llsc)"); 11 | println!("cargo::rustc-check-cfg=cfg(has_atomic_load_store)"); 12 | 13 | let target = env::var("TARGET")?; 14 | 15 | // Manually list targets that have atomic load/store, but no CAS. 16 | // Remove when `cfg(target_has_atomic_load_store)` is stable. 17 | // last updated nightly-2023-10-28 18 | match &target[..] { 19 | "armv4t-none-eabi" 20 | | "armv5te-none-eabi" 21 | | "avr-unknown-gnu-atmega328" 22 | | "bpfeb-unknown-none" 23 | | "bpfel-unknown-none" 24 | | "thumbv4t-none-eabi" 25 | | "thumbv5te-none-eabi" 26 | | "thumbv6m-none-eabi" => println!("cargo:rustc-cfg=has_atomic_load_store"), 27 | _ => {} 28 | }; 29 | 30 | // AArch64 instruction set contains `clrex` but not `ldrex` or `strex`; the 31 | // probe will succeed when we already know to deny this target from LLSC. 
32 | if !target.starts_with("aarch64") { 33 | match compile_probe(ARM_LLSC_PROBE) { 34 | Some(status) if status.success() => println!("cargo:rustc-cfg=arm_llsc"), 35 | _ => {} 36 | } 37 | } 38 | 39 | Ok(()) 40 | } 41 | 42 | const ARM_LLSC_PROBE: &str = r#" 43 | #![no_std] 44 | 45 | // `no_mangle` forces codegen, which makes llvm check the contents of the `asm!` macro 46 | #[no_mangle] 47 | unsafe fn asm() { 48 | core::arch::asm!("clrex"); 49 | } 50 | "#; 51 | 52 | // this function was taken from anyhow v1.0.63 build script 53 | // https://crates.io/crates/anyhow/1.0.63 (last visited 2022-09-02) 54 | // the code is licensed under 'MIT or APACHE-2.0' 55 | fn compile_probe(source: &str) -> Option { 56 | let rustc = env::var_os("RUSTC")?; 57 | let out_dir = env::var_os("OUT_DIR")?; 58 | let probefile = Path::new(&out_dir).join("probe.rs"); 59 | fs::write(&probefile, source).ok()?; 60 | 61 | // Make sure to pick up Cargo rustc configuration. 62 | let mut cmd = if let Some(wrapper) = env::var_os("RUSTC_WRAPPER") { 63 | let mut cmd = Command::new(wrapper); 64 | // The wrapper's first argument is supposed to be the path to rustc. 65 | cmd.arg(rustc); 66 | cmd 67 | } else { 68 | Command::new(rustc) 69 | }; 70 | 71 | cmd.stderr(Stdio::null()) 72 | .arg("--edition=2018") 73 | .arg("--crate-name=probe") 74 | .arg("--crate-type=lib") 75 | .arg("--out-dir") 76 | .arg(out_dir) 77 | .arg(probefile); 78 | 79 | if let Some(target) = env::var_os("TARGET") { 80 | cmd.arg("--target").arg(target); 81 | } 82 | 83 | // If Cargo wants to set RUSTFLAGS, use that. 
84 | if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") { 85 | if !rustflags.is_empty() { 86 | for arg in rustflags.split('\x1f') { 87 | cmd.arg(arg); 88 | } 89 | } 90 | } 91 | 92 | cmd.status().ok() 93 | } 94 | -------------------------------------------------------------------------------- /cfail/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Jorge Aparicio "] 3 | edition = "2018" 4 | name = "cfail" 5 | publish = false 6 | version = "0.1.0" 7 | 8 | [dependencies] 9 | heapless = { path = ".." } 10 | trybuild = "1.0.18" 11 | -------------------------------------------------------------------------------- /cfail/src/main.rs: -------------------------------------------------------------------------------- 1 | use trybuild::TestCases; 2 | 3 | fn main() { 4 | let t = TestCases::new(); 5 | t.compile_fail("ui/*.rs"); 6 | } 7 | -------------------------------------------------------------------------------- /cfail/ui/freeze.rs: -------------------------------------------------------------------------------- 1 | use heapless::spsc::Queue; 2 | 3 | fn main() { 4 | let mut q: Queue = Queue::new(); 5 | 6 | let (_p, mut _c) = q.split(); 7 | q.enqueue(0).unwrap(); 8 | _c.dequeue(); 9 | } 10 | -------------------------------------------------------------------------------- /cfail/ui/freeze.stderr: -------------------------------------------------------------------------------- 1 | error[E0499]: cannot borrow `q` as mutable more than once at a time 2 | --> $DIR/freeze.rs:7:5 3 | | 4 | 6 | let (_p, mut _c) = q.split(); 5 | | - first mutable borrow occurs here 6 | 7 | q.enqueue(0).unwrap(); 7 | | ^ second mutable borrow occurs here 8 | 8 | _c.dequeue(); 9 | | -- first borrow later used here 10 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | format_code_in_doc_comments 
= true 2 | -------------------------------------------------------------------------------- /src/bytes.rs: -------------------------------------------------------------------------------- 1 | //! Implementations of `bytes` traits for `heapless` types. 2 | 3 | use crate::{ 4 | len_type::LenType, 5 | vec::{VecInner, VecStorage}, 6 | }; 7 | use bytes::{buf::UninitSlice, BufMut}; 8 | 9 | unsafe impl + ?Sized, LenT: LenType> BufMut for VecInner { 10 | #[inline] 11 | fn remaining_mut(&self) -> usize { 12 | self.capacity() - self.len() 13 | } 14 | 15 | #[inline] 16 | unsafe fn advance_mut(&mut self, cnt: usize) { 17 | let len = self.len(); 18 | let pos = len + cnt; 19 | if pos >= self.capacity() { 20 | panic!("Advance out of range"); 21 | } 22 | self.set_len(pos); 23 | } 24 | 25 | #[inline] 26 | fn chunk_mut(&mut self) -> &mut UninitSlice { 27 | let len = self.len(); 28 | let ptr = self.as_mut_ptr(); 29 | unsafe { &mut UninitSlice::from_raw_parts_mut(ptr, self.capacity())[len..] } 30 | } 31 | } 32 | 33 | #[cfg(test)] 34 | mod tests { 35 | use crate::{Vec, VecView}; 36 | use bytes::BufMut; 37 | 38 | #[test] 39 | #[should_panic] 40 | fn buf_mut_advance_mut_out_of_bounds() { 41 | let mut vec: Vec = Vec::new(); 42 | unsafe { vec.advance_mut(9) }; 43 | } 44 | 45 | #[test] 46 | fn buf_mut_remaining_mut() { 47 | let mut vec: Vec = Vec::new(); 48 | assert_eq!(vec.remaining_mut(), 8); 49 | vec.push(42).unwrap(); 50 | assert_eq!(vec.remaining_mut(), 7); 51 | } 52 | 53 | #[test] 54 | fn buf_mut_chunk_mut() { 55 | let mut vec: Vec = Vec::new(); 56 | assert_eq!(vec.chunk_mut().len(), 8); 57 | unsafe { vec.advance_mut(1) }; 58 | assert_eq!(vec.chunk_mut().len(), 7); 59 | } 60 | 61 | #[test] 62 | #[should_panic] 63 | fn buf_mut_advance_mut_out_of_bounds_view() { 64 | let vec: &mut VecView = &mut Vec::::new(); 65 | unsafe { vec.advance_mut(9) }; 66 | } 67 | 68 | #[test] 69 | fn buf_mut_remaining_mut_view() { 70 | let vec: &mut VecView = &mut Vec::::new(); 71 | 
assert_eq!(vec.remaining_mut(), 8); 72 | vec.push(42).unwrap(); 73 | assert_eq!(vec.remaining_mut(), 7); 74 | } 75 | 76 | #[test] 77 | fn buf_mut_chunk_mut_view() { 78 | let vec: &mut VecView = &mut Vec::::new(); 79 | assert_eq!(vec.chunk_mut().len(), 8); 80 | unsafe { vec.advance_mut(1) }; 81 | assert_eq!(vec.chunk_mut().len(), 7); 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /src/c_string.rs: -------------------------------------------------------------------------------- 1 | //! A fixed capacity [`CString`](https://doc.rust-lang.org/std/ffi/struct.CString.html). 2 | 3 | use crate::{vec::Vec, CapacityError, LenType}; 4 | use core::{ 5 | borrow::Borrow, 6 | cmp::Ordering, 7 | error::Error, 8 | ffi::{c_char, CStr, FromBytesWithNulError}, 9 | fmt, 10 | ops::Deref, 11 | }; 12 | 13 | /// A fixed capacity [`CString`](https://doc.rust-lang.org/std/ffi/struct.CString.html). 14 | /// 15 | /// It stores up to `N - 1` non-nul characters with a trailing nul terminator. 16 | #[derive(Clone, Hash)] 17 | pub struct CString { 18 | inner: Vec, 19 | } 20 | 21 | impl CString { 22 | /// Creates a new C-compatible string with a terminating nul byte. 23 | /// 24 | /// ```rust 25 | /// use heapless::CString; 26 | /// 27 | /// // A fixed-size `CString` that can store up to 10 characters 28 | /// // including the nul terminator. 29 | /// let empty = CString::<10>::new(); 30 | /// 31 | /// assert_eq!(empty.as_c_str(), c""); 32 | /// assert_eq!(empty.to_str(), Ok("")); 33 | /// ``` 34 | pub fn new() -> Self { 35 | const { 36 | assert!(N > 0); 37 | } 38 | 39 | let mut inner = Vec::new(); 40 | 41 | // SAFETY: We just asserted that `N > 0`. 42 | unsafe { inner.push_unchecked(b'\0') }; 43 | 44 | Self { inner } 45 | } 46 | 47 | /// Unsafely creates a [`CString`] from a byte slice. 48 | /// 49 | /// This function will copy the provided `bytes` to a [`CString`] without 50 | /// performing any sanity checks. 
51 | /// 52 | /// The function will fail if `bytes.len() > N`. 53 | /// 54 | /// # Safety 55 | /// 56 | /// The provided slice **must** be nul-terminated and not contain any interior 57 | /// nul bytes. 58 | /// 59 | /// # Examples 60 | /// 61 | /// ```rust 62 | /// use heapless::CString; 63 | /// let mut c_string = unsafe { CString::<7>::from_bytes_with_nul_unchecked(b"string\0").unwrap() }; 64 | /// 65 | /// assert_eq!(c_string.to_str(), Ok("string")); 66 | /// ``` 67 | pub unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> Result { 68 | let mut inner = Vec::new(); 69 | 70 | inner.extend_from_slice(bytes)?; 71 | 72 | Ok(Self { inner }) 73 | } 74 | 75 | /// Instantiates a [`CString`] copying from the giving byte slice, assuming it is 76 | /// nul-terminated. 77 | /// 78 | /// Fails if the given byte slice has any interior nul byte, if the slice does not 79 | /// end with a nul byte, or if the byte slice can't fit in `N`. 80 | pub fn from_bytes_with_nul(bytes: &[u8]) -> Result { 81 | let mut string = Self::new(); 82 | 83 | string.extend_from_bytes(bytes)?; 84 | 85 | Ok(string) 86 | } 87 | 88 | /// Builds a [`CString`] copying from a raw C string pointer. 89 | /// 90 | /// # Safety 91 | /// 92 | /// - The memory pointed to by `ptr` must contain a valid nul terminator at the 93 | /// end of the string. 94 | /// - `ptr` must be valid for reads of bytes up to and including the nul terminator. 95 | /// This means in particular: 96 | /// - The entire memory range of this `CStr` must be contained within a single allocated object! 97 | /// - `ptr` must be non-nul even for a zero-length `CStr`. 
98 | /// 99 | /// # Example 100 | /// 101 | /// ```rust 102 | /// use core::ffi::{c_char, CStr}; 103 | /// use heapless::CString; 104 | /// 105 | /// const HELLO_PTR: *const c_char = { 106 | /// const BYTES: &[u8] = b"Hello, world!\0"; 107 | /// BYTES.as_ptr().cast() 108 | /// }; 109 | /// 110 | /// let copied = unsafe { CString::<14>::from_raw(HELLO_PTR) }.unwrap(); 111 | /// 112 | /// assert_eq!(copied.to_str(), Ok("Hello, world!")); 113 | /// ``` 114 | pub unsafe fn from_raw(ptr: *const c_char) -> Result { 115 | // SAFETY: The given pointer to a string is assumed to be nul-terminated. 116 | Self::from_bytes_with_nul(unsafe { CStr::from_ptr(ptr).to_bytes_with_nul() }) 117 | } 118 | 119 | /// Converts the [`CString`] to a [`CStr`] slice. 120 | #[inline] 121 | pub fn as_c_str(&self) -> &CStr { 122 | unsafe { CStr::from_bytes_with_nul_unchecked(&self.inner) } 123 | } 124 | 125 | /// Calculates the length of `self.inner` would have if it appended `bytes`. 126 | fn capacity_with_bytes(&self, bytes: &[u8]) -> Option { 127 | match bytes.last() { 128 | None => None, 129 | Some(0) if bytes.len() < 2 => None, 130 | Some(0) => { 131 | // `bytes` is nul-terminated and so is `self.inner`. 132 | // Adding up both would account for 2 nul bytes when only a single byte 133 | // would end up in the resulting CString. 134 | Some(self.inner.len() + bytes.len() - 1) 135 | } 136 | Some(_) => { 137 | // No terminating nul byte in `bytes` but there's one in 138 | // `self.inner`, so the math lines up nicely. 139 | // 140 | // In the case that `bytes` has a nul byte anywhere else, we would 141 | // error after `memchr` is called. So there's no problem. 142 | Some(self.inner.len() + bytes.len()) 143 | } 144 | } 145 | } 146 | 147 | /// Extends the [`CString`] with the given bytes. 148 | /// 149 | /// This function fails if the [`CString`] would not have enough capacity to append the bytes or 150 | /// if the bytes contain an interior nul byte. 
151 | /// 152 | /// # Example 153 | /// 154 | /// ```rust 155 | /// use heapless::CString; 156 | /// 157 | /// let mut c_string = CString::<10>::new(); 158 | /// 159 | /// c_string.extend_from_bytes(b"hey").unwrap(); 160 | /// c_string.extend_from_bytes(b" there\0").unwrap(); 161 | /// 162 | /// assert_eq!(c_string.to_str(), Ok("hey there")); 163 | /// ``` 164 | pub fn extend_from_bytes(&mut self, bytes: &[u8]) -> Result<(), ExtendError> { 165 | let Some(capacity) = self.capacity_with_bytes(bytes) else { 166 | return Ok(()); 167 | }; 168 | 169 | if capacity > N { 170 | // Cannot store these bytes due to an insufficient capacity. 171 | return Err(CapacityError.into()); 172 | } 173 | 174 | match CStr::from_bytes_with_nul(bytes) { 175 | Ok(_) => { 176 | // SAFETY: A string is left in a valid state because appended bytes are nul-terminated. 177 | unsafe { self.extend_from_bytes_unchecked(bytes) }?; 178 | 179 | Ok(()) 180 | } 181 | Err(FromBytesWithNulError::InteriorNul { position }) => { 182 | Err(ExtendError::InteriorNul { position }) 183 | } 184 | Err(FromBytesWithNulError::NotNulTerminated) => { 185 | // Because given bytes has no nul byte anywhere, we insert the bytes and 186 | // then add the nul byte terminator. 187 | // 188 | // We've ensured above that we have enough space left to insert these bytes, 189 | // so the operations below must succeed. 190 | // 191 | // SAFETY: We append a missing nul terminator right below. 192 | unsafe { 193 | self.extend_from_bytes_unchecked(bytes).unwrap(); 194 | self.inner.push_unchecked(0); 195 | }; 196 | 197 | Ok(()) 198 | } 199 | } 200 | } 201 | 202 | /// Removes the nul byte terminator from the inner buffer. 203 | /// 204 | /// # Safety 205 | /// 206 | /// Callers must ensure to add the nul terminator back after this function is called. 207 | #[inline] 208 | unsafe fn pop_terminator(&mut self) { 209 | debug_assert_eq!(self.inner.last(), Some(&0)); 210 | 211 | // SAFETY: We always have the nul terminator at the end. 
212 | unsafe { self.inner.pop_unchecked() }; 213 | } 214 | 215 | /// Removes the existing nul terminator and then extends `self` with the given bytes. 216 | /// 217 | /// # Safety 218 | /// 219 | /// If `additional` is not nul-terminated, the [`CString`] is left non nul-terminated, which is 220 | /// an invalid state. Caller must ensure that either `additional` has a terminating nul byte 221 | /// or ensure to append a trailing nul terminator. 222 | unsafe fn extend_from_bytes_unchecked( 223 | &mut self, 224 | additional: &[u8], 225 | ) -> Result<(), CapacityError> { 226 | // SAFETY: A caller is responsible for adding a nul terminator back to the inner buffer. 227 | unsafe { self.pop_terminator() } 228 | 229 | self.inner.extend_from_slice(additional) 230 | } 231 | 232 | /// Returns the underlying byte slice including the trailing nul terminator. 233 | /// 234 | /// # Example 235 | /// 236 | /// ```rust 237 | /// use heapless::CString; 238 | /// 239 | /// let mut c_string = CString::<5>::new(); 240 | /// c_string.extend_from_bytes(b"abc").unwrap(); 241 | /// 242 | /// assert_eq!(c_string.as_bytes_with_nul(), b"abc\0"); 243 | /// ``` 244 | #[inline] 245 | pub fn as_bytes_with_nul(&self) -> &[u8] { 246 | &self.inner 247 | } 248 | 249 | /// Returns the underlying byte slice excluding the trailing nul terminator. 
250 | /// 251 | /// # Example 252 | /// 253 | /// ```rust 254 | /// use heapless::CString; 255 | /// 256 | /// let mut c_string = CString::<5>::new(); 257 | /// c_string.extend_from_bytes(b"abc").unwrap(); 258 | /// 259 | /// assert_eq!(c_string.as_bytes(), b"abc"); 260 | /// ``` 261 | #[inline] 262 | pub fn as_bytes(&self) -> &[u8] { 263 | &self.inner[..self.inner.len() - 1] 264 | } 265 | } 266 | 267 | impl AsRef for CString { 268 | #[inline] 269 | fn as_ref(&self) -> &CStr { 270 | self.as_c_str() 271 | } 272 | } 273 | 274 | impl Borrow for CString { 275 | #[inline] 276 | fn borrow(&self) -> &CStr { 277 | self.as_c_str() 278 | } 279 | } 280 | 281 | impl Default for CString { 282 | #[inline] 283 | fn default() -> Self { 284 | Self::new() 285 | } 286 | } 287 | 288 | impl Deref for CString { 289 | type Target = CStr; 290 | 291 | #[inline] 292 | fn deref(&self) -> &Self::Target { 293 | self.as_c_str() 294 | } 295 | } 296 | 297 | impl PartialEq> 298 | for CString 299 | { 300 | #[inline] 301 | fn eq(&self, rhs: &CString) -> bool { 302 | self.as_c_str() == rhs.as_c_str() 303 | } 304 | } 305 | 306 | impl Eq for CString {} 307 | 308 | impl PartialOrd> 309 | for CString 310 | { 311 | #[inline] 312 | fn partial_cmp(&self, rhs: &CString) -> Option { 313 | self.as_c_str().partial_cmp(rhs.as_c_str()) 314 | } 315 | } 316 | 317 | impl Ord for CString { 318 | #[inline] 319 | fn cmp(&self, rhs: &Self) -> Ordering { 320 | self.as_c_str().cmp(rhs.as_c_str()) 321 | } 322 | } 323 | 324 | impl fmt::Debug for CString { 325 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 326 | self.as_c_str().fmt(f) 327 | } 328 | } 329 | 330 | /// An error to extend [`CString`] with bytes. 331 | #[derive(Debug)] 332 | pub enum ExtendError { 333 | /// The capacity of the [`CString`] is too small. 334 | Capacity(CapacityError), 335 | /// An invalid interior nul byte found in a given byte slice. 336 | InteriorNul { 337 | /// A position of a nul byte. 
338 | position: usize, 339 | }, 340 | } 341 | 342 | impl Error for ExtendError {} 343 | 344 | impl fmt::Display for ExtendError { 345 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 346 | match self { 347 | Self::Capacity(error) => write!(f, "{error}"), 348 | Self::InteriorNul { position } => write!(f, "interior nul byte at {position}"), 349 | } 350 | } 351 | } 352 | 353 | impl From for ExtendError { 354 | fn from(error: CapacityError) -> Self { 355 | Self::Capacity(error) 356 | } 357 | } 358 | 359 | #[cfg(test)] 360 | mod tests { 361 | use super::*; 362 | 363 | #[test] 364 | fn empty() { 365 | let empty = CString::<1>::new(); 366 | 367 | assert_eq!(empty.as_c_str(), c""); 368 | assert_eq!(empty.as_bytes(), &[]); 369 | assert_eq!(empty.to_str(), Ok("")); 370 | } 371 | 372 | #[test] 373 | fn create_with_capacity_error() { 374 | assert!(CString::<1>::from_bytes_with_nul(b"a\0").is_err()); 375 | } 376 | 377 | #[test] 378 | fn extend_no_byte() { 379 | let mut c_string = CString::<1>::new(); 380 | 381 | c_string.extend_from_bytes(b"").unwrap(); 382 | } 383 | 384 | #[test] 385 | fn extend_from_bytes() { 386 | let mut c_string = CString::<11>::new(); 387 | assert_eq!(c_string.to_str(), Ok("")); 388 | 389 | c_string.extend_from_bytes(b"hello").unwrap(); 390 | 391 | assert_eq!(c_string.to_str(), Ok("hello")); 392 | 393 | // Call must fail since `w\0rld` contains an interior nul byte. 
394 | assert!(matches!( 395 | c_string.extend_from_bytes(b"w\0rld"), 396 | Err(ExtendError::InteriorNul { position: 1 }) 397 | )); 398 | 399 | // However, the call above _must not_ have invalidated the state of our CString 400 | assert_eq!(c_string.to_str(), Ok("hello")); 401 | 402 | // Call must fail since we can't store "hello world\0" in 11 bytes 403 | assert!(matches!( 404 | c_string.extend_from_bytes(b" world"), 405 | Err(ExtendError::Capacity(CapacityError)) 406 | )); 407 | 408 | // Yet again, the call above must not have invalidated the state of our CString 409 | // (as it would e.g. if we pushed the bytes but then failed to push the nul terminator) 410 | assert_eq!(c_string.to_str(), Ok("hello")); 411 | 412 | c_string.extend_from_bytes(b" Bill").unwrap(); 413 | 414 | assert_eq!(c_string.to_str(), Ok("hello Bill")); 415 | } 416 | 417 | #[test] 418 | fn calculate_capacity_with_additional_bytes() { 419 | const INITIAL_BYTES: &[u8] = b"abc"; 420 | 421 | let mut c_string = CString::<5>::new(); 422 | 423 | c_string.extend_from_bytes(INITIAL_BYTES).unwrap(); 424 | 425 | assert_eq!(c_string.to_bytes_with_nul().len(), 4); 426 | assert_eq!(c_string.capacity_with_bytes(b""), None); 427 | assert_eq!(c_string.capacity_with_bytes(b"\0"), None); 428 | assert_eq!( 429 | c_string.capacity_with_bytes(b"d"), 430 | Some(INITIAL_BYTES.len() + 2) 431 | ); 432 | assert_eq!( 433 | c_string.capacity_with_bytes(b"d\0"), 434 | Some(INITIAL_BYTES.len() + 2) 435 | ); 436 | assert_eq!( 437 | c_string.capacity_with_bytes(b"defg"), 438 | Some(INITIAL_BYTES.len() + 5) 439 | ); 440 | assert_eq!( 441 | c_string.capacity_with_bytes(b"defg\0"), 442 | Some(INITIAL_BYTES.len() + 5) 443 | ); 444 | } 445 | #[test] 446 | fn default() { 447 | assert_eq!(CString::<1>::default().as_c_str(), c""); 448 | } 449 | 450 | #[test] 451 | fn deref() { 452 | assert_eq!(CString::<1>::new().deref(), c""); 453 | assert_eq!(CString::<2>::new().deref(), c""); 454 | assert_eq!(CString::<3>::new().deref(), c""); 455 | 
456 | let mut string = CString::<2>::new(); 457 | string.extend_from_bytes(&[65]).unwrap(); 458 | 459 | assert_eq!(string.deref(), c"A"); 460 | 461 | let mut string = CString::<3>::new(); 462 | string.extend_from_bytes(&[65, 66]).unwrap(); 463 | 464 | assert_eq!(string.deref(), c"AB"); 465 | 466 | let mut string = CString::<4>::new(); 467 | string.extend_from_bytes(&[65, 66, 67]).unwrap(); 468 | 469 | assert_eq!(string.deref(), c"ABC"); 470 | } 471 | 472 | #[test] 473 | fn as_ref() { 474 | let mut string = CString::<4>::new(); 475 | string.extend_from_bytes(b"foo").unwrap(); 476 | assert_eq!(string.as_ref(), c"foo"); 477 | } 478 | 479 | #[test] 480 | fn borrow() { 481 | let mut string = CString::<4>::new(); 482 | string.extend_from_bytes(b"foo").unwrap(); 483 | assert_eq!(Borrow::::borrow(&string), c"foo"); 484 | } 485 | 486 | mod equality { 487 | use super::*; 488 | 489 | #[test] 490 | fn c_string() { 491 | // Empty strings 492 | assert!(CString::<1>::new() == CString::<1>::new()); 493 | assert!(CString::<1>::new() == CString::<2>::new()); 494 | assert!(CString::<1>::from_bytes_with_nul(b"\0").unwrap() == CString::<3>::new()); 495 | 496 | // Single character 497 | assert!( 498 | CString::<2>::from_bytes_with_nul(b"a\0").unwrap() 499 | == CString::<2>::from_bytes_with_nul(b"a\0").unwrap() 500 | ); 501 | assert!( 502 | CString::<2>::from_bytes_with_nul(b"a\0").unwrap() 503 | == CString::<3>::from_bytes_with_nul(b"a\0").unwrap() 504 | ); 505 | assert!( 506 | CString::<2>::from_bytes_with_nul(b"a\0").unwrap() 507 | != CString::<2>::from_bytes_with_nul(b"b\0").unwrap() 508 | ); 509 | 510 | // Multiple characters 511 | assert!( 512 | CString::<4>::from_bytes_with_nul(b"abc\0").unwrap() 513 | == CString::<4>::from_bytes_with_nul(b"abc\0").unwrap() 514 | ); 515 | assert!( 516 | CString::<3>::from_bytes_with_nul(b"ab\0").unwrap() 517 | != CString::<4>::from_bytes_with_nul(b"abc\0").unwrap() 518 | ); 519 | } 520 | } 521 | 522 | mod ordering { 523 | use super::*; 524 | 525 | 
#[test] 526 | fn c_string() { 527 | assert_eq!( 528 | CString::<1>::new().partial_cmp(&CString::<1>::new()), 529 | Some(Ordering::Equal) 530 | ); 531 | assert_eq!( 532 | CString::<2>::from_bytes_with_nul(b"a\0") 533 | .unwrap() 534 | .partial_cmp(&CString::<2>::from_bytes_with_nul(b"b\0").unwrap()), 535 | Some(Ordering::Less) 536 | ); 537 | assert_eq!( 538 | CString::<2>::from_bytes_with_nul(b"b\0") 539 | .unwrap() 540 | .partial_cmp(&CString::<2>::from_bytes_with_nul(b"a\0").unwrap()), 541 | Some(Ordering::Greater) 542 | ); 543 | } 544 | 545 | #[test] 546 | fn c_str() { 547 | assert_eq!(c"".partial_cmp(&CString::<1>::new()), Some(Ordering::Equal)); 548 | assert_eq!( 549 | c"a".partial_cmp(&CString::<2>::from_bytes_with_nul(b"b\0").unwrap()), 550 | Some(Ordering::Less) 551 | ); 552 | assert_eq!( 553 | c"b".partial_cmp(&CString::<2>::from_bytes_with_nul(b"a\0").unwrap()), 554 | Some(Ordering::Greater) 555 | ); 556 | } 557 | } 558 | } 559 | -------------------------------------------------------------------------------- /src/de.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | binary_heap::Kind as BinaryHeapKind, len_type::LenType, BinaryHeap, Deque, HistoryBuf, 3 | IndexMap, IndexSet, LinearMap, String, Vec, 4 | }; 5 | use core::{ 6 | fmt, 7 | hash::{Hash, Hasher}, 8 | marker::PhantomData, 9 | }; 10 | use hash32::BuildHasherDefault; 11 | use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess}; 12 | 13 | // Sequential containers 14 | 15 | impl<'de, T, KIND, const N: usize> Deserialize<'de> for BinaryHeap 16 | where 17 | T: Ord + Deserialize<'de>, 18 | 19 | KIND: BinaryHeapKind, 20 | { 21 | fn deserialize(deserializer: D) -> Result 22 | where 23 | D: Deserializer<'de>, 24 | { 25 | struct ValueVisitor<'de, T, KIND, const N: usize>(PhantomData<(&'de (), T, KIND)>); 26 | 27 | impl<'de, T, KIND, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, KIND, N> 28 | where 29 | T: Ord + 
Deserialize<'de>, 30 | KIND: BinaryHeapKind, 31 | { 32 | type Value = BinaryHeap; 33 | 34 | fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { 35 | formatter.write_str("a sequence") 36 | } 37 | 38 | fn visit_seq(self, mut seq: A) -> Result 39 | where 40 | A: SeqAccess<'de>, 41 | { 42 | let mut values = BinaryHeap::new(); 43 | 44 | while let Some(value) = seq.next_element()? { 45 | if values.push(value).is_err() { 46 | return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; 47 | } 48 | } 49 | 50 | Ok(values) 51 | } 52 | } 53 | deserializer.deserialize_seq(ValueVisitor(PhantomData)) 54 | } 55 | } 56 | 57 | impl<'de, T, S, const N: usize> Deserialize<'de> for IndexSet, N> 58 | where 59 | T: Eq + Hash + Deserialize<'de>, 60 | S: Hasher + Default, 61 | { 62 | fn deserialize(deserializer: D) -> Result 63 | where 64 | D: Deserializer<'de>, 65 | { 66 | struct ValueVisitor<'de, T, S, const N: usize>(PhantomData<(&'de (), T, S)>); 67 | 68 | impl<'de, T, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, S, N> 69 | where 70 | T: Eq + Hash + Deserialize<'de>, 71 | S: Hasher + Default, 72 | { 73 | type Value = IndexSet, N>; 74 | 75 | fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { 76 | formatter.write_str("a sequence") 77 | } 78 | 79 | fn visit_seq(self, mut seq: A) -> Result 80 | where 81 | A: SeqAccess<'de>, 82 | { 83 | let mut values = IndexSet::new(); 84 | 85 | while let Some(value) = seq.next_element()? 
{ 86 | if values.insert(value).is_err() { 87 | return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; 88 | } 89 | } 90 | 91 | Ok(values) 92 | } 93 | } 94 | deserializer.deserialize_seq(ValueVisitor(PhantomData)) 95 | } 96 | } 97 | 98 | impl<'de, T, LenT: LenType, const N: usize> Deserialize<'de> for Vec 99 | where 100 | T: Deserialize<'de>, 101 | { 102 | fn deserialize(deserializer: D) -> Result 103 | where 104 | D: Deserializer<'de>, 105 | { 106 | struct ValueVisitor<'de, T, LenT: LenType, const N: usize>(PhantomData<(&'de (), T, LenT)>); 107 | 108 | impl<'de, T, LenT, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, LenT, N> 109 | where 110 | T: Deserialize<'de>, 111 | LenT: LenType, 112 | { 113 | type Value = Vec; 114 | 115 | fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { 116 | formatter.write_str("a sequence") 117 | } 118 | 119 | fn visit_seq(self, mut seq: A) -> Result 120 | where 121 | A: SeqAccess<'de>, 122 | { 123 | let mut values = Vec::new(); 124 | 125 | while let Some(value) = seq.next_element()? 
{ 126 | if values.push(value).is_err() { 127 | return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; 128 | } 129 | } 130 | 131 | Ok(values) 132 | } 133 | } 134 | deserializer.deserialize_seq(ValueVisitor(PhantomData)) 135 | } 136 | } 137 | 138 | impl<'de, T, const N: usize> Deserialize<'de> for Deque 139 | where 140 | T: Deserialize<'de>, 141 | { 142 | fn deserialize(deserializer: D) -> Result 143 | where 144 | D: Deserializer<'de>, 145 | { 146 | struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>); 147 | 148 | impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N> 149 | where 150 | T: Deserialize<'de>, 151 | { 152 | type Value = Deque; 153 | 154 | fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { 155 | formatter.write_str("a sequence") 156 | } 157 | 158 | fn visit_seq(self, mut seq: A) -> Result 159 | where 160 | A: SeqAccess<'de>, 161 | { 162 | let mut values = Deque::new(); 163 | 164 | while let Some(value) = seq.next_element()? 
{ 165 | if values.push_back(value).is_err() { 166 | return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; 167 | } 168 | } 169 | 170 | Ok(values) 171 | } 172 | } 173 | deserializer.deserialize_seq(ValueVisitor(PhantomData)) 174 | } 175 | } 176 | 177 | impl<'de, T, const N: usize> Deserialize<'de> for HistoryBuf 178 | where 179 | T: Deserialize<'de>, 180 | { 181 | fn deserialize(deserializer: D) -> Result 182 | where 183 | D: Deserializer<'de>, 184 | { 185 | struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>); 186 | 187 | impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N> 188 | where 189 | T: Deserialize<'de>, 190 | { 191 | type Value = HistoryBuf; 192 | 193 | fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { 194 | formatter.write_str("a sequence") 195 | } 196 | 197 | fn visit_seq(self, mut seq: A) -> Result 198 | where 199 | A: SeqAccess<'de>, 200 | { 201 | let mut values = HistoryBuf::new(); 202 | 203 | while let Some(value) = seq.next_element()? 
{ 204 | values.write(value); 205 | } 206 | 207 | Ok(values) 208 | } 209 | } 210 | deserializer.deserialize_seq(ValueVisitor(PhantomData)) 211 | } 212 | } 213 | 214 | // Dictionaries 215 | 216 | impl<'de, K, V, S, const N: usize> Deserialize<'de> for IndexMap, N> 217 | where 218 | K: Eq + Hash + Deserialize<'de>, 219 | V: Deserialize<'de>, 220 | S: Default + Hasher, 221 | { 222 | fn deserialize(deserializer: D) -> Result 223 | where 224 | D: Deserializer<'de>, 225 | { 226 | struct ValueVisitor<'de, K, V, S, const N: usize>(PhantomData<(&'de (), K, V, S)>); 227 | 228 | impl<'de, K, V, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N> 229 | where 230 | K: Eq + Hash + Deserialize<'de>, 231 | V: Deserialize<'de>, 232 | S: Default + Hasher, 233 | { 234 | type Value = IndexMap, N>; 235 | 236 | fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { 237 | formatter.write_str("a map") 238 | } 239 | 240 | fn visit_map(self, mut map: A) -> Result 241 | where 242 | A: MapAccess<'de>, 243 | { 244 | let mut values = IndexMap::new(); 245 | 246 | while let Some((key, value)) = map.next_entry()? 
{ 247 | if values.insert(key, value).is_err() { 248 | return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; 249 | } 250 | } 251 | 252 | Ok(values) 253 | } 254 | } 255 | deserializer.deserialize_map(ValueVisitor(PhantomData)) 256 | } 257 | } 258 | 259 | impl<'de, K, V, const N: usize> Deserialize<'de> for LinearMap 260 | where 261 | K: Eq + Deserialize<'de>, 262 | V: Deserialize<'de>, 263 | { 264 | fn deserialize(deserializer: D) -> Result 265 | where 266 | D: Deserializer<'de>, 267 | { 268 | struct ValueVisitor<'de, K, V, const N: usize>(PhantomData<(&'de (), K, V)>); 269 | 270 | impl<'de, K, V, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N> 271 | where 272 | K: Eq + Deserialize<'de>, 273 | V: Deserialize<'de>, 274 | { 275 | type Value = LinearMap; 276 | 277 | fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { 278 | formatter.write_str("a map") 279 | } 280 | 281 | fn visit_map(self, mut map: A) -> Result 282 | where 283 | A: MapAccess<'de>, 284 | { 285 | let mut values = LinearMap::new(); 286 | 287 | while let Some((key, value)) = map.next_entry()? 
{ 288 | if values.insert(key, value).is_err() { 289 | return Err(A::Error::invalid_length(values.capacity() + 1, &self))?; 290 | } 291 | } 292 | 293 | Ok(values) 294 | } 295 | } 296 | deserializer.deserialize_map(ValueVisitor(PhantomData)) 297 | } 298 | } 299 | 300 | // String containers 301 | 302 | impl<'de, LenT: LenType, const N: usize> Deserialize<'de> for String { 303 | fn deserialize(deserializer: D) -> Result 304 | where 305 | D: Deserializer<'de>, 306 | { 307 | struct ValueVisitor<'de, LenT: LenType, const N: usize>(PhantomData<(&'de (), LenT)>); 308 | 309 | impl<'de, LenT: LenType, const N: usize> de::Visitor<'de> for ValueVisitor<'de, LenT, N> { 310 | type Value = String; 311 | 312 | fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { 313 | write!(formatter, "a string no more than {} bytes long", N as u64) 314 | } 315 | 316 | fn visit_str(self, v: &str) -> Result 317 | where 318 | E: de::Error, 319 | { 320 | let mut s = String::new(); 321 | s.push_str(v) 322 | .map_err(|_| E::invalid_length(v.len(), &self))?; 323 | Ok(s) 324 | } 325 | 326 | fn visit_bytes(self, v: &[u8]) -> Result 327 | where 328 | E: de::Error, 329 | { 330 | let mut s = String::new(); 331 | 332 | s.push_str( 333 | core::str::from_utf8(v) 334 | .map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?, 335 | ) 336 | .map_err(|_| E::invalid_length(v.len(), &self))?; 337 | 338 | Ok(s) 339 | } 340 | } 341 | 342 | deserializer.deserialize_str(ValueVisitor(PhantomData)) 343 | } 344 | } 345 | -------------------------------------------------------------------------------- /src/defmt.rs: -------------------------------------------------------------------------------- 1 | //! 
Defmt implementations for heapless types 2 | 3 | use crate::{ 4 | len_type::LenType, 5 | string::{StringInner, StringStorage}, 6 | vec::{VecInner, VecStorage}, 7 | }; 8 | use defmt::Formatter; 9 | 10 | impl + ?Sized> defmt::Format for VecInner 11 | where 12 | T: defmt::Format, 13 | { 14 | fn format(&self, fmt: Formatter<'_>) { 15 | defmt::write!(fmt, "{=[?]}", self.as_slice()); 16 | } 17 | } 18 | 19 | impl defmt::Format for StringInner { 20 | fn format(&self, fmt: Formatter<'_>) { 21 | defmt::write!(fmt, "{=str}", self.as_str()); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/index_set.rs: -------------------------------------------------------------------------------- 1 | //! A fixed-capacity hash set where the iteration order is independent of the hash values. 2 | use core::{ 3 | borrow::Borrow, 4 | fmt, 5 | hash::{BuildHasher, Hash}, 6 | }; 7 | 8 | use hash32::{BuildHasherDefault, FnvHasher}; 9 | 10 | use crate::index_map::{self, IndexMap}; 11 | 12 | /// An [`IndexSet`] using the default FNV hasher. 13 | /// 14 | /// A list of all Methods and Traits available for `FnvIndexSet` can be found in 15 | /// the [`IndexSet`] documentation. 16 | /// 17 | /// # Examples 18 | /// ``` 19 | /// use heapless::index_set::FnvIndexSet; 20 | /// 21 | /// // A hash set with a capacity of 16 elements allocated on the stack 22 | /// let mut books = FnvIndexSet::<_, 16>::new(); 23 | /// 24 | /// // Add some books. 25 | /// books.insert("A Dance With Dragons").unwrap(); 26 | /// books.insert("To Kill a Mockingbird").unwrap(); 27 | /// books.insert("The Odyssey").unwrap(); 28 | /// books.insert("The Great Gatsby").unwrap(); 29 | /// 30 | /// // Check for a specific one. 31 | /// if !books.contains("The Winds of Winter") { 32 | /// println!( 33 | /// "We have {} books, but The Winds of Winter ain't one.", 34 | /// books.len() 35 | /// ); 36 | /// } 37 | /// 38 | /// // Remove a book. 
39 | /// books.remove("The Odyssey"); 40 | /// 41 | /// // Iterate over everything. 42 | /// for book in &books { 43 | /// println!("{}", book); 44 | /// } 45 | /// ``` 46 | pub type FnvIndexSet = IndexSet, N>; 47 | 48 | /// Fixed capacity [`IndexSet`](https://docs.rs/indexmap/2/indexmap/set/struct.IndexSet.html). 49 | /// 50 | /// Note that you cannot use `IndexSet` directly, since it is generic around the hashing algorithm 51 | /// in use. Pick a concrete instantiation like [`FnvIndexSet`] instead 52 | /// or create your own. 53 | /// 54 | /// Note that the capacity of the `IndexSet` must be a power of 2. 55 | /// 56 | /// # Examples 57 | /// Since `IndexSet` cannot be used directly, we're using its `FnvIndexSet` instantiation 58 | /// for this example. 59 | /// 60 | /// ``` 61 | /// use heapless::index_set::FnvIndexSet; 62 | /// 63 | /// // A hash set with a capacity of 16 elements allocated on the stack 64 | /// let mut books = FnvIndexSet::<_, 16>::new(); 65 | /// 66 | /// // Add some books. 67 | /// books.insert("A Dance With Dragons").unwrap(); 68 | /// books.insert("To Kill a Mockingbird").unwrap(); 69 | /// books.insert("The Odyssey").unwrap(); 70 | /// books.insert("The Great Gatsby").unwrap(); 71 | /// 72 | /// // Check for a specific one. 73 | /// if !books.contains("The Winds of Winter") { 74 | /// println!( 75 | /// "We have {} books, but The Winds of Winter ain't one.", 76 | /// books.len() 77 | /// ); 78 | /// } 79 | /// 80 | /// // Remove a book. 81 | /// books.remove("The Odyssey"); 82 | /// 83 | /// // Iterate over everything. 
84 | /// for book in &books { 85 | /// println!("{}", book); 86 | /// } 87 | /// ``` 88 | pub struct IndexSet { 89 | map: IndexMap, 90 | } 91 | 92 | impl IndexSet, N> { 93 | /// Creates an empty `IndexSet` 94 | pub const fn new() -> Self { 95 | Self { 96 | map: IndexMap::new(), 97 | } 98 | } 99 | } 100 | 101 | impl IndexSet { 102 | /// Returns the number of elements the set can hold 103 | /// 104 | /// # Examples 105 | /// 106 | /// ``` 107 | /// use heapless::index_set::FnvIndexSet; 108 | /// 109 | /// let set = FnvIndexSet::::new(); 110 | /// assert_eq!(set.capacity(), 16); 111 | /// ``` 112 | pub fn capacity(&self) -> usize { 113 | self.map.capacity() 114 | } 115 | 116 | /// Return an iterator over the values of the set, in insertion order 117 | /// 118 | /// # Examples 119 | /// 120 | /// ``` 121 | /// use heapless::index_set::FnvIndexSet; 122 | /// 123 | /// let mut set = FnvIndexSet::<_, 16>::new(); 124 | /// set.insert("a").unwrap(); 125 | /// set.insert("b").unwrap(); 126 | /// 127 | /// // Will print in insertion order: a, b 128 | /// for x in set.iter() { 129 | /// println!("{}", x); 130 | /// } 131 | /// ``` 132 | pub fn iter(&self) -> Iter<'_, T> { 133 | Iter { 134 | iter: self.map.iter(), 135 | } 136 | } 137 | 138 | /// Get the first value 139 | /// 140 | /// Computes in *O*(1) time 141 | pub fn first(&self) -> Option<&T> { 142 | self.map.first().map(|(k, _v)| k) 143 | } 144 | 145 | /// Get the last value 146 | /// 147 | /// Computes in *O*(1) time 148 | pub fn last(&self) -> Option<&T> { 149 | self.map.last().map(|(k, _v)| k) 150 | } 151 | 152 | /// Returns the number of elements in the set. 
153 | /// 154 | /// # Examples 155 | /// 156 | /// ``` 157 | /// use heapless::index_set::FnvIndexSet; 158 | /// 159 | /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); 160 | /// assert_eq!(v.len(), 0); 161 | /// v.insert(1).unwrap(); 162 | /// assert_eq!(v.len(), 1); 163 | /// ``` 164 | pub fn len(&self) -> usize { 165 | self.map.len() 166 | } 167 | 168 | /// Returns `true` if the set contains no elements. 169 | /// 170 | /// # Examples 171 | /// 172 | /// ``` 173 | /// use heapless::index_set::FnvIndexSet; 174 | /// 175 | /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); 176 | /// assert!(v.is_empty()); 177 | /// v.insert(1).unwrap(); 178 | /// assert!(!v.is_empty()); 179 | /// ``` 180 | pub fn is_empty(&self) -> bool { 181 | self.map.is_empty() 182 | } 183 | 184 | /// Returns `true` if the set is full. 185 | /// 186 | /// # Examples 187 | /// 188 | /// ``` 189 | /// use heapless::index_set::FnvIndexSet; 190 | /// 191 | /// let mut v: FnvIndexSet<_, 4> = FnvIndexSet::new(); 192 | /// assert!(!v.is_full()); 193 | /// v.insert(1).unwrap(); 194 | /// v.insert(2).unwrap(); 195 | /// v.insert(3).unwrap(); 196 | /// v.insert(4).unwrap(); 197 | /// assert!(v.is_full()); 198 | /// ``` 199 | pub fn is_full(&self) -> bool { 200 | self.map.is_full() 201 | } 202 | 203 | /// Clears the set, removing all values. 204 | /// 205 | /// # Examples 206 | /// 207 | /// ``` 208 | /// use heapless::index_set::FnvIndexSet; 209 | /// 210 | /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); 211 | /// v.insert(1).unwrap(); 212 | /// v.clear(); 213 | /// assert!(v.is_empty()); 214 | /// ``` 215 | pub fn clear(&mut self) { 216 | self.map.clear(); 217 | } 218 | } 219 | 220 | impl IndexSet 221 | where 222 | T: Eq + Hash, 223 | S: BuildHasher, 224 | { 225 | /// Visits the values representing the difference, i.e. the values that are in `self` but not in 226 | /// `other`. 
227 | /// 228 | /// # Examples 229 | /// 230 | /// ``` 231 | /// use heapless::index_set::FnvIndexSet; 232 | /// 233 | /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); 234 | /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); 235 | /// 236 | /// // Can be seen as `a - b`. 237 | /// for x in a.difference(&b) { 238 | /// println!("{}", x); // Print 1 239 | /// } 240 | /// 241 | /// let diff: FnvIndexSet<_, 16> = a.difference(&b).collect(); 242 | /// assert_eq!(diff, [1].iter().collect::>()); 243 | /// 244 | /// // Note that difference is not symmetric, 245 | /// // and `b - a` means something else: 246 | /// let diff: FnvIndexSet<_, 16> = b.difference(&a).collect(); 247 | /// assert_eq!(diff, [4].iter().collect::>()); 248 | /// ``` 249 | pub fn difference<'a, S2, const N2: usize>( 250 | &'a self, 251 | other: &'a IndexSet, 252 | ) -> Difference<'a, T, S2, N2> 253 | where 254 | S2: BuildHasher, 255 | { 256 | Difference { 257 | iter: self.iter(), 258 | other, 259 | } 260 | } 261 | 262 | /// Visits the values representing the symmetric difference, i.e. the values that are in `self` 263 | /// or in `other` but not in both. 264 | /// 265 | /// # Examples 266 | /// 267 | /// ``` 268 | /// use heapless::index_set::FnvIndexSet; 269 | /// 270 | /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); 271 | /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); 272 | /// 273 | /// // Print 1, 4 in that order. 
274 | /// for x in a.symmetric_difference(&b) { 275 | /// println!("{}", x); 276 | /// } 277 | /// 278 | /// let diff1: FnvIndexSet<_, 16> = a.symmetric_difference(&b).collect(); 279 | /// let diff2: FnvIndexSet<_, 16> = b.symmetric_difference(&a).collect(); 280 | /// 281 | /// assert_eq!(diff1, diff2); 282 | /// assert_eq!(diff1, [1, 4].iter().collect::>()); 283 | /// ``` 284 | pub fn symmetric_difference<'a, S2, const N2: usize>( 285 | &'a self, 286 | other: &'a IndexSet, 287 | ) -> impl Iterator 288 | where 289 | S2: BuildHasher, 290 | { 291 | self.difference(other).chain(other.difference(self)) 292 | } 293 | 294 | /// Visits the values representing the intersection, i.e. the values that are both in `self` and 295 | /// `other`. 296 | /// 297 | /// # Examples 298 | /// 299 | /// ``` 300 | /// use heapless::index_set::FnvIndexSet; 301 | /// 302 | /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); 303 | /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); 304 | /// 305 | /// // Print 2, 3 in that order. 306 | /// for x in a.intersection(&b) { 307 | /// println!("{}", x); 308 | /// } 309 | /// 310 | /// let intersection: FnvIndexSet<_, 16> = a.intersection(&b).collect(); 311 | /// assert_eq!(intersection, [2, 3].iter().collect::>()); 312 | /// ``` 313 | pub fn intersection<'a, S2, const N2: usize>( 314 | &'a self, 315 | other: &'a IndexSet, 316 | ) -> Intersection<'a, T, S2, N2> 317 | where 318 | S2: BuildHasher, 319 | { 320 | Intersection { 321 | iter: self.iter(), 322 | other, 323 | } 324 | } 325 | 326 | /// Visits the values representing the union, i.e. all the values in `self` or `other`, without 327 | /// duplicates. 
328 | /// 329 | /// # Examples 330 | /// 331 | /// ``` 332 | /// use heapless::index_set::FnvIndexSet; 333 | /// 334 | /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); 335 | /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); 336 | /// 337 | /// // Print 1, 2, 3, 4 in that order. 338 | /// for x in a.union(&b) { 339 | /// println!("{}", x); 340 | /// } 341 | /// 342 | /// let union: FnvIndexSet<_, 16> = a.union(&b).collect(); 343 | /// assert_eq!(union, [1, 2, 3, 4].iter().collect::>()); 344 | /// ``` 345 | pub fn union<'a, S2, const N2: usize>( 346 | &'a self, 347 | other: &'a IndexSet, 348 | ) -> impl Iterator 349 | where 350 | S2: BuildHasher, 351 | { 352 | self.iter().chain(other.difference(self)) 353 | } 354 | 355 | /// Returns `true` if the set contains a value. 356 | /// 357 | /// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the 358 | /// borrowed form must match those for the value type. 359 | /// 360 | /// # Examples 361 | /// 362 | /// ``` 363 | /// use heapless::index_set::FnvIndexSet; 364 | /// 365 | /// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); 366 | /// assert_eq!(set.contains(&1), true); 367 | /// assert_eq!(set.contains(&4), false); 368 | /// ``` 369 | pub fn contains(&self, value: &Q) -> bool 370 | where 371 | T: Borrow, 372 | Q: ?Sized + Eq + Hash, 373 | { 374 | self.map.contains_key(value) 375 | } 376 | 377 | /// Returns `true` if `self` has no elements in common with `other`. This is equivalent to 378 | /// checking for an empty intersection. 
379 | /// 380 | /// # Examples 381 | /// 382 | /// ``` 383 | /// use heapless::index_set::FnvIndexSet; 384 | /// 385 | /// let a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); 386 | /// let mut b = FnvIndexSet::<_, 16>::new(); 387 | /// 388 | /// assert_eq!(a.is_disjoint(&b), true); 389 | /// b.insert(4).unwrap(); 390 | /// assert_eq!(a.is_disjoint(&b), true); 391 | /// b.insert(1).unwrap(); 392 | /// assert_eq!(a.is_disjoint(&b), false); 393 | /// ``` 394 | pub fn is_disjoint(&self, other: &IndexSet) -> bool 395 | where 396 | S2: BuildHasher, 397 | { 398 | self.iter().all(|v| !other.contains(v)) 399 | } 400 | 401 | /// Returns `true` if the set is a subset of another, i.e. `other` contains at least all the 402 | /// values in `self`. 403 | /// 404 | /// # Examples 405 | /// 406 | /// ``` 407 | /// use heapless::index_set::FnvIndexSet; 408 | /// 409 | /// let sup: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); 410 | /// let mut set = FnvIndexSet::<_, 16>::new(); 411 | /// 412 | /// assert_eq!(set.is_subset(&sup), true); 413 | /// set.insert(2).unwrap(); 414 | /// assert_eq!(set.is_subset(&sup), true); 415 | /// set.insert(4).unwrap(); 416 | /// assert_eq!(set.is_subset(&sup), false); 417 | /// ``` 418 | pub fn is_subset(&self, other: &IndexSet) -> bool 419 | where 420 | S2: BuildHasher, 421 | { 422 | self.iter().all(|v| other.contains(v)) 423 | } 424 | 425 | // Returns `true` if the set is a superset of another, i.e. `self` contains at least all the 426 | // values in `other`. 
427 | /// 428 | /// # Examples 429 | /// 430 | /// ``` 431 | /// use heapless::index_set::FnvIndexSet; 432 | /// 433 | /// let sub: FnvIndexSet<_, 16> = [1, 2].iter().cloned().collect(); 434 | /// let mut set = FnvIndexSet::<_, 16>::new(); 435 | /// 436 | /// assert_eq!(set.is_superset(&sub), false); 437 | /// 438 | /// set.insert(0).unwrap(); 439 | /// set.insert(1).unwrap(); 440 | /// assert_eq!(set.is_superset(&sub), false); 441 | /// 442 | /// set.insert(2).unwrap(); 443 | /// assert_eq!(set.is_superset(&sub), true); 444 | /// ``` 445 | pub fn is_superset(&self, other: &IndexSet) -> bool 446 | where 447 | S2: BuildHasher, 448 | { 449 | other.is_subset(self) 450 | } 451 | 452 | /// Adds a value to the set. 453 | /// 454 | /// If the set did not have this value present, `true` is returned. 455 | /// 456 | /// If the set did have this value present, `false` is returned. 457 | /// 458 | /// # Examples 459 | /// 460 | /// ``` 461 | /// use heapless::index_set::FnvIndexSet; 462 | /// 463 | /// let mut set = FnvIndexSet::<_, 16>::new(); 464 | /// 465 | /// assert_eq!(set.insert(2).unwrap(), true); 466 | /// assert_eq!(set.insert(2).unwrap(), false); 467 | /// assert_eq!(set.len(), 1); 468 | /// ``` 469 | pub fn insert(&mut self, value: T) -> Result { 470 | self.map 471 | .insert(value, ()) 472 | .map(|old| old.is_none()) 473 | .map_err(|(k, _)| k) 474 | } 475 | 476 | /// Removes a value from the set. Returns `true` if the value was present in the set. 477 | /// 478 | /// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the 479 | /// borrowed form must match those for the value type. 
480 | /// 481 | /// # Examples 482 | /// 483 | /// ``` 484 | /// use heapless::index_set::FnvIndexSet; 485 | /// 486 | /// let mut set = FnvIndexSet::<_, 16>::new(); 487 | /// 488 | /// set.insert(2).unwrap(); 489 | /// assert_eq!(set.remove(&2), true); 490 | /// assert_eq!(set.remove(&2), false); 491 | /// ``` 492 | pub fn remove(&mut self, value: &Q) -> bool 493 | where 494 | T: Borrow, 495 | Q: ?Sized + Eq + Hash, 496 | { 497 | self.map.remove(value).is_some() 498 | } 499 | 500 | /// Retains only the elements specified by the predicate. 501 | /// 502 | /// In other words, remove all elements `e` for which `f(&e)` returns `false`. 503 | pub fn retain(&mut self, mut f: F) 504 | where 505 | F: FnMut(&T) -> bool, 506 | { 507 | self.map.retain(move |k, _| f(k)); 508 | } 509 | } 510 | 511 | impl Clone for IndexSet 512 | where 513 | T: Clone, 514 | S: Clone, 515 | { 516 | fn clone(&self) -> Self { 517 | Self { 518 | map: self.map.clone(), 519 | } 520 | } 521 | } 522 | 523 | impl fmt::Debug for IndexSet 524 | where 525 | T: fmt::Debug, 526 | { 527 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 528 | f.debug_set().entries(self.iter()).finish() 529 | } 530 | } 531 | 532 | impl Default for IndexSet 533 | where 534 | S: Default, 535 | { 536 | fn default() -> Self { 537 | Self { 538 | map: <_>::default(), 539 | } 540 | } 541 | } 542 | 543 | impl PartialEq> 544 | for IndexSet 545 | where 546 | T: Eq + Hash, 547 | S1: BuildHasher, 548 | S2: BuildHasher, 549 | { 550 | fn eq(&self, other: &IndexSet) -> bool { 551 | self.len() == other.len() && self.is_subset(other) 552 | } 553 | } 554 | 555 | impl Extend for IndexSet 556 | where 557 | T: Eq + Hash, 558 | S: BuildHasher, 559 | { 560 | fn extend(&mut self, iterable: I) 561 | where 562 | I: IntoIterator, 563 | { 564 | self.map.extend(iterable.into_iter().map(|k| (k, ()))); 565 | } 566 | } 567 | 568 | impl<'a, T, S, const N: usize> Extend<&'a T> for IndexSet 569 | where 570 | T: 'a + Eq + Hash + Copy, 571 | S: 
BuildHasher, 572 | { 573 | fn extend(&mut self, iterable: I) 574 | where 575 | I: IntoIterator, 576 | { 577 | self.extend(iterable.into_iter().cloned()); 578 | } 579 | } 580 | 581 | impl FromIterator for IndexSet 582 | where 583 | T: Eq + Hash, 584 | S: BuildHasher + Default, 585 | { 586 | fn from_iter(iter: I) -> Self 587 | where 588 | I: IntoIterator, 589 | { 590 | let mut set = Self::default(); 591 | set.extend(iter); 592 | set 593 | } 594 | } 595 | 596 | impl<'a, T, S, const N: usize> IntoIterator for &'a IndexSet 597 | where 598 | T: Eq + Hash, 599 | S: BuildHasher, 600 | { 601 | type Item = &'a T; 602 | type IntoIter = Iter<'a, T>; 603 | 604 | fn into_iter(self) -> Self::IntoIter { 605 | self.iter() 606 | } 607 | } 608 | 609 | /// An iterator over the items of a [`IndexSet`]. 610 | /// 611 | /// This `struct` is created by the [`iter`](IndexSet::iter) method on [`IndexSet`]. See its 612 | /// documentation for more. 613 | pub struct Iter<'a, T> { 614 | iter: index_map::Iter<'a, T, ()>, 615 | } 616 | 617 | impl<'a, T> Iterator for Iter<'a, T> { 618 | type Item = &'a T; 619 | 620 | fn next(&mut self) -> Option { 621 | self.iter.next().map(|(k, _)| k) 622 | } 623 | } 624 | 625 | impl Clone for Iter<'_, T> { 626 | fn clone(&self) -> Self { 627 | Self { 628 | iter: self.iter.clone(), 629 | } 630 | } 631 | } 632 | 633 | /// An iterator over the difference of two `IndexSet`s. 634 | /// 635 | /// This is created by the [`IndexSet::difference`] method. 
636 | pub struct Difference<'a, T, S, const N: usize> 637 | where 638 | S: BuildHasher, 639 | T: Eq + Hash, 640 | { 641 | iter: Iter<'a, T>, 642 | other: &'a IndexSet, 643 | } 644 | 645 | impl<'a, T, S, const N: usize> Iterator for Difference<'a, T, S, N> 646 | where 647 | S: BuildHasher, 648 | T: Eq + Hash, 649 | { 650 | type Item = &'a T; 651 | 652 | fn next(&mut self) -> Option { 653 | loop { 654 | let elt = self.iter.next()?; 655 | if !self.other.contains(elt) { 656 | return Some(elt); 657 | } 658 | } 659 | } 660 | } 661 | 662 | /// An iterator over the intersection of two `IndexSet`s. 663 | /// 664 | /// This is created by the [`IndexSet::intersection`] method. 665 | pub struct Intersection<'a, T, S, const N: usize> 666 | where 667 | S: BuildHasher, 668 | T: Eq + Hash, 669 | { 670 | iter: Iter<'a, T>, 671 | other: &'a IndexSet, 672 | } 673 | 674 | impl<'a, T, S, const N: usize> Iterator for Intersection<'a, T, S, N> 675 | where 676 | S: BuildHasher, 677 | T: Eq + Hash, 678 | { 679 | type Item = &'a T; 680 | 681 | fn next(&mut self) -> Option { 682 | loop { 683 | let elt = self.iter.next()?; 684 | if self.other.contains(elt) { 685 | return Some(elt); 686 | } 687 | } 688 | } 689 | } 690 | 691 | #[cfg(test)] 692 | mod tests { 693 | use static_assertions::assert_not_impl_any; 694 | 695 | use super::{BuildHasherDefault, IndexSet}; 696 | 697 | // Ensure a `IndexSet` containing `!Send` values stays `!Send` itself. 
698 | assert_not_impl_any!(IndexSet<*const (), BuildHasherDefault<()>, 4>: Send); 699 | } 700 | -------------------------------------------------------------------------------- /src/len_type.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | fmt::{Debug, Display}, 3 | ops::{Add, AddAssign, Sub, SubAssign}, 4 | }; 5 | 6 | pub trait Sealed: 7 | Send 8 | + Sync 9 | + Copy 10 | + Display 11 | + Debug 12 | + PartialEq 13 | + Add 14 | + AddAssign 15 | + Sub 16 | + SubAssign 17 | + PartialOrd 18 | + TryFrom 19 | + TryInto 20 | { 21 | /// The zero value of the integer type. 22 | const ZERO: Self; 23 | /// The one value of the integer type. 24 | const MAX: Self; 25 | /// The maximum value of this type, as a `usize`. 26 | const MAX_USIZE: usize; 27 | 28 | /// The one value of the integer type. 29 | /// 30 | /// It's a function instead of constant because we want to have implementation which panics for 31 | /// type `ZeroLenType` 32 | fn one() -> Self; 33 | 34 | /// An infallible conversion from `usize` to `LenT`. 35 | #[inline] 36 | fn from_usize(val: usize) -> Self { 37 | val.try_into().unwrap() 38 | } 39 | 40 | /// An infallible conversion from `LenT` to `usize`. 41 | #[inline] 42 | fn into_usize(self) -> usize { 43 | self.try_into().unwrap() 44 | } 45 | 46 | /// Converts `LenT` into `Some(usize)`, unless it's `Self::MAX`, where it returns `None`. 47 | #[inline] 48 | fn to_non_max(self) -> Option { 49 | if self == Self::MAX { 50 | None 51 | } else { 52 | Some(self.into_usize()) 53 | } 54 | } 55 | } 56 | 57 | macro_rules! 
impl_lentype { 58 | ($($(#[$meta:meta])* $LenT:ty),*) => {$( 59 | $(#[$meta])* 60 | impl Sealed for $LenT { 61 | const ZERO: Self = 0; 62 | const MAX: Self = Self::MAX; 63 | const MAX_USIZE: usize = Self::MAX as _; 64 | 65 | fn one() -> Self { 66 | 1 67 | } 68 | } 69 | 70 | $(#[$meta])* 71 | impl LenType for $LenT {} 72 | )*} 73 | } 74 | 75 | /// A sealed trait representing a valid type to use as a length for a container. 76 | /// 77 | /// This cannot be implemented in user code, and is restricted to `u8`, `u16`, `u32`, and `usize`. 78 | pub trait LenType: Sealed {} 79 | 80 | impl_lentype!( 81 | u8, 82 | u16, 83 | #[cfg(any(target_pointer_width = "32", target_pointer_width = "64"))] 84 | u32, 85 | usize 86 | ); 87 | 88 | pub const fn check_capacity_fits() { 89 | assert!(LenT::MAX_USIZE >= N, "The capacity is larger than `LenT` can hold, increase the size of `LenT` or reduce the capacity"); 90 | } 91 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! `static` friendly data structures that don't require dynamic memory allocation 2 | //! 3 | //! The core principle behind `heapless` is that its data structures are backed by a *static* memory 4 | //! allocation. For example, you can think of `heapless::Vec` as an alternative version of 5 | //! `std::Vec` with fixed capacity and that can't be re-allocated on the fly (e.g. via `push`). 6 | //! 7 | //! All `heapless` data structures store their memory allocation *inline* and specify their capacity 8 | //! via their type parameter `N`. This means that you can instantiate a `heapless` data structure on 9 | //! the stack, in a `static` variable, or even in the heap. 10 | //! 11 | //! ``` 12 | //! use heapless::Vec; // fixed capacity `std::Vec` 13 | //! 14 | //! // on the stack 15 | //! let mut xs: Vec = Vec::new(); // can hold up to 8 elements 16 | //! xs.push(42)?; 17 | //! 
assert_eq!(xs.pop(), Some(42)); 18 | //! 19 | //! // in a `static` variable 20 | //! static mut XS: Vec = Vec::new(); 21 | //! 22 | //! let xs = unsafe { &mut XS }; 23 | //! 24 | //! xs.push(42)?; 25 | //! assert_eq!(xs.pop(), Some(42)); 26 | //! 27 | //! // in the heap (though kind of pointless because no reallocation) 28 | //! let mut ys: Box> = Box::new(Vec::new()); 29 | //! ys.push(42)?; 30 | //! assert_eq!(ys.pop(), Some(42)); 31 | //! # Ok::<(), u8>(()) 32 | //! ``` 33 | //! 34 | //! Because they have fixed capacity `heapless` data structures don't implicitly reallocate. This 35 | //! means that operations like `heapless::Vec.push` are *truly* constant time rather than amortized 36 | //! constant time with potentially unbounded (depends on the allocator) worst case execution time 37 | //! (which is bad/unacceptable for hard real time applications). 38 | //! 39 | //! `heapless` data structures don't use a memory allocator which means no risk of an uncatchable 40 | //! Out Of Memory (OOM) condition while performing operations on them. It's certainly possible to 41 | //! run out of capacity while growing `heapless` data structures, but the API lets you handle this 42 | //! possibility by returning a `Result` on operations that may exhaust the capacity of the data 43 | //! structure. 44 | //! 45 | //! List of currently implemented data structures: 46 | #![cfg_attr( 47 | any( 48 | arm_llsc, 49 | all( 50 | target_pointer_width = "32", 51 | any(target_has_atomic = "64", feature = "portable-atomic") 52 | ), 53 | all( 54 | target_pointer_width = "64", 55 | any( 56 | all(target_has_atomic = "128", feature = "nightly"), 57 | feature = "portable-atomic" 58 | ) 59 | ) 60 | ), 61 | doc = "- [`Arc`][pool::arc::Arc]: Like `std::sync::Arc` but backed by a lock-free memory pool rather than `[global_allocator]`." 
62 | )] 63 | #![cfg_attr( 64 | any( 65 | arm_llsc, 66 | all( 67 | target_pointer_width = "32", 68 | any(target_has_atomic = "64", feature = "portable-atomic") 69 | ), 70 | all( 71 | target_pointer_width = "64", 72 | any( 73 | all(target_has_atomic = "128", feature = "nightly"), 74 | feature = "portable-atomic" 75 | ) 76 | ) 77 | ), 78 | doc = "- [`Box`][pool::boxed::Box]: Like `std::boxed::Box` but backed by a lock-free memory pool rather than `[global_allocator]`." 79 | )] 80 | #![cfg_attr( 81 | any( 82 | arm_llsc, 83 | all( 84 | target_pointer_width = "32", 85 | any(target_has_atomic = "64", feature = "portable-atomic") 86 | ), 87 | all( 88 | target_pointer_width = "64", 89 | any( 90 | all(target_has_atomic = "128", feature = "nightly"), 91 | feature = "portable-atomic" 92 | ) 93 | ) 94 | ), 95 | doc = "- [`Arc`][pool::arc::Arc]: Like `std::sync::Arc` but backed by a lock-free memory pool rather than `[global_allocator]`." 96 | )] 97 | #![cfg_attr( 98 | any( 99 | arm_llsc, 100 | all( 101 | target_pointer_width = "32", 102 | any(target_has_atomic = "64", feature = "portable-atomic") 103 | ), 104 | all( 105 | target_pointer_width = "64", 106 | any( 107 | all(target_has_atomic = "128", feature = "nightly"), 108 | feature = "portable-atomic" 109 | ) 110 | ) 111 | ), 112 | doc = "- [`Object`](pool::object::Object): Objects managed by an object pool." 113 | )] 114 | //! - [`BinaryHeap`]: A priority queue. 115 | //! - [`Deque`]: A double-ended queue. 116 | //! - [`HistoryBuf`]: A “history buffer”, similar to a write-only ring buffer. 117 | //! - [`IndexMap`]: A hash table. 118 | //! - [`IndexSet`]: A hash set. 119 | //! - [`LinearMap`]: A linear map. 120 | //! - [`SortedLinkedList`](sorted_linked_list::SortedLinkedList): A sorted linked list. 121 | //! - [`String`]: A string. 122 | //! - [`Vec`]: A vector. 123 | //! - [`mpmc::MpMcQueue`](mpmc): A lock-free multiple-producer, multiple-consumer queue. 124 | //! 
- [`spsc::Queue`](spsc): A lock-free single-producer, single-consumer queue. 125 | //! 126 | //! # Minimum Supported Rust Version (MSRV) 127 | //! 128 | //! This crate does *not* have a Minimum Supported Rust Version (MSRV) and may make use of language 129 | //! features and API in the standard library available in the latest stable Rust version. 130 | //! 131 | //! In other words, changes in the Rust version requirement of this crate are not considered semver 132 | //! breaking change and may occur in patch version releases. 133 | #![cfg_attr(docsrs, feature(doc_cfg), feature(doc_auto_cfg))] 134 | #![cfg_attr(not(test), no_std)] 135 | #![deny(missing_docs)] 136 | #![cfg_attr( 137 | all( 138 | feature = "nightly", 139 | target_pointer_width = "64", 140 | target_has_atomic = "128" 141 | ), 142 | feature(integer_atomics) 143 | )] 144 | #![warn( 145 | clippy::use_self, 146 | clippy::too_long_first_doc_paragraph, 147 | clippy::redundant_pub_crate, 148 | clippy::option_if_let_else, 149 | clippy::ptr_as_ptr, 150 | clippy::ref_as_ptr, 151 | clippy::doc_markdown, 152 | clippy::semicolon_if_nothing_returned, 153 | clippy::if_not_else 154 | )] 155 | 156 | #[cfg(feature = "alloc")] 157 | extern crate alloc; 158 | 159 | pub use binary_heap::BinaryHeap; 160 | pub use c_string::CString; 161 | pub use deque::Deque; 162 | pub use history_buf::{HistoryBuf, OldestOrdered}; 163 | pub use index_map::IndexMap; 164 | pub use index_set::IndexSet; 165 | pub use len_type::LenType; 166 | pub use linear_map::LinearMap; 167 | pub use string::String; 168 | 169 | pub use vec::{Vec, VecView}; 170 | 171 | #[macro_use] 172 | #[cfg(test)] 173 | mod test_helpers; 174 | 175 | pub mod c_string; 176 | pub mod deque; 177 | pub mod history_buf; 178 | pub mod index_map; 179 | pub mod index_set; 180 | mod len_type; 181 | pub mod linear_map; 182 | mod slice; 183 | pub mod storage; 184 | pub mod string; 185 | pub mod vec; 186 | 187 | // FIXME: Workaround a compiler ICE in rust 1.83 to 1.86 188 | // 
https://github.com/rust-lang/rust/issues/138979#issuecomment-2760839948 189 | #[expect(dead_code)] 190 | fn dead_code_ice_workaround() {} 191 | 192 | #[cfg(feature = "serde")] 193 | mod de; 194 | #[cfg(feature = "serde")] 195 | mod ser; 196 | 197 | pub mod binary_heap; 198 | #[cfg(feature = "bytes")] 199 | mod bytes; 200 | #[cfg(feature = "defmt")] 201 | mod defmt; 202 | #[cfg(any( 203 | // assume we have all atomics available if we're using portable-atomic 204 | feature = "portable-atomic", 205 | // target has native atomic CAS (mpmc_large requires usize, otherwise just u8) 206 | all(feature = "mpmc_large", target_has_atomic = "ptr"), 207 | all(not(feature = "mpmc_large"), target_has_atomic = "8") 208 | ))] 209 | pub mod mpmc; 210 | #[cfg(any( 211 | arm_llsc, 212 | all( 213 | target_pointer_width = "32", 214 | any(target_has_atomic = "64", feature = "portable-atomic") 215 | ), 216 | all( 217 | target_pointer_width = "64", 218 | any( 219 | all(target_has_atomic = "128", feature = "nightly"), 220 | feature = "portable-atomic" 221 | ) 222 | ) 223 | ))] 224 | pub mod pool; 225 | pub mod sorted_linked_list; 226 | #[cfg(any( 227 | // assume we have all atomics available if we're using portable-atomic 228 | feature = "portable-atomic", 229 | // target has native atomic CAS. Note this is too restrictive, spsc requires load/store only, not CAS. 230 | // This should be `cfg(target_has_atomic_load_store)`, but that's not stable yet. 231 | target_has_atomic = "ptr", 232 | // or the current target is in a list in build.rs of targets known to have load/store but no CAS. 233 | has_atomic_load_store 234 | ))] 235 | pub mod spsc; 236 | 237 | #[cfg(feature = "ufmt")] 238 | mod ufmt; 239 | 240 | /// Implementation details for macros. 241 | /// Do not use. Used for macros only. Not covered by semver guarantees. 242 | #[doc(hidden)] 243 | pub mod _export { 244 | pub use crate::string::format; 245 | } 246 | 247 | /// The error type for fallible [`Vec`] and [`String`] methods. 
248 | #[derive(Debug)] 249 | #[non_exhaustive] 250 | pub struct CapacityError; 251 | 252 | impl core::fmt::Display for CapacityError { 253 | fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { 254 | f.write_str("insufficient capacity") 255 | } 256 | } 257 | 258 | impl core::error::Error for CapacityError {} 259 | -------------------------------------------------------------------------------- /src/mpmc.rs: -------------------------------------------------------------------------------- 1 | //! A fixed capacity multiple-producer, multiple-consumer (MPMC) lock-free queue. 2 | //! 3 | //! **Note:** This module requires atomic compare-and-swap (CAS) instructions. On 4 | //! targets where they're not natively available, they are emulated by the 5 | //! [`portable-atomic`](https://crates.io/crates/portable-atomic) crate. 6 | //! 7 | //! # Example 8 | //! 9 | //! This queue can be constructed in `const` context. Placing it in a `static` variable lets *all* 10 | //! contexts (interrupts/threads/`main`) safely enqueue and dequeue items. 11 | //! 12 | //! ``` 13 | //! use core::sync::atomic::{AtomicU8, Ordering}; 14 | //! 15 | //! use heapless::mpmc::Queue; 16 | //! 17 | //! static Q: Queue = Queue::new(); 18 | //! 19 | //! fn main() { 20 | //! // Configure systick interrupt. 21 | //! 22 | //! loop { 23 | //! if let Some(x) = Q.dequeue() { 24 | //! println!("{}", x); 25 | //! } else { 26 | //! // Wait for interrupt. 27 | //! } 28 | //! # break 29 | //! } 30 | //! } 31 | //! 32 | //! fn systick() { 33 | //! static COUNT: AtomicU8 = AtomicU8::new(0); 34 | //! let count = COUNT.fetch_add(1, Ordering::SeqCst); 35 | //! 36 | //! # let _ = 37 | //! Q.enqueue(count); 38 | //! } 39 | //! ``` 40 | //! 41 | //! # Benchmark 42 | //! 43 | //! Measured on an ARM Cortex-M3 core running at 8 MHz and with zero flash wait cycles, compiled with `-C opt-level=z`: 44 | //! 45 | //! | Method | Time | N | 46 | //! |:----------------------------|-----:|---:| 47 | //! 
| `Queue::::enqueue()` | 34 | 0 | 48 | //! | `Queue::::enqueue()` | 52 | 1 | 49 | //! | `Queue::::enqueue()` | 69 | 2 | 50 | //! | `Queue::::dequeue()` | 35 | 0 | 51 | //! | `Queue::::dequeue()` | 53 | 1 | 52 | //! | `Queue::::dequeue()` | 71 | 2 | 53 | //! 54 | //! - N denotes the number of interruptions. On Cortex-M, an interruption consists of an 55 | //! interrupt handler preempting the would-be atomic section of the `enqueue`/`dequeue` 56 | //! operation. Note that it does *not* matter if the higher priority handler uses the queue or 57 | //! not. 58 | //! - All execution times are in clock cycles (1 clock cycle = 125 ns). 59 | //! - Execution time is *dependent* on `mem::size_of::()`, as both operations include 60 | //! `ptr::read::()` or `ptr::write::()` in their successful path. 61 | //! - The numbers reported correspond to the successful path, i.e. `dequeue` returning `Some` 62 | //! and `enqueue` returning `Ok`. 63 | //! 64 | //! # References 65 | //! 66 | //! This is an implementation of Dmitry Vyukov's [bounded MPMC queue], minus the 67 | //! cache padding. 68 | //! 69 | //! 
[bounded MPMC queue]: http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue 70 | 71 | use core::{cell::UnsafeCell, mem::MaybeUninit}; 72 | 73 | #[cfg(not(feature = "portable-atomic"))] 74 | use core::sync::atomic; 75 | #[cfg(feature = "portable-atomic")] 76 | use portable_atomic as atomic; 77 | 78 | use atomic::Ordering; 79 | 80 | use crate::storage::{OwnedStorage, Storage, ViewStorage}; 81 | 82 | #[cfg(feature = "mpmc_large")] 83 | type AtomicTargetSize = atomic::AtomicUsize; 84 | #[cfg(not(feature = "mpmc_large"))] 85 | type AtomicTargetSize = atomic::AtomicU8; 86 | 87 | #[cfg(feature = "mpmc_large")] 88 | type UintSize = usize; 89 | #[cfg(not(feature = "mpmc_large"))] 90 | type UintSize = u8; 91 | 92 | #[cfg(feature = "mpmc_large")] 93 | type IntSize = isize; 94 | #[cfg(not(feature = "mpmc_large"))] 95 | type IntSize = i8; 96 | 97 | /// Base struct for [`Queue`] and [`QueueView`], generic over the [`Storage`]. 98 | /// 99 | /// In most cases you should use [`Queue`] or [`QueueView`] directly. Only use this 100 | /// struct if you want to write code that's generic over both. 101 | pub struct QueueInner { 102 | dequeue_pos: AtomicTargetSize, 103 | enqueue_pos: AtomicTargetSize, 104 | buffer: UnsafeCell>>, 105 | } 106 | 107 | /// A statically allocated multi-producer, multi-consumer queue with a capacity of `N` elements. 108 | /// 109 | ///
110 | /// 111 | /// `N` must be a power of 2. 112 | /// 113 | ///
114 | /// 115 | /// The maximum value of `N` is 128 if the `mpmc_large` feature is not enabled. 116 | pub type Queue = QueueInner>; 117 | 118 | /// A [`Queue`] with dynamic capacity. 119 | /// 120 | /// [`Queue`] coerces to `QueueView`. `QueueView` is `!Sized`, meaning it can only ever be used by reference. 121 | pub type QueueView = QueueInner; 122 | 123 | impl Queue { 124 | /// Creates an empty queue. 125 | pub const fn new() -> Self { 126 | const { 127 | assert!(N > 1); 128 | assert!(N.is_power_of_two()); 129 | assert!(N < UintSize::MAX as usize); 130 | } 131 | 132 | let mut cell_count = 0; 133 | 134 | let mut result_cells: [Cell; N] = [const { Cell::new(0) }; N]; 135 | while cell_count != N { 136 | result_cells[cell_count] = Cell::new(cell_count); 137 | cell_count += 1; 138 | } 139 | 140 | Self { 141 | buffer: UnsafeCell::new(result_cells), 142 | dequeue_pos: AtomicTargetSize::new(0), 143 | enqueue_pos: AtomicTargetSize::new(0), 144 | } 145 | } 146 | 147 | /// Used in `Storage` implementation. 148 | pub(crate) fn as_view_private(&self) -> &QueueView { 149 | self 150 | } 151 | /// Used in `Storage` implementation. 152 | pub(crate) fn as_view_mut_private(&mut self) -> &mut QueueView { 153 | self 154 | } 155 | } 156 | 157 | impl QueueInner { 158 | /// Returns the maximum number of elements the queue can hold. 159 | #[inline] 160 | pub fn capacity(&self) -> usize { 161 | S::len(self.buffer.get()) 162 | } 163 | 164 | /// Get a reference to the `Queue`, erasing the `N` const-generic. 
165 | /// 166 | /// 167 | /// ```rust 168 | /// # use heapless::mpmc::{Queue, QueueView}; 169 | /// let queue: Queue = Queue::new(); 170 | /// let view: &QueueView = queue.as_view(); 171 | /// ``` 172 | /// 173 | /// It is often preferable to do the same through type coerction, since `Queue` implements `Unsize>`: 174 | /// 175 | /// ```rust 176 | /// # use heapless::mpmc::{Queue, QueueView}; 177 | /// let queue: Queue = Queue::new(); 178 | /// let view: &QueueView = &queue; 179 | /// ``` 180 | #[inline] 181 | pub fn as_view(&self) -> &QueueView { 182 | S::as_mpmc_view(self) 183 | } 184 | 185 | /// Get a mutable reference to the `Queue`, erasing the `N` const-generic. 186 | /// 187 | /// ```rust 188 | /// # use heapless::mpmc::{Queue, QueueView}; 189 | /// let mut queue: Queue = Queue::new(); 190 | /// let view: &mut QueueView = queue.as_mut_view(); 191 | /// ``` 192 | /// 193 | /// It is often preferable to do the same through type coerction, since `Queue` implements `Unsize>`: 194 | /// 195 | /// ```rust 196 | /// # use heapless::mpmc::{Queue, QueueView}; 197 | /// let mut queue: Queue = Queue::new(); 198 | /// let view: &mut QueueView = &mut queue; 199 | /// ``` 200 | #[inline] 201 | pub fn as_mut_view(&mut self) -> &mut QueueView { 202 | S::as_mpmc_mut_view(self) 203 | } 204 | 205 | fn mask(&self) -> UintSize { 206 | (S::len(self.buffer.get()) - 1) as _ 207 | } 208 | 209 | /// Returns the item in the front of the queue, or `None` if the queue is empty. 210 | pub fn dequeue(&self) -> Option { 211 | unsafe { dequeue(S::as_ptr(self.buffer.get()), &self.dequeue_pos, self.mask()) } 212 | } 213 | 214 | /// Adds an `item` to the end of the queue. 215 | /// 216 | /// Returns back the `item` if the queue is full. 
217 | pub fn enqueue(&self, item: T) -> Result<(), T> { 218 | unsafe { 219 | enqueue( 220 | S::as_ptr(self.buffer.get()), 221 | &self.enqueue_pos, 222 | self.mask(), 223 | item, 224 | ) 225 | } 226 | } 227 | } 228 | 229 | impl Default for Queue { 230 | fn default() -> Self { 231 | Self::new() 232 | } 233 | } 234 | 235 | impl Drop for QueueInner { 236 | fn drop(&mut self) { 237 | // Drop all elements currently in the queue. 238 | while self.dequeue().is_some() {} 239 | } 240 | } 241 | 242 | unsafe impl Sync for QueueInner where T: Send {} 243 | 244 | struct Cell { 245 | data: MaybeUninit, 246 | sequence: AtomicTargetSize, 247 | } 248 | 249 | impl Cell { 250 | const fn new(seq: usize) -> Self { 251 | Self { 252 | data: MaybeUninit::uninit(), 253 | sequence: AtomicTargetSize::new(seq as UintSize), 254 | } 255 | } 256 | } 257 | 258 | unsafe fn dequeue( 259 | buffer: *mut Cell, 260 | dequeue_pos: &AtomicTargetSize, 261 | mask: UintSize, 262 | ) -> Option { 263 | let mut pos = dequeue_pos.load(Ordering::Relaxed); 264 | 265 | let mut cell; 266 | loop { 267 | cell = buffer.add(usize::from(pos & mask)); 268 | let seq = (*cell).sequence.load(Ordering::Acquire); 269 | let dif = (seq as IntSize).wrapping_sub((pos.wrapping_add(1)) as IntSize); 270 | 271 | match dif.cmp(&0) { 272 | core::cmp::Ordering::Equal => { 273 | if dequeue_pos 274 | .compare_exchange_weak( 275 | pos, 276 | pos.wrapping_add(1), 277 | Ordering::Relaxed, 278 | Ordering::Relaxed, 279 | ) 280 | .is_ok() 281 | { 282 | break; 283 | } 284 | } 285 | core::cmp::Ordering::Less => { 286 | return None; 287 | } 288 | core::cmp::Ordering::Greater => { 289 | pos = dequeue_pos.load(Ordering::Relaxed); 290 | } 291 | } 292 | } 293 | 294 | let data = (*cell).data.as_ptr().read(); 295 | (*cell) 296 | .sequence 297 | .store(pos.wrapping_add(mask).wrapping_add(1), Ordering::Release); 298 | Some(data) 299 | } 300 | 301 | unsafe fn enqueue( 302 | buffer: *mut Cell, 303 | enqueue_pos: &AtomicTargetSize, 304 | mask: UintSize, 305 | 
item: T, 306 | ) -> Result<(), T> { 307 | let mut pos = enqueue_pos.load(Ordering::Relaxed); 308 | 309 | let mut cell; 310 | loop { 311 | cell = buffer.add(usize::from(pos & mask)); 312 | let seq = (*cell).sequence.load(Ordering::Acquire); 313 | let dif = (seq as IntSize).wrapping_sub(pos as IntSize); 314 | 315 | match dif.cmp(&0) { 316 | core::cmp::Ordering::Equal => { 317 | if enqueue_pos 318 | .compare_exchange_weak( 319 | pos, 320 | pos.wrapping_add(1), 321 | Ordering::Relaxed, 322 | Ordering::Relaxed, 323 | ) 324 | .is_ok() 325 | { 326 | break; 327 | } 328 | } 329 | core::cmp::Ordering::Less => { 330 | return Err(item); 331 | } 332 | core::cmp::Ordering::Greater => { 333 | pos = enqueue_pos.load(Ordering::Relaxed); 334 | } 335 | } 336 | } 337 | 338 | (*cell).data.as_mut_ptr().write(item); 339 | (*cell) 340 | .sequence 341 | .store(pos.wrapping_add(1), Ordering::Release); 342 | Ok(()) 343 | } 344 | 345 | #[cfg(test)] 346 | mod tests { 347 | use static_assertions::assert_not_impl_any; 348 | 349 | use super::Queue; 350 | 351 | // Ensure a `Queue` containing `!Send` values stays `!Send` itself. 
352 | assert_not_impl_any!(Queue<*const (), 4>: Send); 353 | 354 | #[test] 355 | fn memory_leak() { 356 | droppable!(); 357 | 358 | let q = Queue::<_, 2>::new(); 359 | q.enqueue(Droppable::new()).unwrap_or_else(|_| panic!()); 360 | q.enqueue(Droppable::new()).unwrap_or_else(|_| panic!()); 361 | drop(q); 362 | 363 | assert_eq!(Droppable::count(), 0); 364 | } 365 | 366 | #[test] 367 | fn sanity() { 368 | let q = Queue::<_, 2>::new(); 369 | q.enqueue(0).unwrap(); 370 | q.enqueue(1).unwrap(); 371 | assert!(q.enqueue(2).is_err()); 372 | 373 | assert_eq!(q.dequeue(), Some(0)); 374 | assert_eq!(q.dequeue(), Some(1)); 375 | assert_eq!(q.dequeue(), None); 376 | } 377 | 378 | #[test] 379 | fn drain_at_pos255() { 380 | let q = Queue::<_, 2>::new(); 381 | for _ in 0..255 { 382 | assert!(q.enqueue(0).is_ok()); 383 | assert_eq!(q.dequeue(), Some(0)); 384 | } 385 | 386 | // Queue is empty, this should not block forever. 387 | assert_eq!(q.dequeue(), None); 388 | } 389 | 390 | #[test] 391 | fn full_at_wrapped_pos0() { 392 | let q = Queue::<_, 2>::new(); 393 | for _ in 0..254 { 394 | assert!(q.enqueue(0).is_ok()); 395 | assert_eq!(q.dequeue(), Some(0)); 396 | } 397 | assert!(q.enqueue(0).is_ok()); 398 | assert!(q.enqueue(0).is_ok()); 399 | // this should not block forever 400 | assert!(q.enqueue(0).is_err()); 401 | } 402 | 403 | #[test] 404 | fn enqueue_full() { 405 | #[cfg(not(feature = "mpmc_large"))] 406 | const CAPACITY: usize = 128; 407 | 408 | #[cfg(feature = "mpmc_large")] 409 | const CAPACITY: usize = 256; 410 | 411 | let q: Queue = Queue::new(); 412 | 413 | assert_eq!(q.capacity(), CAPACITY); 414 | 415 | for _ in 0..CAPACITY { 416 | q.enqueue(0xAA).unwrap(); 417 | } 418 | 419 | // Queue is full, this should not block forever. 420 | q.enqueue(0x55).unwrap_err(); 421 | } 422 | } 423 | -------------------------------------------------------------------------------- /src/pool.rs: -------------------------------------------------------------------------------- 1 | //! 
Memory and object pools 2 | //! 3 | //! # Target support 4 | //! 5 | //! This module/API is only available on these compilation targets: 6 | //! 7 | //! - ARM architectures which instruction set include the LDREX, CLREX and STREX instructions, e.g. 8 | //! `thumbv7m-none-eabi` but not `thumbv6m-none-eabi` 9 | //! - 32-bit x86, e.g. `i686-unknown-linux-gnu` 10 | //! 11 | //! # Benchmarks 12 | //! 13 | //! - compilation settings 14 | //! - `codegen-units = 1` 15 | //! - `lto = 'fat'` 16 | //! - `opt-level = 'z'` 17 | //! - compilation target: `thumbv7em-none-eabihf` 18 | //! - CPU: ARM Cortex-M4F 19 | //! 20 | //! - test program: 21 | //! 22 | //! ```no_run 23 | //! use heapless::box_pool; 24 | //! 25 | //! box_pool!(MyBoxPool: ()); // or `arc_pool!` or `object_pool!` 26 | //! 27 | //! bkpt(); 28 | //! let res = MyBoxPool.alloc(()); 29 | //! bkpt(); 30 | //! 31 | //! if let Ok(boxed) = res { 32 | //! bkpt(); 33 | //! drop(boxed); 34 | //! bkpt(); 35 | //! } 36 | //! # fn bkpt() {} 37 | //! ``` 38 | //! 39 | //! - measurement method: the cycle counter (CYCCNT) register was sampled each time a breakpoint 40 | //! (`bkpt`) was hit. the difference between the "after" and the "before" value of CYCCNT yields the 41 | //! execution time in clock cycles. 42 | //! 43 | //! | API | clock cycles | 44 | //! |------------------------------|--------------| 45 | //! | `BoxPool::alloc` | 23 | 46 | //! | `pool::boxed::Box::drop` | 23 | 47 | //! | `ArcPool::alloc` | 28 | 48 | //! | `pool::arc::Arc::drop` | 59 | 49 | //! | `ObjectPool::request` | 23 | 50 | //! | `pool::object::Object::drop` | 23 | 51 | //! 52 | //! Note that the execution time won't include `T`'s initialization nor `T`'s destructor which will 53 | //! be present in the general case for `Box` and `Arc`. 
54 | 55 | mod treiber; 56 | 57 | #[cfg(any(feature = "portable-atomic", target_has_atomic = "ptr"))] 58 | pub mod arc; 59 | pub mod boxed; 60 | pub mod object; 61 | -------------------------------------------------------------------------------- /src/pool/arc.rs: -------------------------------------------------------------------------------- 1 | //! `std::sync::Arc`-like API on top of a lock-free memory pool 2 | //! 3 | //! # Example usage 4 | //! 5 | //! ``` 6 | //! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}}; 7 | //! 8 | //! arc_pool!(MyArcPool: u128); 9 | //! 10 | //! // cannot allocate without first giving memory blocks to the pool 11 | //! assert!(MyArcPool.alloc(42).is_err()); 12 | //! 13 | //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) 14 | //! let block: &'static mut ArcBlock = unsafe { 15 | //! static mut BLOCK: ArcBlock = ArcBlock::new(); 16 | //! addr_of_mut!(BLOCK).as_mut().unwrap() 17 | //! }; 18 | //! 19 | //! MyArcPool.manage(block); 20 | //! 21 | //! let arc = MyArcPool.alloc(1).unwrap(); 22 | //! 23 | //! // number of smart pointers is limited to the number of blocks managed by the pool 24 | //! let res = MyArcPool.alloc(2); 25 | //! assert!(res.is_err()); 26 | //! 27 | //! // but cloning does not consume an `ArcBlock` 28 | //! let arc2 = arc.clone(); 29 | //! 30 | //! assert_eq!(1, *arc2); 31 | //! 32 | //! // `arc`'s destructor returns the memory block to the pool 33 | //! drop(arc2); // decrease reference counter 34 | //! drop(arc); // release memory 35 | //! 36 | //! // it's now possible to allocate a new `Arc` smart pointer 37 | //! let res = MyArcPool.alloc(3); 38 | //! 39 | //! assert!(res.is_ok()); 40 | //! ``` 41 | //! 42 | //! # Array block initialization 43 | //! 44 | //! You can create a static variable that contains an array of memory blocks and give all the blocks 45 | //! to the `ArcPool`. This requires an intermediate `const` value as shown below: 46 | //! 47 | //! ``` 48 | //! 
use heapless::{arc_pool, pool::arc::ArcBlock}; 49 | //! 50 | //! arc_pool!(MyArcPool: u128); 51 | //! 52 | //! const POOL_CAPACITY: usize = 8; 53 | //! 54 | //! let blocks: &'static mut [ArcBlock] = { 55 | //! const BLOCK: ArcBlock = ArcBlock::new(); // <= 56 | //! static mut BLOCKS: [ArcBlock; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY]; 57 | //! unsafe { addr_of_mut!(BLOCK).as_mut().unwrap()S } 58 | //! }; 59 | //! 60 | //! for block in blocks { 61 | //! MyArcPool.manage(block); 62 | //! } 63 | //! ``` 64 | 65 | // reference counting logic is based on version 1.63.0 of the Rust standard library (`alloc` crate) 66 | // which is licensed under 'MIT or APACHE-2.0' 67 | // https://github.com/rust-lang/rust/blob/1.63.0/library/alloc/src/sync.rs#L235 (last visited 68 | // 2022-09-05) 69 | 70 | use core::{ 71 | fmt, 72 | hash::{Hash, Hasher}, 73 | mem::{ManuallyDrop, MaybeUninit}, 74 | ops, ptr, 75 | }; 76 | 77 | #[cfg(not(feature = "portable-atomic"))] 78 | use core::sync::atomic; 79 | #[cfg(feature = "portable-atomic")] 80 | use portable_atomic as atomic; 81 | 82 | use atomic::{AtomicUsize, Ordering}; 83 | 84 | use super::treiber::{NonNullPtr, Stack, UnionNode}; 85 | 86 | /// Creates a new `ArcPool` singleton with the given `$name` that manages the specified `$data_type` 87 | /// 88 | /// For more extensive documentation see the [module level documentation](crate::pool::arc) 89 | #[macro_export] 90 | macro_rules! arc_pool { 91 | ($name:ident: $data_type:ty) => { 92 | pub struct $name; 93 | 94 | impl $crate::pool::arc::ArcPool for $name { 95 | type Data = $data_type; 96 | 97 | fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> { 98 | // Even though the static variable is not exposed to user code, it is 99 | // still useful to have a descriptive symbol name for debugging. 
100 | #[allow(non_upper_case_globals)] 101 | static $name: $crate::pool::arc::ArcPoolImpl<$data_type> = 102 | $crate::pool::arc::ArcPoolImpl::new(); 103 | 104 | &$name 105 | } 106 | } 107 | 108 | impl $name { 109 | /// Inherent method version of `ArcPool::alloc` 110 | #[allow(dead_code)] 111 | pub fn alloc( 112 | &self, 113 | value: $data_type, 114 | ) -> Result<$crate::pool::arc::Arc<$name>, $data_type> { 115 | <$name as $crate::pool::arc::ArcPool>::alloc(value) 116 | } 117 | 118 | /// Inherent method version of `ArcPool::manage` 119 | #[allow(dead_code)] 120 | pub fn manage(&self, block: &'static mut $crate::pool::arc::ArcBlock<$data_type>) { 121 | <$name as $crate::pool::arc::ArcPool>::manage(block) 122 | } 123 | } 124 | }; 125 | } 126 | 127 | /// A singleton that manages `pool::arc::Arc` smart pointers 128 | pub trait ArcPool: Sized { 129 | /// The data type managed by the memory pool 130 | type Data: 'static; 131 | 132 | /// `arc_pool!` implementation detail 133 | #[doc(hidden)] 134 | fn singleton() -> &'static ArcPoolImpl; 135 | 136 | /// Allocate a new `Arc` smart pointer initialized to the given `value` 137 | /// 138 | /// `manage` should be called at least once before calling `alloc` 139 | /// 140 | /// # Errors 141 | /// 142 | /// The `Err`or variant is returned when the memory pool has run out of memory blocks 143 | fn alloc(value: Self::Data) -> Result, Self::Data> { 144 | Ok(Arc { 145 | node_ptr: Self::singleton().alloc(value)?, 146 | }) 147 | } 148 | 149 | /// Add a statically allocated memory block to the memory pool 150 | fn manage(block: &'static mut ArcBlock) { 151 | Self::singleton().manage(block); 152 | } 153 | } 154 | 155 | /// `arc_pool!` implementation detail 156 | // newtype to avoid having to make field types public 157 | #[doc(hidden)] 158 | pub struct ArcPoolImpl { 159 | stack: Stack>>>, 160 | } 161 | 162 | impl ArcPoolImpl { 163 | /// `arc_pool!` implementation detail 164 | #[doc(hidden)] 165 | #[allow(clippy::new_without_default)] 166 | 
pub const fn new() -> Self { 167 | Self { 168 | stack: Stack::new(), 169 | } 170 | } 171 | 172 | fn alloc(&self, value: T) -> Result>>>, T> { 173 | if let Some(node_ptr) = self.stack.try_pop() { 174 | let inner = ArcInner { 175 | data: value, 176 | strong: AtomicUsize::new(1), 177 | }; 178 | unsafe { node_ptr.as_ptr().cast::>().write(inner) } 179 | 180 | Ok(node_ptr) 181 | } else { 182 | Err(value) 183 | } 184 | } 185 | 186 | fn manage(&self, block: &'static mut ArcBlock) { 187 | let node: &'static mut _ = &mut block.node; 188 | 189 | unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) } 190 | } 191 | } 192 | 193 | unsafe impl Sync for ArcPoolImpl {} 194 | 195 | /// Like `std::sync::Arc` but managed by memory pool `P` 196 | pub struct Arc

197 | where 198 | P: ArcPool, 199 | { 200 | node_ptr: NonNullPtr>>>, 201 | } 202 | 203 | impl

Arc

204 | where 205 | P: ArcPool, 206 | { 207 | fn inner(&self) -> &ArcInner { 208 | unsafe { &*self.node_ptr.as_ptr().cast::>() } 209 | } 210 | 211 | fn from_inner(node_ptr: NonNullPtr>>>) -> Self { 212 | Self { node_ptr } 213 | } 214 | 215 | unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data { 216 | &mut *ptr::addr_of_mut!((*this.node_ptr.as_ptr().cast::>()).data) 217 | } 218 | 219 | #[inline(never)] 220 | unsafe fn drop_slow(&mut self) { 221 | // run `P::Data`'s destructor 222 | ptr::drop_in_place(Self::get_mut_unchecked(self)); 223 | 224 | // return memory to pool 225 | P::singleton().stack.push(self.node_ptr); 226 | } 227 | } 228 | 229 | impl

AsRef for Arc

230 | where 231 | P: ArcPool, 232 | { 233 | fn as_ref(&self) -> &P::Data { 234 | self 235 | } 236 | } 237 | 238 | const MAX_REFCOUNT: usize = (isize::MAX) as usize; 239 | 240 | impl

Clone for Arc

ops::Deref for Arc

267 | where 268 | P: ArcPool, 269 | { 270 | type Target = P::Data; 271 | 272 | fn deref(&self) -> &Self::Target { 273 | unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr().cast::>()).data) } 274 | } 275 | } 276 | 277 | impl fmt::Display for Arc 278 | where 279 | A: ArcPool, 280 | A::Data: fmt::Display, 281 | { 282 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 283 | A::Data::fmt(self, f) 284 | } 285 | } 286 | 287 | impl Drop for Arc 288 | where 289 | A: ArcPool, 290 | { 291 | fn drop(&mut self) { 292 | if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 { 293 | return; 294 | } 295 | 296 | atomic::fence(Ordering::Acquire); 297 | 298 | unsafe { self.drop_slow() } 299 | } 300 | } 301 | 302 | impl Eq for Arc 303 | where 304 | A: ArcPool, 305 | A::Data: Eq, 306 | { 307 | } 308 | 309 | impl Hash for Arc 310 | where 311 | A: ArcPool, 312 | A::Data: Hash, 313 | { 314 | fn hash(&self, state: &mut H) 315 | where 316 | H: Hasher, 317 | { 318 | (**self).hash(state); 319 | } 320 | } 321 | 322 | impl Ord for Arc 323 | where 324 | A: ArcPool, 325 | A::Data: Ord, 326 | { 327 | fn cmp(&self, other: &Self) -> core::cmp::Ordering { 328 | A::Data::cmp(self, other) 329 | } 330 | } 331 | 332 | impl PartialEq> for Arc 333 | where 334 | A: ArcPool, 335 | B: ArcPool, 336 | A::Data: PartialEq, 337 | { 338 | fn eq(&self, other: &Arc) -> bool { 339 | A::Data::eq(self, &**other) 340 | } 341 | } 342 | 343 | impl PartialOrd> for Arc 344 | where 345 | A: ArcPool, 346 | B: ArcPool, 347 | A::Data: PartialOrd, 348 | { 349 | fn partial_cmp(&self, other: &Arc) -> Option { 350 | A::Data::partial_cmp(self, &**other) 351 | } 352 | } 353 | 354 | unsafe impl Send for Arc 355 | where 356 | A: ArcPool, 357 | A::Data: Sync + Send, 358 | { 359 | } 360 | 361 | unsafe impl Sync for Arc 362 | where 363 | A: ArcPool, 364 | A::Data: Sync + Send, 365 | { 366 | } 367 | 368 | impl Unpin for Arc where A: ArcPool {} 369 | 370 | struct ArcInner { 371 | data: T, 372 | strong: AtomicUsize, 373 | } 374 | 
375 | /// A chunk of memory that an `ArcPool` can manage 376 | pub struct ArcBlock { 377 | node: UnionNode>>, 378 | } 379 | 380 | impl ArcBlock { 381 | /// Creates a new memory block 382 | pub const fn new() -> Self { 383 | Self { 384 | node: UnionNode { 385 | data: ManuallyDrop::new(MaybeUninit::uninit()), 386 | }, 387 | } 388 | } 389 | } 390 | 391 | impl Default for ArcBlock { 392 | fn default() -> Self { 393 | Self::new() 394 | } 395 | } 396 | 397 | #[cfg(test)] 398 | mod tests { 399 | use super::*; 400 | use std::ptr::addr_of_mut; 401 | 402 | #[test] 403 | fn cannot_alloc_if_empty() { 404 | arc_pool!(MyArcPool: i32); 405 | 406 | assert_eq!(Err(42), MyArcPool.alloc(42),); 407 | } 408 | 409 | #[test] 410 | fn can_alloc_if_manages_one_block() { 411 | arc_pool!(MyArcPool: i32); 412 | 413 | let block = unsafe { 414 | static mut BLOCK: ArcBlock = ArcBlock::new(); 415 | addr_of_mut!(BLOCK).as_mut().unwrap() 416 | }; 417 | MyArcPool.manage(block); 418 | 419 | assert_eq!(42, *MyArcPool.alloc(42).unwrap()); 420 | } 421 | 422 | #[test] 423 | fn alloc_drop_alloc() { 424 | arc_pool!(MyArcPool: i32); 425 | 426 | let block = unsafe { 427 | static mut BLOCK: ArcBlock = ArcBlock::new(); 428 | addr_of_mut!(BLOCK).as_mut().unwrap() 429 | }; 430 | MyArcPool.manage(block); 431 | 432 | let arc = MyArcPool.alloc(1).unwrap(); 433 | 434 | drop(arc); 435 | 436 | assert_eq!(2, *MyArcPool.alloc(2).unwrap()); 437 | } 438 | 439 | #[test] 440 | fn strong_count_starts_at_one() { 441 | arc_pool!(MyArcPool: i32); 442 | 443 | let block = unsafe { 444 | static mut BLOCK: ArcBlock = ArcBlock::new(); 445 | addr_of_mut!(BLOCK).as_mut().unwrap() 446 | }; 447 | MyArcPool.manage(block); 448 | 449 | let arc = MyArcPool.alloc(1).ok().unwrap(); 450 | 451 | assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed)); 452 | } 453 | 454 | #[test] 455 | fn clone_increases_strong_count() { 456 | arc_pool!(MyArcPool: i32); 457 | 458 | let block = unsafe { 459 | static mut BLOCK: ArcBlock = ArcBlock::new(); 460 | 
addr_of_mut!(BLOCK).as_mut().unwrap() 461 | }; 462 | MyArcPool.manage(block); 463 | 464 | let arc = MyArcPool.alloc(1).ok().unwrap(); 465 | 466 | let before = arc.inner().strong.load(Ordering::Relaxed); 467 | 468 | let arc2 = arc.clone(); 469 | 470 | let expected = before + 1; 471 | assert_eq!(expected, arc.inner().strong.load(Ordering::Relaxed)); 472 | assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed)); 473 | } 474 | 475 | #[test] 476 | fn drop_decreases_strong_count() { 477 | arc_pool!(MyArcPool: i32); 478 | 479 | let block = unsafe { 480 | static mut BLOCK: ArcBlock = ArcBlock::new(); 481 | addr_of_mut!(BLOCK).as_mut().unwrap() 482 | }; 483 | MyArcPool.manage(block); 484 | 485 | let arc = MyArcPool.alloc(1).ok().unwrap(); 486 | let arc2 = arc.clone(); 487 | 488 | let before = arc.inner().strong.load(Ordering::Relaxed); 489 | 490 | drop(arc); 491 | 492 | let expected = before - 1; 493 | assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed)); 494 | } 495 | 496 | #[test] 497 | fn runs_destructor_exactly_once_when_strong_count_reaches_zero() { 498 | static COUNT: AtomicUsize = AtomicUsize::new(0); 499 | 500 | pub struct MyStruct; 501 | 502 | impl Drop for MyStruct { 503 | fn drop(&mut self) { 504 | COUNT.fetch_add(1, Ordering::Relaxed); 505 | } 506 | } 507 | 508 | arc_pool!(MyArcPool: MyStruct); 509 | 510 | let block = unsafe { 511 | static mut BLOCK: ArcBlock = ArcBlock::new(); 512 | addr_of_mut!(BLOCK).as_mut().unwrap() 513 | }; 514 | MyArcPool.manage(block); 515 | 516 | let arc = MyArcPool.alloc(MyStruct).ok().unwrap(); 517 | 518 | assert_eq!(0, COUNT.load(Ordering::Relaxed)); 519 | 520 | drop(arc); 521 | 522 | assert_eq!(1, COUNT.load(Ordering::Relaxed)); 523 | } 524 | 525 | #[test] 526 | fn zst_is_well_aligned() { 527 | #[repr(align(4096))] 528 | pub struct Zst4096; 529 | 530 | arc_pool!(MyArcPool: Zst4096); 531 | 532 | let block = unsafe { 533 | static mut BLOCK: ArcBlock = ArcBlock::new(); 534 | addr_of_mut!(BLOCK).as_mut().unwrap() 
535 | }; 536 | MyArcPool.manage(block); 537 | 538 | let arc = MyArcPool.alloc(Zst4096).ok().unwrap(); 539 | 540 | let raw = &*arc as *const Zst4096; 541 | assert_eq!(0, raw as usize % 4096); 542 | } 543 | } 544 | -------------------------------------------------------------------------------- /src/pool/boxed.rs: -------------------------------------------------------------------------------- 1 | //! `std::boxed::Box`-like API on top of a lock-free memory pool 2 | //! 3 | //! # Example usage 4 | //! 5 | //! ``` 6 | //! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}}; 7 | //! 8 | //! box_pool!(MyBoxPool: u128); 9 | //! 10 | //! // cannot allocate without first giving memory blocks to the pool 11 | //! assert!(MyBoxPool.alloc(42).is_err()); 12 | //! 13 | //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) 14 | //! let block: &'static mut BoxBlock = unsafe { 15 | //! static mut BLOCK: BoxBlock = BoxBlock::new(); 16 | //! addr_of_mut!(BLOCK).as_mut().unwrap() 17 | //! }; 18 | //! 19 | //! // give block of memory to the pool 20 | //! MyBoxPool.manage(block); 21 | //! 22 | //! // it's now possible to allocate 23 | //! let mut boxed = MyBoxPool.alloc(1).unwrap(); 24 | //! 25 | //! // mutation is possible 26 | //! *boxed += 1; 27 | //! assert_eq!(2, *boxed); 28 | //! 29 | //! // number of boxes is limited to the number of blocks managed by the pool 30 | //! let res = MyBoxPool.alloc(3); 31 | //! assert!(res.is_err()); 32 | //! 33 | //! // give another memory block to the pool 34 | //! MyBoxPool.manage(unsafe { 35 | //! static mut BLOCK: BoxBlock = BoxBlock::new(); 36 | //! addr_of_mut!(BLOCK).as_mut().unwrap() 37 | //! }); 38 | //! 39 | //! // cloning also consumes a memory block from the pool 40 | //! let mut separate_box = boxed.clone(); 41 | //! *separate_box += 1; 42 | //! assert_eq!(3, *separate_box); 43 | //! 44 | //! // after the clone it's not possible to allocate again 45 | //! let res = MyBoxPool.alloc(4); 46 | //! 
assert!(res.is_err()); 47 | //! 48 | //! // `boxed`'s destructor returns the memory block to the pool 49 | //! drop(boxed); 50 | //! 51 | //! // it's possible to allocate again 52 | //! let res = MyBoxPool.alloc(5); 53 | //! 54 | //! assert!(res.is_ok()); 55 | //! ``` 56 | //! 57 | //! # Array block initialization 58 | //! 59 | //! You can create a static variable that contains an array of memory blocks and give all the blocks 60 | //! to the `BoxPool`. This requires an intermediate `const` value as shown below: 61 | //! 62 | //! ``` 63 | //! use heapless::{box_pool, pool::boxed::BoxBlock}; 64 | //! 65 | //! box_pool!(MyBoxPool: u128); 66 | //! 67 | //! const POOL_CAPACITY: usize = 8; 68 | //! 69 | //! let blocks: &'static mut [BoxBlock] = { 70 | //! #[allow(clippy::declare_interior_mutable_const)] 71 | //! const BLOCK: BoxBlock = BoxBlock::new(); // <= 72 | //! static mut BLOCKS: [BoxBlock; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY]; 73 | //! unsafe { addr_of_mut!(BLOCK).as_mut().unwrap()S } 74 | //! }; 75 | //! 76 | //! for block in blocks { 77 | //! MyBoxPool.manage(block); 78 | //! } 79 | //! ``` 80 | 81 | use core::{ 82 | fmt, 83 | hash::{Hash, Hasher}, 84 | mem::{ManuallyDrop, MaybeUninit}, 85 | ops, ptr, 86 | }; 87 | 88 | use stable_deref_trait::StableDeref; 89 | 90 | use super::treiber::{NonNullPtr, Stack, UnionNode}; 91 | 92 | /// Creates a new `BoxPool` singleton with the given `$name` that manages the specified `$data_type` 93 | /// 94 | /// For more extensive documentation see the [module level documentation](crate::pool::boxed) 95 | #[macro_export] 96 | macro_rules! box_pool { 97 | ($name:ident: $data_type:ty) => { 98 | pub struct $name; 99 | 100 | impl $crate::pool::boxed::BoxPool for $name { 101 | type Data = $data_type; 102 | 103 | fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> { 104 | // Even though the static variable is not exposed to user code, it is 105 | // still useful to have a descriptive symbol name for debugging. 
106 | #[allow(non_upper_case_globals)] 107 | static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> = 108 | $crate::pool::boxed::BoxPoolImpl::new(); 109 | 110 | &$name 111 | } 112 | } 113 | 114 | impl $name { 115 | /// Inherent method version of `BoxPool::alloc` 116 | #[allow(dead_code)] 117 | pub fn alloc( 118 | &self, 119 | value: $data_type, 120 | ) -> Result<$crate::pool::boxed::Box<$name>, $data_type> { 121 | <$name as $crate::pool::boxed::BoxPool>::alloc(value) 122 | } 123 | 124 | /// Inherent method version of `BoxPool::manage` 125 | #[allow(dead_code)] 126 | pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) { 127 | <$name as $crate::pool::boxed::BoxPool>::manage(block) 128 | } 129 | } 130 | }; 131 | } 132 | 133 | /// A singleton that manages `pool::boxed::Box`-es 134 | /// 135 | /// # Usage 136 | /// 137 | /// Do not implement this trait yourself; instead use the `box_pool!` macro to create a type that 138 | /// implements this trait. 139 | /// 140 | /// # Semver guarantees 141 | /// 142 | /// *Implementing* this trait is exempt from semver guarantees. 143 | /// i.e. a new patch release is allowed to break downstream `BoxPool` implementations. 144 | /// 145 | /// *Using* the trait, e.g. in generic code, does fall under semver guarantees. 
146 | pub trait BoxPool: Sized { 147 | /// The data type managed by the memory pool 148 | type Data: 'static; 149 | 150 | /// `box_pool!` implementation detail 151 | #[doc(hidden)] 152 | fn singleton() -> &'static BoxPoolImpl; 153 | 154 | /// Allocate a new `Box` initialized to the given `value` 155 | /// 156 | /// `manage` should be called at least once before calling `alloc` 157 | /// 158 | /// # Errors 159 | /// 160 | /// The `Err`or variant is returned when the memory pool has run out of memory blocks 161 | fn alloc(value: Self::Data) -> Result, Self::Data> { 162 | Ok(Box { 163 | node_ptr: Self::singleton().alloc(value)?, 164 | }) 165 | } 166 | 167 | /// Add a statically allocated memory block to the memory pool 168 | fn manage(block: &'static mut BoxBlock) { 169 | Self::singleton().manage(block); 170 | } 171 | } 172 | 173 | /// Like `std::boxed::Box` but managed by memory pool `P` rather than `#[global_allocator]` 174 | pub struct Box

175 | where 176 | P: BoxPool, 177 | { 178 | node_ptr: NonNullPtr>>, 179 | } 180 | 181 | impl Clone for Box 182 | where 183 | A: BoxPool, 184 | A::Data: Clone, 185 | { 186 | fn clone(&self) -> Self { 187 | A::alloc((**self).clone()).ok().expect("OOM") 188 | } 189 | } 190 | 191 | impl fmt::Debug for Box 192 | where 193 | A: BoxPool, 194 | A::Data: fmt::Debug, 195 | { 196 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 197 | A::Data::fmt(self, f) 198 | } 199 | } 200 | 201 | impl

ops::Deref for Box

202 | where 203 | P: BoxPool, 204 | { 205 | type Target = P::Data; 206 | 207 | fn deref(&self) -> &Self::Target { 208 | unsafe { &*self.node_ptr.as_ptr().cast::() } 209 | } 210 | } 211 | 212 | impl

ops::DerefMut for Box

213 | where 214 | P: BoxPool, 215 | { 216 | fn deref_mut(&mut self) -> &mut Self::Target { 217 | unsafe { &mut *self.node_ptr.as_ptr().cast::() } 218 | } 219 | } 220 | 221 | unsafe impl

StableDeref for Box

where P: BoxPool {} 222 | 223 | impl fmt::Display for Box 224 | where 225 | A: BoxPool, 226 | A::Data: fmt::Display, 227 | { 228 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 229 | A::Data::fmt(self, f) 230 | } 231 | } 232 | 233 | impl

Drop for Box

234 | where 235 | P: BoxPool, 236 | { 237 | fn drop(&mut self) { 238 | let node = self.node_ptr; 239 | 240 | unsafe { ptr::drop_in_place(node.as_ptr().cast::()) } 241 | 242 | unsafe { P::singleton().stack.push(node) } 243 | } 244 | } 245 | 246 | impl Eq for Box 247 | where 248 | A: BoxPool, 249 | A::Data: Eq, 250 | { 251 | } 252 | 253 | impl Hash for Box 254 | where 255 | A: BoxPool, 256 | A::Data: Hash, 257 | { 258 | fn hash(&self, state: &mut H) 259 | where 260 | H: Hasher, 261 | { 262 | (**self).hash(state); 263 | } 264 | } 265 | 266 | impl Ord for Box 267 | where 268 | A: BoxPool, 269 | A::Data: Ord, 270 | { 271 | fn cmp(&self, other: &Self) -> core::cmp::Ordering { 272 | A::Data::cmp(self, other) 273 | } 274 | } 275 | 276 | impl PartialEq> for Box 277 | where 278 | A: BoxPool, 279 | B: BoxPool, 280 | A::Data: PartialEq, 281 | { 282 | fn eq(&self, other: &Box) -> bool { 283 | A::Data::eq(self, other) 284 | } 285 | } 286 | 287 | impl PartialOrd> for Box 288 | where 289 | A: BoxPool, 290 | B: BoxPool, 291 | A::Data: PartialOrd, 292 | { 293 | fn partial_cmp(&self, other: &Box) -> Option { 294 | A::Data::partial_cmp(self, other) 295 | } 296 | } 297 | 298 | unsafe impl

Send for Box

299 | where 300 | P: BoxPool, 301 | P::Data: Send, 302 | { 303 | } 304 | 305 | unsafe impl

Sync for Box

306 | where 307 | P: BoxPool, 308 | P::Data: Sync, 309 | { 310 | } 311 | 312 | /// `box_pool!` implementation detail 313 | // newtype to avoid having to make field types public 314 | #[doc(hidden)] 315 | pub struct BoxPoolImpl { 316 | stack: Stack>>, 317 | } 318 | 319 | impl BoxPoolImpl { 320 | #[allow(clippy::new_without_default)] 321 | pub const fn new() -> Self { 322 | Self { 323 | stack: Stack::new(), 324 | } 325 | } 326 | 327 | fn alloc(&self, value: T) -> Result>>, T> { 328 | if let Some(node_ptr) = self.stack.try_pop() { 329 | unsafe { node_ptr.as_ptr().cast::().write(value) } 330 | 331 | Ok(node_ptr) 332 | } else { 333 | Err(value) 334 | } 335 | } 336 | 337 | fn manage(&self, block: &'static mut BoxBlock) { 338 | let node: &'static mut _ = &mut block.node; 339 | 340 | unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) } 341 | } 342 | } 343 | 344 | unsafe impl Sync for BoxPoolImpl {} 345 | 346 | /// A chunk of memory that a `BoxPool` singleton can manage 347 | pub struct BoxBlock { 348 | node: UnionNode>, 349 | } 350 | 351 | impl BoxBlock { 352 | /// Creates a new memory block 353 | pub const fn new() -> Self { 354 | Self { 355 | node: UnionNode { 356 | data: ManuallyDrop::new(MaybeUninit::uninit()), 357 | }, 358 | } 359 | } 360 | } 361 | 362 | impl Default for BoxBlock { 363 | fn default() -> Self { 364 | Self::new() 365 | } 366 | } 367 | 368 | #[cfg(test)] 369 | mod tests { 370 | use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; 371 | use std::ptr::addr_of_mut; 372 | use std::thread; 373 | 374 | use super::*; 375 | 376 | #[test] 377 | fn cannot_alloc_if_empty() { 378 | box_pool!(MyBoxPool: i32); 379 | 380 | assert_eq!(Err(42), MyBoxPool.alloc(42)); 381 | } 382 | 383 | #[test] 384 | fn can_alloc_if_pool_manages_one_block() { 385 | box_pool!(MyBoxPool: i32); 386 | 387 | let block = unsafe { 388 | static mut BLOCK: BoxBlock = BoxBlock::new(); 389 | addr_of_mut!(BLOCK).as_mut().unwrap() 390 | }; 391 | MyBoxPool.manage(block); 392 | 393 
| assert_eq!(42, *MyBoxPool.alloc(42).unwrap()); 394 | } 395 | 396 | #[test] 397 | fn alloc_drop_alloc() { 398 | box_pool!(MyBoxPool: i32); 399 | 400 | let block = unsafe { 401 | static mut BLOCK: BoxBlock = BoxBlock::new(); 402 | addr_of_mut!(BLOCK).as_mut().unwrap() 403 | }; 404 | MyBoxPool.manage(block); 405 | 406 | let boxed = MyBoxPool.alloc(1).unwrap(); 407 | 408 | drop(boxed); 409 | 410 | assert_eq!(2, *MyBoxPool.alloc(2).unwrap()); 411 | } 412 | 413 | #[test] 414 | fn runs_destructor_exactly_once_on_drop() { 415 | static COUNT: AtomicUsize = AtomicUsize::new(0); 416 | 417 | pub struct MyStruct; 418 | 419 | impl Drop for MyStruct { 420 | fn drop(&mut self) { 421 | COUNT.fetch_add(1, Ordering::Relaxed); 422 | } 423 | } 424 | 425 | box_pool!(MyBoxPool: MyStruct); 426 | 427 | let block = unsafe { 428 | static mut BLOCK: BoxBlock = BoxBlock::new(); 429 | addr_of_mut!(BLOCK).as_mut().unwrap() 430 | }; 431 | MyBoxPool.manage(block); 432 | 433 | let boxed = MyBoxPool.alloc(MyStruct).ok().unwrap(); 434 | 435 | assert_eq!(0, COUNT.load(Ordering::Relaxed)); 436 | 437 | drop(boxed); 438 | 439 | assert_eq!(1, COUNT.load(Ordering::Relaxed)); 440 | } 441 | 442 | #[test] 443 | fn zst_is_well_aligned() { 444 | #[repr(align(4096))] 445 | pub struct Zst4096; 446 | 447 | box_pool!(MyBoxPool: Zst4096); 448 | 449 | let block = unsafe { 450 | static mut BLOCK: BoxBlock = BoxBlock::new(); 451 | addr_of_mut!(BLOCK).as_mut().unwrap() 452 | }; 453 | MyBoxPool.manage(block); 454 | 455 | let boxed = MyBoxPool.alloc(Zst4096).ok().unwrap(); 456 | 457 | let raw = &*boxed as *const Zst4096; 458 | assert_eq!(0, raw as usize % 4096); 459 | } 460 | 461 | #[test] 462 | fn can_clone_if_pool_is_not_exhausted() { 463 | static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); 464 | 465 | pub struct MyStruct; 466 | 467 | impl Clone for MyStruct { 468 | fn clone(&self) -> Self { 469 | STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); 470 | Self 471 | } 472 | } 473 | 474 | 
box_pool!(MyBoxPool: MyStruct); 475 | 476 | MyBoxPool.manage(unsafe { 477 | static mut BLOCK: BoxBlock = BoxBlock::new(); 478 | addr_of_mut!(BLOCK).as_mut().unwrap() 479 | }); 480 | MyBoxPool.manage(unsafe { 481 | static mut BLOCK: BoxBlock = BoxBlock::new(); 482 | addr_of_mut!(BLOCK).as_mut().unwrap() 483 | }); 484 | 485 | let first = MyBoxPool.alloc(MyStruct).ok().unwrap(); 486 | let _second = first.clone(); 487 | 488 | assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed)); 489 | 490 | let is_oom = MyBoxPool.alloc(MyStruct).is_err(); 491 | assert!(is_oom); 492 | } 493 | 494 | #[test] 495 | fn clone_panics_if_pool_exhausted() { 496 | static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); 497 | 498 | pub struct MyStruct; 499 | 500 | impl Clone for MyStruct { 501 | fn clone(&self) -> Self { 502 | STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); 503 | Self 504 | } 505 | } 506 | 507 | box_pool!(MyBoxPool: MyStruct); 508 | 509 | MyBoxPool.manage(unsafe { 510 | static mut BLOCK: BoxBlock = BoxBlock::new(); 511 | addr_of_mut!(BLOCK).as_mut().unwrap() 512 | }); 513 | 514 | let first = MyBoxPool.alloc(MyStruct).ok().unwrap(); 515 | 516 | let thread = thread::spawn(move || { 517 | let _second = first.clone(); 518 | }); 519 | 520 | let thread_panicked = thread.join().is_err(); 521 | assert!(thread_panicked); 522 | 523 | // we diverge from `alloc::Box` in that we call `T::clone` first and then request 524 | // memory from the allocator whereas `alloc::Box` does it the other way around 525 | // assert!(!STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed)); 526 | } 527 | 528 | #[test] 529 | fn panicking_clone_does_not_leak_memory() { 530 | static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); 531 | 532 | pub struct MyStruct; 533 | 534 | impl Clone for MyStruct { 535 | fn clone(&self) -> Self { 536 | STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); 537 | panic!() 538 | } 539 | } 540 | 541 | box_pool!(MyBoxPool: MyStruct); 542 | 543 | 
MyBoxPool.manage(unsafe { 544 | static mut BLOCK: BoxBlock = BoxBlock::new(); 545 | addr_of_mut!(BLOCK).as_mut().unwrap() 546 | }); 547 | MyBoxPool.manage(unsafe { 548 | static mut BLOCK: BoxBlock = BoxBlock::new(); 549 | addr_of_mut!(BLOCK).as_mut().unwrap() 550 | }); 551 | 552 | let boxed = MyBoxPool.alloc(MyStruct).ok().unwrap(); 553 | 554 | let thread = thread::spawn(move || { 555 | let _boxed = boxed.clone(); 556 | }); 557 | 558 | let thread_panicked = thread.join().is_err(); 559 | assert!(thread_panicked); 560 | 561 | assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed)); 562 | 563 | let once = MyBoxPool.alloc(MyStruct); 564 | let twice = MyBoxPool.alloc(MyStruct); 565 | 566 | assert!(once.is_ok()); 567 | assert!(twice.is_ok()); 568 | } 569 | } 570 | -------------------------------------------------------------------------------- /src/pool/object.rs: -------------------------------------------------------------------------------- 1 | //! Object pool API 2 | //! 3 | //! # Example usage 4 | //! 5 | //! ``` 6 | //! use heapless::{object_pool, pool::object::{Object, ObjectBlock}}; 7 | //! 8 | //! object_pool!(MyObjectPool: [u8; 128]); 9 | //! 10 | //! // cannot request objects without first giving object blocks to the pool 11 | //! assert!(MyObjectPool.request().is_none()); 12 | //! 13 | //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) 14 | //! let block: &'static mut ObjectBlock<[u8; 128]> = unsafe { 15 | //! // unlike the memory pool APIs, an initial value must be specified here 16 | //! static mut BLOCK: ObjectBlock<[u8; 128]>= ObjectBlock::new([0; 128]); 17 | //! addr_of_mut!(BLOCK).as_mut().unwrap() 18 | //! }; 19 | //! 20 | //! // give object block to the pool 21 | //! MyObjectPool.manage(block); 22 | //! 23 | //! // it's now possible to request objects 24 | //! // unlike the memory pool APIs, no initial value is required here 25 | //! let mut object = MyObjectPool.request().unwrap(); 26 | //! 27 | //! 
// mutation is possible 28 | //! object.iter_mut().for_each(|byte| *byte = byte.wrapping_add(1)); 29 | //! 30 | //! // the number of live objects is limited to the number of blocks managed by the pool 31 | //! let res = MyObjectPool.request(); 32 | //! assert!(res.is_none()); 33 | //! 34 | //! // `object`'s destructor returns the object to the pool 35 | //! drop(object); 36 | //! 37 | //! // it's possible to request an `Object` again 38 | //! let res = MyObjectPool.request(); 39 | //! 40 | //! assert!(res.is_some()); 41 | //! ``` 42 | //! 43 | //! # Array block initialization 44 | //! 45 | //! You can create a static variable that contains an array of memory blocks and give all the blocks 46 | //! to the `ObjectPool`. This requires an intermediate `const` value as shown below: 47 | //! 48 | //! ``` 49 | //! use heapless::{object_pool, pool::object::ObjectBlock}; 50 | //! 51 | //! object_pool!(MyObjectPool: [u8; 128]); 52 | //! 53 | //! const POOL_CAPACITY: usize = 8; 54 | //! 55 | //! let blocks: &'static mut [ObjectBlock<[u8; 128]>] = { 56 | //! const BLOCK: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]); // <= 57 | //! static mut BLOCKS: [ObjectBlock<[u8; 128]>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY]; 58 | //! unsafe { addr_of_mut!(BLOCK).as_mut().unwrap()S } 59 | //! }; 60 | //! 61 | //! for block in blocks { 62 | //! MyObjectPool.manage(block); 63 | //! } 64 | //! ``` 65 | 66 | use core::{ 67 | cmp::Ordering, 68 | fmt, 69 | hash::{Hash, Hasher}, 70 | mem::ManuallyDrop, 71 | ops, ptr, 72 | }; 73 | 74 | use stable_deref_trait::StableDeref; 75 | 76 | use super::treiber::{AtomicPtr, NonNullPtr, Stack, StructNode}; 77 | 78 | /// Creates a new `ObjectPool` singleton with the given `$name` that manages the specified 79 | /// `$data_type` 80 | /// 81 | /// For more extensive documentation see the [module level documentation](crate::pool::object) 82 | #[macro_export] 83 | macro_rules! 
object_pool { 84 | ($name:ident: $data_type:ty) => { 85 | pub struct $name; 86 | 87 | impl $crate::pool::object::ObjectPool for $name { 88 | type Data = $data_type; 89 | 90 | fn singleton() -> &'static $crate::pool::object::ObjectPoolImpl<$data_type> { 91 | // Even though the static variable is not exposed to user code, it is 92 | // still useful to have a descriptive symbol name for debugging. 93 | #[allow(non_upper_case_globals)] 94 | static $name: $crate::pool::object::ObjectPoolImpl<$data_type> = 95 | $crate::pool::object::ObjectPoolImpl::new(); 96 | 97 | &$name 98 | } 99 | } 100 | 101 | impl $name { 102 | /// Inherent method version of `ObjectPool::request` 103 | #[allow(dead_code)] 104 | pub fn request(&self) -> Option<$crate::pool::object::Object<$name>> { 105 | <$name as $crate::pool::object::ObjectPool>::request() 106 | } 107 | 108 | /// Inherent method version of `ObjectPool::manage` 109 | #[allow(dead_code)] 110 | pub fn manage( 111 | &self, 112 | block: &'static mut $crate::pool::object::ObjectBlock<$data_type>, 113 | ) { 114 | <$name as $crate::pool::object::ObjectPool>::manage(block) 115 | } 116 | } 117 | }; 118 | } 119 | 120 | /// A singleton that manages `pool::object::Object`s 121 | pub trait ObjectPool: Sized { 122 | /// The data type of the objects managed by the object pool 123 | type Data: 'static; 124 | 125 | /// `object_pool!` implementation detail 126 | #[doc(hidden)] 127 | fn singleton() -> &'static ObjectPoolImpl; 128 | 129 | /// Request a new object from the pool 130 | fn request() -> Option> { 131 | Self::singleton() 132 | .request() 133 | .map(|node_ptr| Object { node_ptr }) 134 | } 135 | 136 | /// Adds a statically allocate object to the pool 137 | fn manage(block: &'static mut ObjectBlock) { 138 | Self::singleton().manage(block); 139 | } 140 | } 141 | 142 | /// `object_pool!` implementation detail 143 | #[doc(hidden)] 144 | pub struct ObjectPoolImpl { 145 | stack: Stack>, 146 | } 147 | 148 | impl ObjectPoolImpl { 149 | /// 
`object_pool!` implementation detail 150 | #[doc(hidden)] 151 | pub const fn new() -> Self { 152 | Self { 153 | stack: Stack::new(), 154 | } 155 | } 156 | 157 | fn request(&self) -> Option>> { 158 | self.stack.try_pop() 159 | } 160 | 161 | fn manage(&self, block: &'static mut ObjectBlock) { 162 | let node: &'static mut _ = &mut block.node; 163 | 164 | unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) } 165 | } 166 | } 167 | 168 | // `T needs` to be Send because returning an object from a thread and then 169 | // requesting it from another is effectively a cross-thread 'send' operation 170 | unsafe impl Sync for ObjectPoolImpl where T: Send {} 171 | 172 | /// An object managed by object pool `P` 173 | pub struct Object

174 | where 175 | P: ObjectPool, 176 | { 177 | node_ptr: NonNullPtr>, 178 | } 179 | 180 | impl AsMut<[T]> for Object 181 | where 182 | A: ObjectPool, 183 | { 184 | fn as_mut(&mut self) -> &mut [T] { 185 | &mut **self 186 | } 187 | } 188 | 189 | impl AsRef<[T]> for Object 190 | where 191 | A: ObjectPool, 192 | { 193 | fn as_ref(&self) -> &[T] { 194 | &**self 195 | } 196 | } 197 | 198 | impl fmt::Debug for Object 199 | where 200 | A: ObjectPool, 201 | A::Data: fmt::Debug, 202 | { 203 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 204 | A::Data::fmt(self, f) 205 | } 206 | } 207 | 208 | impl ops::Deref for Object 209 | where 210 | A: ObjectPool, 211 | { 212 | type Target = A::Data; 213 | 214 | fn deref(&self) -> &Self::Target { 215 | unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr()).data) } 216 | } 217 | } 218 | 219 | impl ops::DerefMut for Object 220 | where 221 | A: ObjectPool, 222 | { 223 | fn deref_mut(&mut self) -> &mut Self::Target { 224 | unsafe { &mut *ptr::addr_of_mut!((*self.node_ptr.as_ptr()).data) } 225 | } 226 | } 227 | 228 | unsafe impl StableDeref for Object where A: ObjectPool {} 229 | 230 | impl fmt::Display for Object 231 | where 232 | A: ObjectPool, 233 | A::Data: fmt::Display, 234 | { 235 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 236 | A::Data::fmt(self, f) 237 | } 238 | } 239 | 240 | impl

Drop for Object

241 | where 242 | P: ObjectPool, 243 | { 244 | fn drop(&mut self) { 245 | unsafe { P::singleton().stack.push(self.node_ptr) } 246 | } 247 | } 248 | 249 | impl Eq for Object 250 | where 251 | A: ObjectPool, 252 | A::Data: Eq, 253 | { 254 | } 255 | 256 | impl Hash for Object 257 | where 258 | A: ObjectPool, 259 | A::Data: Hash, 260 | { 261 | fn hash(&self, state: &mut H) 262 | where 263 | H: Hasher, 264 | { 265 | (**self).hash(state); 266 | } 267 | } 268 | 269 | impl Ord for Object 270 | where 271 | A: ObjectPool, 272 | A::Data: Ord, 273 | { 274 | fn cmp(&self, other: &Self) -> Ordering { 275 | A::Data::cmp(self, other) 276 | } 277 | } 278 | 279 | impl PartialEq> for Object 280 | where 281 | A: ObjectPool, 282 | B: ObjectPool, 283 | A::Data: PartialEq, 284 | { 285 | fn eq(&self, other: &Object) -> bool { 286 | A::Data::eq(self, other) 287 | } 288 | } 289 | 290 | impl PartialOrd> for Object 291 | where 292 | A: ObjectPool, 293 | B: ObjectPool, 294 | A::Data: PartialOrd, 295 | { 296 | fn partial_cmp(&self, other: &Object) -> Option { 297 | A::Data::partial_cmp(self, other) 298 | } 299 | } 300 | 301 | unsafe impl

Send for Object

302 | where 303 | P: ObjectPool, 304 | P::Data: Send, 305 | { 306 | } 307 | 308 | unsafe impl

Sync for Object

309 | where 310 | P: ObjectPool, 311 | P::Data: Sync, 312 | { 313 | } 314 | 315 | /// An object "block" of data type `T` that has not yet been associated to an `ObjectPool` 316 | pub struct ObjectBlock { 317 | node: StructNode, 318 | } 319 | 320 | impl ObjectBlock { 321 | /// Creates a new object block with the given `initial_value` 322 | pub const fn new(initial_value: T) -> Self { 323 | Self { 324 | node: StructNode { 325 | next: ManuallyDrop::new(AtomicPtr::null()), 326 | data: ManuallyDrop::new(initial_value), 327 | }, 328 | } 329 | } 330 | } 331 | 332 | #[cfg(test)] 333 | mod tests { 334 | use core::sync::atomic::{self, AtomicUsize}; 335 | use std::ptr::addr_of_mut; 336 | 337 | use super::*; 338 | 339 | #[test] 340 | fn cannot_request_if_empty() { 341 | object_pool!(MyObjectPool: i32); 342 | 343 | assert_eq!(None, MyObjectPool.request()); 344 | } 345 | 346 | #[test] 347 | fn can_request_if_manages_one_block() { 348 | object_pool!(MyObjectPool: i32); 349 | 350 | let block = unsafe { 351 | static mut BLOCK: ObjectBlock = ObjectBlock::new(1); 352 | addr_of_mut!(BLOCK).as_mut().unwrap() 353 | }; 354 | MyObjectPool.manage(block); 355 | 356 | assert_eq!(1, *MyObjectPool.request().unwrap()); 357 | } 358 | 359 | #[test] 360 | fn request_drop_request() { 361 | object_pool!(MyObjectPool: i32); 362 | 363 | let block = unsafe { 364 | static mut BLOCK: ObjectBlock = ObjectBlock::new(1); 365 | addr_of_mut!(BLOCK).as_mut().unwrap() 366 | }; 367 | MyObjectPool.manage(block); 368 | 369 | let mut object = MyObjectPool.request().unwrap(); 370 | 371 | *object = 2; 372 | drop(object); 373 | 374 | assert_eq!(2, *MyObjectPool.request().unwrap()); 375 | } 376 | 377 | #[test] 378 | fn destructor_does_not_run_on_drop() { 379 | static COUNT: AtomicUsize = AtomicUsize::new(0); 380 | 381 | pub struct MyStruct; 382 | 383 | impl Drop for MyStruct { 384 | fn drop(&mut self) { 385 | COUNT.fetch_add(1, atomic::Ordering::Relaxed); 386 | } 387 | } 388 | 389 | object_pool!(MyObjectPool: 
MyStruct); 390 | 391 | let block = unsafe { 392 | static mut BLOCK: ObjectBlock = ObjectBlock::new(MyStruct); 393 | addr_of_mut!(BLOCK).as_mut().unwrap() 394 | }; 395 | MyObjectPool.manage(block); 396 | 397 | let object = MyObjectPool.request().unwrap(); 398 | 399 | assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed)); 400 | 401 | drop(object); 402 | 403 | assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed)); 404 | } 405 | 406 | #[test] 407 | fn zst_is_well_aligned() { 408 | #[repr(align(4096))] 409 | pub struct Zst4096; 410 | 411 | object_pool!(MyObjectPool: Zst4096); 412 | 413 | let block = unsafe { 414 | static mut BLOCK: ObjectBlock = ObjectBlock::new(Zst4096); 415 | addr_of_mut!(BLOCK).as_mut().unwrap() 416 | }; 417 | MyObjectPool.manage(block); 418 | 419 | let object = MyObjectPool.request().unwrap(); 420 | 421 | let raw = &*object as *const Zst4096; 422 | assert_eq!(0, raw as usize % 4096); 423 | } 424 | } 425 | -------------------------------------------------------------------------------- /src/pool/treiber.rs: -------------------------------------------------------------------------------- 1 | use core::mem::ManuallyDrop; 2 | 3 | #[cfg_attr(not(arm_llsc), path = "treiber/cas.rs")] 4 | #[cfg_attr(arm_llsc, path = "treiber/llsc.rs")] 5 | mod impl_; 6 | 7 | pub use impl_::{AtomicPtr, NonNullPtr}; 8 | 9 | pub struct Stack 10 | where 11 | N: Node, 12 | { 13 | top: AtomicPtr, 14 | } 15 | 16 | impl Stack 17 | where 18 | N: Node, 19 | { 20 | pub const fn new() -> Self { 21 | Self { 22 | top: AtomicPtr::null(), 23 | } 24 | } 25 | 26 | /// # Safety 27 | /// - `node` must be a valid pointer 28 | /// - aliasing rules must be enforced by the caller. 
e.g, the same `node` may not be pushed more than once 29 | pub unsafe fn push(&self, node: NonNullPtr) { 30 | impl_::push(self, node); 31 | } 32 | 33 | pub fn try_pop(&self) -> Option> { 34 | impl_::try_pop(self) 35 | } 36 | } 37 | 38 | pub trait Node: Sized { 39 | type Data; 40 | 41 | fn next(&self) -> &AtomicPtr; 42 | 43 | #[allow(dead_code)] // used conditionally 44 | fn next_mut(&mut self) -> &mut AtomicPtr; 45 | } 46 | 47 | pub union UnionNode { 48 | next: ManuallyDrop>>, 49 | pub data: ManuallyDrop, 50 | } 51 | 52 | impl Node for UnionNode { 53 | type Data = T; 54 | 55 | fn next(&self) -> &AtomicPtr { 56 | unsafe { &self.next } 57 | } 58 | 59 | fn next_mut(&mut self) -> &mut AtomicPtr { 60 | unsafe { &mut self.next } 61 | } 62 | } 63 | 64 | pub struct StructNode { 65 | pub next: ManuallyDrop>>, 66 | pub data: ManuallyDrop, 67 | } 68 | 69 | impl Node for StructNode { 70 | type Data = T; 71 | 72 | fn next(&self) -> &AtomicPtr { 73 | &self.next 74 | } 75 | 76 | fn next_mut(&mut self) -> &mut AtomicPtr { 77 | &mut self.next 78 | } 79 | } 80 | 81 | #[cfg(test)] 82 | mod tests { 83 | use core::mem; 84 | 85 | use super::*; 86 | 87 | #[test] 88 | fn node_is_never_zero_sized() { 89 | struct Zst; 90 | 91 | assert_ne!(mem::size_of::>(), 0); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/pool/treiber/cas.rs: -------------------------------------------------------------------------------- 1 | use core::{marker::PhantomData, ptr::NonNull}; 2 | 3 | #[cfg(not(feature = "portable-atomic"))] 4 | use core::sync::atomic; 5 | #[cfg(feature = "portable-atomic")] 6 | use portable_atomic as atomic; 7 | 8 | use atomic::Ordering; 9 | 10 | use super::{Node, Stack}; 11 | 12 | #[cfg(target_pointer_width = "32")] 13 | mod types { 14 | use super::atomic; 15 | 16 | pub type Inner = u64; 17 | pub type InnerAtomic = atomic::AtomicU64; 18 | pub type InnerNonZero = core::num::NonZeroU64; 19 | 20 | pub type Tag = core::num::NonZeroU32; 21 
| pub type Address = u32; 22 | } 23 | 24 | #[cfg(target_pointer_width = "64")] 25 | mod types { 26 | use super::atomic; 27 | 28 | pub type Inner = u128; 29 | pub type InnerAtomic = atomic::AtomicU128; 30 | pub type InnerNonZero = core::num::NonZeroU128; 31 | 32 | pub type Tag = core::num::NonZeroU64; 33 | pub type Address = u64; 34 | } 35 | 36 | use types::*; 37 | 38 | pub struct AtomicPtr 39 | where 40 | N: Node, 41 | { 42 | inner: InnerAtomic, 43 | _marker: PhantomData<*mut N>, 44 | } 45 | 46 | impl AtomicPtr 47 | where 48 | N: Node, 49 | { 50 | #[inline] 51 | pub const fn null() -> Self { 52 | Self { 53 | inner: InnerAtomic::new(0), 54 | _marker: PhantomData, 55 | } 56 | } 57 | 58 | fn compare_and_exchange_weak( 59 | &self, 60 | current: Option>, 61 | new: Option>, 62 | success: Ordering, 63 | failure: Ordering, 64 | ) -> Result<(), Option>> { 65 | self.inner 66 | .compare_exchange_weak( 67 | current.map(NonNullPtr::into_inner).unwrap_or_default(), 68 | new.map(NonNullPtr::into_inner).unwrap_or_default(), 69 | success, 70 | failure, 71 | ) 72 | .map(drop) 73 | .map_err(|value| { 74 | // SAFETY: `value` cam from a `NonNullPtr::into_inner` call. 
75 | unsafe { NonNullPtr::from_inner(value) } 76 | }) 77 | } 78 | 79 | #[inline] 80 | fn load(&self, order: Ordering) -> Option> { 81 | Some(NonNullPtr { 82 | inner: InnerNonZero::new(self.inner.load(order))?, 83 | _marker: PhantomData, 84 | }) 85 | } 86 | 87 | #[inline] 88 | fn store(&self, value: Option>, order: Ordering) { 89 | self.inner 90 | .store(value.map(NonNullPtr::into_inner).unwrap_or_default(), order); 91 | } 92 | } 93 | 94 | pub struct NonNullPtr 95 | where 96 | N: Node, 97 | { 98 | inner: InnerNonZero, 99 | _marker: PhantomData<*mut N>, 100 | } 101 | 102 | impl Clone for NonNullPtr 103 | where 104 | N: Node, 105 | { 106 | fn clone(&self) -> Self { 107 | *self 108 | } 109 | } 110 | 111 | impl Copy for NonNullPtr where N: Node {} 112 | 113 | impl NonNullPtr 114 | where 115 | N: Node, 116 | { 117 | #[inline] 118 | pub fn as_ptr(&self) -> *mut N { 119 | self.inner.get() as *mut N 120 | } 121 | 122 | #[inline] 123 | pub fn from_static_mut_ref(reference: &'static mut N) -> Self { 124 | // SAFETY: `reference` is a static mutable reference, i.e. a valid pointer. 125 | unsafe { Self::new_unchecked(initial_tag(), NonNull::from(reference)) } 126 | } 127 | 128 | /// # Safety 129 | /// 130 | /// - `ptr` must be a valid pointer. 131 | #[inline] 132 | unsafe fn new_unchecked(tag: Tag, ptr: NonNull) -> Self { 133 | let value = 134 | (Inner::from(tag.get()) << Address::BITS) | Inner::from(ptr.as_ptr() as Address); 135 | 136 | Self { 137 | // SAFETY: `value` is constructed from a `Tag` which is non-zero and half the 138 | // size of the `InnerNonZero` type, and a `NonNull` pointer. 139 | inner: unsafe { InnerNonZero::new_unchecked(value) }, 140 | _marker: PhantomData, 141 | } 142 | } 143 | 144 | /// # Safety 145 | /// 146 | /// - `value` must come from a `Self::into_inner` call. 
147 | #[inline] 148 | unsafe fn from_inner(value: Inner) -> Option { 149 | Some(Self { 150 | inner: InnerNonZero::new(value)?, 151 | _marker: PhantomData, 152 | }) 153 | } 154 | 155 | #[inline] 156 | fn non_null(&self) -> NonNull { 157 | // SAFETY: `Self` can only be constructed using a `NonNull`. 158 | unsafe { NonNull::new_unchecked(self.as_ptr()) } 159 | } 160 | 161 | #[inline] 162 | fn into_inner(self) -> Inner { 163 | self.inner.get() 164 | } 165 | 166 | #[inline] 167 | fn tag(&self) -> Tag { 168 | // SAFETY: `self.inner` was constructed from a non-zero `Tag`. 169 | unsafe { Tag::new_unchecked((self.inner.get() >> Address::BITS) as Address) } 170 | } 171 | 172 | fn increment_tag(&mut self) { 173 | let new_tag = self.tag().checked_add(1).unwrap_or_else(initial_tag); 174 | 175 | // SAFETY: `self.non_null()` is a valid pointer. 176 | *self = unsafe { Self::new_unchecked(new_tag, self.non_null()) }; 177 | } 178 | } 179 | 180 | #[inline] 181 | const fn initial_tag() -> Tag { 182 | Tag::MIN 183 | } 184 | 185 | pub unsafe fn push(stack: &Stack, new_top: NonNullPtr) 186 | where 187 | N: Node, 188 | { 189 | let mut top = stack.top.load(Ordering::Relaxed); 190 | 191 | loop { 192 | new_top 193 | .non_null() 194 | .as_ref() 195 | .next() 196 | .store(top, Ordering::Relaxed); 197 | 198 | if let Err(p) = stack.top.compare_and_exchange_weak( 199 | top, 200 | Some(new_top), 201 | Ordering::Release, 202 | Ordering::Relaxed, 203 | ) { 204 | top = p; 205 | } else { 206 | return; 207 | } 208 | } 209 | } 210 | 211 | pub fn try_pop(stack: &Stack) -> Option> 212 | where 213 | N: Node, 214 | { 215 | loop { 216 | if let Some(mut top) = stack.top.load(Ordering::Acquire) { 217 | let next = unsafe { top.non_null().as_ref().next().load(Ordering::Relaxed) }; 218 | 219 | if stack 220 | .top 221 | .compare_and_exchange_weak(Some(top), next, Ordering::Release, Ordering::Relaxed) 222 | .is_ok() 223 | { 224 | // Prevent the ABA problem (https://en.wikipedia.org/wiki/Treiber_stack#Correctness). 
225 | // 226 | // Without this, the following would be possible: 227 | // 228 | // | Thread 1 | Thread 2 | Stack | 229 | // |-------------------------------|-------------------------|------------------------------| 230 | // | push((1, 1)) | | (1, 1) | 231 | // | push((1, 2)) | | (1, 2) -> (1, 1) | 232 | // | p = try_pop()::load // (1, 2) | | (1, 2) -> (1, 1) | 233 | // | | p = try_pop() // (1, 2) | (1, 1) | 234 | // | | push((1, 3)) | (1, 3) -> (1, 1) | 235 | // | | push(p) | (1, 2) -> (1, 3) -> (1, 1) | 236 | // | try_pop()::cas(p, p.next) | | (1, 1) | 237 | // 238 | // As can be seen, the `cas` operation succeeds, wrongly removing pointer `3` from the stack. 239 | // 240 | // By incrementing the tag before returning the pointer, it cannot be pushed again with the, 241 | // same tag, preventing the `try_pop()::cas(p, p.next)` operation from succeeding. 242 | // 243 | // With this fix, `try_pop()` in thread 2 returns `(2, 2)` and the comparison between 244 | // `(1, 2)` and `(2, 2)` fails, restarting the loop and correctly removing the new top: 245 | // 246 | // | Thread 1 | Thread 2 | Stack | 247 | // |-------------------------------|-------------------------|------------------------------| 248 | // | push((1, 1)) | | (1, 1) | 249 | // | push((1, 2)) | | (1, 2) -> (1, 1) | 250 | // | p = try_pop()::load // (1, 2) | | (1, 2) -> (1, 1) | 251 | // | | p = try_pop() // (2, 2) | (1, 1) | 252 | // | | push((1, 3)) | (1, 3) -> (1, 1) | 253 | // | | push(p) | (2, 2) -> (1, 3) -> (1, 1) | 254 | // | try_pop()::cas(p, p.next) | | (2, 2) -> (1, 3) -> (1, 1) | 255 | // | p = try_pop()::load // (2, 2) | | (2, 2) -> (1, 3) -> (1, 1) | 256 | // | try_pop()::cas(p, p.next) | | (1, 3) -> (1, 1) | 257 | top.increment_tag(); 258 | 259 | return Some(top); 260 | } 261 | } else { 262 | // stack observed as empty 263 | return None; 264 | } 265 | } 266 | } 267 | -------------------------------------------------------------------------------- /src/pool/treiber/llsc.rs: 
-------------------------------------------------------------------------------- 1 | use core::{ 2 | cell::UnsafeCell, 3 | ptr::{self, NonNull}, 4 | }; 5 | 6 | use super::{Node, Stack}; 7 | 8 | pub struct AtomicPtr 9 | where 10 | N: Node, 11 | { 12 | inner: UnsafeCell>>, 13 | } 14 | 15 | impl AtomicPtr 16 | where 17 | N: Node, 18 | { 19 | #[inline] 20 | pub const fn null() -> Self { 21 | Self { 22 | inner: UnsafeCell::new(None), 23 | } 24 | } 25 | } 26 | 27 | pub struct NonNullPtr 28 | where 29 | N: Node, 30 | { 31 | inner: NonNull, 32 | } 33 | 34 | impl NonNullPtr 35 | where 36 | N: Node, 37 | { 38 | #[inline] 39 | pub fn as_ptr(&self) -> *mut N { 40 | self.inner.as_ptr().cast() 41 | } 42 | 43 | #[inline] 44 | pub fn from_static_mut_ref(ref_: &'static mut N) -> Self { 45 | Self { 46 | inner: NonNull::from(ref_), 47 | } 48 | } 49 | } 50 | 51 | impl Clone for NonNullPtr 52 | where 53 | N: Node, 54 | { 55 | fn clone(&self) -> Self { 56 | Self { inner: self.inner } 57 | } 58 | } 59 | 60 | impl Copy for NonNullPtr where N: Node {} 61 | 62 | pub unsafe fn push(stack: &Stack, mut node: NonNullPtr) 63 | where 64 | N: Node, 65 | { 66 | let top_addr = ptr::addr_of!(stack.top) as *mut usize; 67 | 68 | loop { 69 | let top = arch::load_link(top_addr); 70 | 71 | node.inner 72 | .as_mut() 73 | .next_mut() 74 | .inner 75 | .get() 76 | .write(NonNull::new(top as *mut _)); 77 | 78 | if arch::store_conditional(node.inner.as_ptr() as usize, top_addr).is_ok() { 79 | break; 80 | } 81 | } 82 | } 83 | 84 | pub fn try_pop(stack: &Stack) -> Option> 85 | where 86 | N: Node, 87 | { 88 | unsafe { 89 | let top_addr = ptr::addr_of!(stack.top) as *mut usize; 90 | 91 | loop { 92 | let top = arch::load_link(top_addr); 93 | 94 | if let Some(top) = NonNull::new(top as *mut N) { 95 | let next = &top.as_ref().next(); 96 | 97 | if arch::store_conditional( 98 | next.inner 99 | .get() 100 | .read() 101 | .map(|non_null| non_null.as_ptr() as usize) 102 | .unwrap_or_default(), 103 | top_addr, 104 | ) 105 | 
.is_ok() 106 | { 107 | break Some(NonNullPtr { inner: top }); 108 | } 109 | } else { 110 | arch::clear_load_link(); 111 | 112 | break None; 113 | } 114 | } 115 | } 116 | } 117 | 118 | #[cfg(arm_llsc)] 119 | mod arch { 120 | use core::arch::asm; 121 | 122 | #[inline(always)] 123 | pub fn clear_load_link() { 124 | unsafe { asm!("clrex", options(nomem, nostack)) } 125 | } 126 | 127 | /// # Safety 128 | /// 129 | /// - `addr` must be a valid pointer. 130 | #[inline(always)] 131 | pub unsafe fn load_link(addr: *const usize) -> usize { 132 | let value; 133 | asm!("ldrex {}, [{}]", out(reg) value, in(reg) addr, options(nostack)); 134 | value 135 | } 136 | 137 | /// # Safety 138 | /// 139 | /// - `addr` must be a valid pointer. 140 | #[inline(always)] 141 | pub unsafe fn store_conditional(value: usize, addr: *mut usize) -> Result<(), ()> { 142 | let outcome: usize; 143 | asm!("strex {}, {}, [{}]", out(reg) outcome, in(reg) value, in(reg) addr, options(nostack)); 144 | if outcome == 0 { 145 | Ok(()) 146 | } else { 147 | Err(()) 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/ser.rs: -------------------------------------------------------------------------------- 1 | use core::hash::{BuildHasher, Hash}; 2 | 3 | use crate::{ 4 | binary_heap::{BinaryHeapInner, Kind as BinaryHeapKind}, 5 | deque::DequeInner, 6 | history_buf::{HistoryBufInner, HistoryBufStorage}, 7 | len_type::LenType, 8 | linear_map::{LinearMapInner, LinearMapStorage}, 9 | string::{StringInner, StringStorage}, 10 | vec::{VecInner, VecStorage}, 11 | IndexMap, IndexSet, 12 | }; 13 | use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; 14 | 15 | // Sequential containers 16 | 17 | impl Serialize for BinaryHeapInner 18 | where 19 | T: Ord + Serialize, 20 | KIND: BinaryHeapKind, 21 | S: VecStorage + ?Sized, 22 | { 23 | fn serialize(&self, serializer: SER) -> Result 24 | where 25 | SER: Serializer, 26 | { 27 | let mut seq = 
serializer.serialize_seq(Some(self.len()))?; 28 | for element in self { 29 | seq.serialize_element(element)?; 30 | } 31 | seq.end() 32 | } 33 | } 34 | 35 | impl Serialize for IndexSet 36 | where 37 | T: Eq + Hash + Serialize, 38 | S: BuildHasher, 39 | { 40 | fn serialize(&self, serializer: SER) -> Result 41 | where 42 | SER: Serializer, 43 | { 44 | let mut seq = serializer.serialize_seq(Some(self.len()))?; 45 | for element in self { 46 | seq.serialize_element(element)?; 47 | } 48 | seq.end() 49 | } 50 | } 51 | 52 | impl> Serialize for VecInner 53 | where 54 | T: Serialize, 55 | { 56 | fn serialize(&self, serializer: S) -> Result 57 | where 58 | S: Serializer, 59 | { 60 | let mut seq = serializer.serialize_seq(Some(self.len()))?; 61 | for element in self { 62 | seq.serialize_element(element)?; 63 | } 64 | seq.end() 65 | } 66 | } 67 | 68 | impl + ?Sized> Serialize for DequeInner 69 | where 70 | T: Serialize, 71 | { 72 | fn serialize(&self, serializer: SER) -> Result 73 | where 74 | SER: Serializer, 75 | { 76 | let mut seq = serializer.serialize_seq(Some(self.storage_len()))?; 77 | for element in self { 78 | seq.serialize_element(element)?; 79 | } 80 | seq.end() 81 | } 82 | } 83 | 84 | impl + ?Sized> Serialize for HistoryBufInner 85 | where 86 | T: Serialize, 87 | { 88 | fn serialize(&self, serializer: SER) -> Result 89 | where 90 | SER: Serializer, 91 | { 92 | let mut seq = serializer.serialize_seq(Some(self.len()))?; 93 | for element in self.oldest_ordered() { 94 | seq.serialize_element(element)?; 95 | } 96 | seq.end() 97 | } 98 | } 99 | 100 | // Dictionaries 101 | 102 | impl Serialize for IndexMap 103 | where 104 | K: Eq + Hash + Serialize, 105 | S: BuildHasher, 106 | V: Serialize, 107 | { 108 | fn serialize(&self, serializer: SER) -> Result 109 | where 110 | SER: Serializer, 111 | { 112 | let mut map = serializer.serialize_map(Some(self.len()))?; 113 | for (k, v) in self { 114 | map.serialize_entry(k, v)?; 115 | } 116 | map.end() 117 | } 118 | } 119 | 120 | impl + 
?Sized> Serialize for LinearMapInner 121 | where 122 | K: Eq + Serialize, 123 | V: Serialize, 124 | { 125 | fn serialize(&self, serializer: SER) -> Result 126 | where 127 | SER: Serializer, 128 | { 129 | let mut map = serializer.serialize_map(Some(self.len()))?; 130 | for (k, v) in self { 131 | map.serialize_entry(k, v)?; 132 | } 133 | map.end() 134 | } 135 | } 136 | 137 | // String containers 138 | 139 | impl Serialize for StringInner { 140 | fn serialize(&self, serializer: SER) -> Result 141 | where 142 | SER: Serializer, 143 | { 144 | serializer.serialize_str(self) 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /src/slice.rs: -------------------------------------------------------------------------------- 1 | use core::ops; 2 | 3 | // FIXME: Remove when `slice_range` feature is stable. 4 | #[track_caller] 5 | #[must_use] 6 | pub fn range(range: R, bounds: ops::RangeTo) -> ops::Range 7 | where 8 | R: ops::RangeBounds, 9 | { 10 | let len = bounds.end; 11 | 12 | let start: ops::Bound<&usize> = range.start_bound(); 13 | let start = match start { 14 | ops::Bound::Included(&start) => start, 15 | ops::Bound::Excluded(start) => start 16 | .checked_add(1) 17 | .unwrap_or_else(|| panic!("attempted to index slice from after maximum usize")), 18 | ops::Bound::Unbounded => 0, 19 | }; 20 | 21 | let end: ops::Bound<&usize> = range.end_bound(); 22 | let end = match end { 23 | ops::Bound::Included(end) => end 24 | .checked_add(1) 25 | .unwrap_or_else(|| panic!("attempted to index slice up to maximum usize")), 26 | ops::Bound::Excluded(&end) => end, 27 | ops::Bound::Unbounded => len, 28 | }; 29 | 30 | if start > end { 31 | panic!("slice index starts at {start} but ends at {end}"); 32 | } 33 | if end > len { 34 | panic!("range end index {end} out of range for slice of length {len}"); 35 | } 36 | 37 | ops::Range { start, end } 38 | } 39 | -------------------------------------------------------------------------------- 
/src/storage.rs: -------------------------------------------------------------------------------- 1 | //! `Storage` trait defining how data is stored in a container. 2 | 3 | use core::borrow::{Borrow, BorrowMut}; 4 | 5 | #[cfg(any( 6 | feature = "portable-atomic", 7 | target_has_atomic = "ptr", 8 | has_atomic_load_store 9 | ))] 10 | use crate::spsc; 11 | 12 | #[cfg(any( 13 | feature = "portable-atomic", 14 | all(feature = "mpmc_large", target_has_atomic = "ptr"), 15 | all(not(feature = "mpmc_large"), target_has_atomic = "8") 16 | ))] 17 | use crate::mpmc; 18 | 19 | pub(crate) trait SealedStorage { 20 | type Buffer: ?Sized + Borrow<[T]> + BorrowMut<[T]>; 21 | /// Obtain the length of the buffer 22 | #[allow(unused)] 23 | fn len(this: *const Self::Buffer) -> usize; 24 | /// Obtain access to the first element of the buffer 25 | #[allow(unused)] 26 | fn as_ptr(this: *mut Self::Buffer) -> *mut T; 27 | 28 | #[cfg(any( 29 | feature = "portable-atomic", 30 | all(feature = "mpmc_large", target_has_atomic = "ptr"), 31 | all(not(feature = "mpmc_large"), target_has_atomic = "8") 32 | ))] 33 | fn as_mpmc_view(this: &mpmc::QueueInner) -> &mpmc::QueueView 34 | where 35 | Self: Storage + Sized; 36 | #[cfg(any( 37 | feature = "portable-atomic", 38 | all(feature = "mpmc_large", target_has_atomic = "ptr"), 39 | all(not(feature = "mpmc_large"), target_has_atomic = "8") 40 | ))] 41 | fn as_mpmc_mut_view(this: &mut mpmc::QueueInner) -> &mut mpmc::QueueView 42 | where 43 | Self: Storage + Sized; 44 | 45 | #[cfg(any( 46 | feature = "portable-atomic", 47 | target_has_atomic = "ptr", 48 | has_atomic_load_store 49 | ))] 50 | /// Convert a `Queue` to a `QueueView` 51 | fn as_queue_view(this: &spsc::QueueInner) -> &spsc::QueueView 52 | where 53 | Self: Storage + Sized; 54 | #[cfg(any( 55 | feature = "portable-atomic", 56 | target_has_atomic = "ptr", 57 | has_atomic_load_store 58 | ))] 59 | /// Convert a `Queue` to a `QueueView` 60 | fn as_mut_queue_view(this: &mut spsc::QueueInner) -> &mut 
spsc::QueueView 61 | where 62 | Self: Storage + Sized; 63 | } 64 | 65 | /// Trait defining how data for a container is stored. 66 | /// 67 | /// There's two implementations available: 68 | /// 69 | /// - [`OwnedStorage`]: stores the data in an array `[T; N]` whose size is known at compile time. 70 | /// - [`ViewStorage`]: stores the data in an unsized `[T]`. 71 | /// 72 | /// This allows containers to be generic over either sized or unsized storage. For example, 73 | /// the [`vec`](crate::vec) module contains a [`VecInner`](crate::vec::VecInner) struct 74 | /// that's generic on [`Storage`], and two type aliases for convenience: 75 | /// 76 | /// - [`Vec`](crate::vec::Vec) = `VecInner>` 77 | /// - [`VecView`](crate::vec::VecView) = `VecInner` 78 | /// 79 | /// `Vec` can be unsized into `VecView`, either by unsizing coercions such as `&mut Vec -> &mut VecView` or 80 | /// `Box -> Box`, or explicitly with [`.as_view()`](crate::vec::Vec::as_view) or [`.as_mut_view()`](crate::vec::Vec::as_mut_view). 81 | /// 82 | /// This trait is sealed, so you cannot implement it for your own types. You can only use 83 | /// the implementations provided by this crate. 84 | #[allow(private_bounds)] 85 | pub trait Storage: SealedStorage {} 86 | 87 | /// Implementation of [`Storage`] that stores the data in an array `[T; N]` whose size is known at compile time. 
88 | pub enum OwnedStorage {} 89 | impl Storage for OwnedStorage {} 90 | impl SealedStorage for OwnedStorage { 91 | type Buffer = [T; N]; 92 | fn len(_: *const Self::Buffer) -> usize { 93 | N 94 | } 95 | fn as_ptr(this: *mut Self::Buffer) -> *mut T { 96 | this.cast() 97 | } 98 | #[cfg(any( 99 | feature = "portable-atomic", 100 | all(feature = "mpmc_large", target_has_atomic = "ptr"), 101 | all(not(feature = "mpmc_large"), target_has_atomic = "8") 102 | ))] 103 | fn as_mpmc_view(this: &mpmc::Queue) -> &mpmc::QueueView 104 | where 105 | Self: Storage + Sized, 106 | { 107 | // Fails to compile without the indirection 108 | this.as_view_private() 109 | } 110 | #[cfg(any( 111 | feature = "portable-atomic", 112 | all(feature = "mpmc_large", target_has_atomic = "ptr"), 113 | all(not(feature = "mpmc_large"), target_has_atomic = "8") 114 | ))] 115 | fn as_mpmc_mut_view(this: &mut mpmc::Queue) -> &mut mpmc::QueueView 116 | where 117 | Self: Storage + Sized, 118 | { 119 | // Fails to compile without the indirection 120 | this.as_view_mut_private() 121 | } 122 | #[cfg(any( 123 | feature = "portable-atomic", 124 | target_has_atomic = "ptr", 125 | has_atomic_load_store 126 | ))] 127 | /// Convert a `Queue` to a `QueueView` 128 | fn as_queue_view(this: &spsc::QueueInner) -> &spsc::QueueView 129 | where 130 | Self: Storage + Sized, 131 | { 132 | // Fails to compile without the indirection 133 | this.as_view_private() 134 | } 135 | #[cfg(any( 136 | feature = "portable-atomic", 137 | target_has_atomic = "ptr", 138 | has_atomic_load_store 139 | ))] 140 | /// Convert a `Queue` to a `QueueView` 141 | fn as_mut_queue_view(this: &mut spsc::QueueInner) -> &mut spsc::QueueView 142 | where 143 | Self: Storage + Sized, 144 | { 145 | // Fails to compile without the indirection 146 | this.as_mut_view_private() 147 | } 148 | } 149 | 150 | /// Implementation of [`Storage`] that stores the data in an unsized `[T]`. 
151 | pub enum ViewStorage {} 152 | impl Storage for ViewStorage {} 153 | impl SealedStorage for ViewStorage { 154 | type Buffer = [T]; 155 | fn len(this: *const Self::Buffer) -> usize { 156 | this.len() 157 | } 158 | 159 | fn as_ptr(this: *mut Self::Buffer) -> *mut T { 160 | this.cast() 161 | } 162 | 163 | #[cfg(any( 164 | feature = "portable-atomic", 165 | all(feature = "mpmc_large", target_has_atomic = "ptr"), 166 | all(not(feature = "mpmc_large"), target_has_atomic = "8") 167 | ))] 168 | fn as_mpmc_view(this: &mpmc::QueueInner) -> &mpmc::QueueView 169 | where 170 | Self: Storage + Sized, 171 | { 172 | this 173 | } 174 | 175 | #[cfg(any( 176 | feature = "portable-atomic", 177 | all(feature = "mpmc_large", target_has_atomic = "ptr"), 178 | all(not(feature = "mpmc_large"), target_has_atomic = "8") 179 | ))] 180 | fn as_mpmc_mut_view(this: &mut mpmc::QueueInner) -> &mut mpmc::QueueView 181 | where 182 | Self: Storage + Sized, 183 | { 184 | this 185 | } 186 | 187 | #[cfg(any( 188 | feature = "portable-atomic", 189 | target_has_atomic = "ptr", 190 | has_atomic_load_store 191 | ))] 192 | /// Convert a `Queue` to a `QueueView` 193 | fn as_queue_view(this: &spsc::QueueInner) -> &spsc::QueueView 194 | where 195 | Self: Storage + Sized, 196 | { 197 | this 198 | } 199 | #[cfg(any( 200 | feature = "portable-atomic", 201 | target_has_atomic = "ptr", 202 | has_atomic_load_store 203 | ))] 204 | /// Convert a `Queue` to a `QueueView` 205 | fn as_mut_queue_view(this: &mut spsc::QueueInner) -> &mut spsc::QueueView 206 | where 207 | Self: Storage + Sized, 208 | { 209 | this 210 | } 211 | } 212 | -------------------------------------------------------------------------------- /src/string/drain.rs: -------------------------------------------------------------------------------- 1 | use core::{fmt, iter::FusedIterator, str::Chars}; 2 | 3 | use crate::LenType; 4 | 5 | use super::StringView; 6 | 7 | /// A draining iterator for `String`. 
8 | /// 9 | /// This struct is created by the [`drain`] method on [`crate::String`]. See its 10 | /// documentation for more. 11 | /// 12 | /// [`drain`]: crate::String::drain 13 | pub struct Drain<'a, LenT: LenType> { 14 | /// Will be used as &'a mut String in the destructor 15 | pub(super) string: *mut StringView, 16 | /// Stast of part to remove 17 | pub(super) start: LenT, 18 | /// End of part to remove 19 | pub(super) end: LenT, 20 | /// Current remaining range to remove 21 | pub(super) iter: Chars<'a>, 22 | } 23 | 24 | impl fmt::Debug for Drain<'_, LenT> { 25 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 26 | f.debug_tuple("Drain").field(&self.as_str()).finish() 27 | } 28 | } 29 | 30 | unsafe impl Sync for Drain<'_, LenT> {} 31 | unsafe impl Send for Drain<'_, LenT> {} 32 | 33 | impl Drop for Drain<'_, LenT> { 34 | fn drop(&mut self) { 35 | unsafe { 36 | // Use `Vec::drain`. “Reaffirm” the bounds checks to avoid 37 | // panic code being inserted again. 38 | let self_vec = (*self.string).as_mut_vec(); 39 | let start = self.start.into_usize(); 40 | let end = self.end.into_usize(); 41 | if start <= end && end <= self_vec.len() { 42 | self_vec.drain(start..end); 43 | } 44 | } 45 | } 46 | } 47 | 48 | impl Drain<'_, LenT> { 49 | /// Returns the remaining (sub)string of this iterator as a slice. 
50 | /// 51 | /// # Examples 52 | /// 53 | /// ``` 54 | /// use heapless::String; 55 | /// 56 | /// let mut s = String::<8>::try_from("abc").unwrap(); 57 | /// let mut drain = s.drain(..); 58 | /// assert_eq!(drain.as_str(), "abc"); 59 | /// let _ = drain.next().unwrap(); 60 | /// assert_eq!(drain.as_str(), "bc"); 61 | /// ``` 62 | #[must_use] 63 | pub fn as_str(&self) -> &str { 64 | self.iter.as_str() 65 | } 66 | } 67 | 68 | impl AsRef for Drain<'_, LenT> { 69 | fn as_ref(&self) -> &str { 70 | self.as_str() 71 | } 72 | } 73 | 74 | impl AsRef<[u8]> for Drain<'_, LenT> { 75 | fn as_ref(&self) -> &[u8] { 76 | self.as_str().as_bytes() 77 | } 78 | } 79 | 80 | impl Iterator for Drain<'_, LenT> { 81 | type Item = char; 82 | 83 | #[inline] 84 | fn next(&mut self) -> Option { 85 | self.iter.next() 86 | } 87 | 88 | fn size_hint(&self) -> (usize, Option) { 89 | self.iter.size_hint() 90 | } 91 | 92 | #[inline] 93 | fn last(mut self) -> Option { 94 | self.next_back() 95 | } 96 | } 97 | 98 | impl DoubleEndedIterator for Drain<'_, LenT> { 99 | #[inline] 100 | fn next_back(&mut self) -> Option { 101 | self.iter.next_back() 102 | } 103 | } 104 | 105 | impl FusedIterator for Drain<'_, LenT> {} 106 | 107 | #[cfg(test)] 108 | mod tests { 109 | use crate::String; 110 | 111 | #[test] 112 | fn drain_front() { 113 | let mut s = String::<8>::try_from("abcd").unwrap(); 114 | let mut it = s.drain(..1); 115 | assert_eq!(it.next(), Some('a')); 116 | drop(it); 117 | assert_eq!(s, "bcd"); 118 | } 119 | 120 | #[test] 121 | fn drain_middle() { 122 | let mut s = String::<8>::try_from("abcd").unwrap(); 123 | let mut it = s.drain(1..3); 124 | assert_eq!(it.next(), Some('b')); 125 | assert_eq!(it.next(), Some('c')); 126 | drop(it); 127 | assert_eq!(s, "ad"); 128 | } 129 | 130 | #[test] 131 | fn drain_end() { 132 | let mut s = String::<8>::try_from("abcd").unwrap(); 133 | let mut it = s.drain(3..); 134 | assert_eq!(it.next(), Some('d')); 135 | drop(it); 136 | assert_eq!(s, "abc"); 137 | } 138 | } 139 
| -------------------------------------------------------------------------------- /src/test_helpers.rs: -------------------------------------------------------------------------------- 1 | macro_rules! droppable { 2 | () => { 3 | static COUNT: core::sync::atomic::AtomicI32 = core::sync::atomic::AtomicI32::new(0); 4 | 5 | #[derive(Eq, Ord, PartialEq, PartialOrd)] 6 | struct Droppable(i32); 7 | impl Droppable { 8 | fn new() -> Self { 9 | COUNT.fetch_add(1, core::sync::atomic::Ordering::Relaxed); 10 | Droppable(Self::count()) 11 | } 12 | 13 | fn count() -> i32 { 14 | COUNT.load(core::sync::atomic::Ordering::Relaxed) 15 | } 16 | } 17 | impl Drop for Droppable { 18 | fn drop(&mut self) { 19 | COUNT.fetch_sub(1, core::sync::atomic::Ordering::Relaxed); 20 | } 21 | } 22 | }; 23 | } 24 | -------------------------------------------------------------------------------- /src/ufmt.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | c_string::{self, CString}, 3 | len_type::LenType, 4 | string::{StringInner, StringStorage}, 5 | vec::{VecInner, VecStorage}, 6 | CapacityError, 7 | }; 8 | use ufmt::uDisplay; 9 | use ufmt_write::uWrite; 10 | 11 | impl uDisplay for StringInner { 12 | #[inline] 13 | fn fmt(&self, f: &mut ufmt::Formatter<'_, W>) -> Result<(), W::Error> 14 | where 15 | W: uWrite + ?Sized, 16 | { 17 | f.write_str(self.as_str()) 18 | } 19 | } 20 | 21 | impl uWrite for StringInner { 22 | type Error = CapacityError; 23 | 24 | #[inline] 25 | fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { 26 | self.push_str(s) 27 | } 28 | } 29 | 30 | impl + ?Sized> uWrite for VecInner { 31 | type Error = CapacityError; 32 | 33 | #[inline] 34 | fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { 35 | self.extend_from_slice(s.as_bytes()) 36 | } 37 | } 38 | 39 | impl uWrite for CString { 40 | type Error = c_string::ExtendError; 41 | 42 | #[inline] 43 | fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { 44 | 
self.extend_from_bytes(s.as_bytes()) 45 | } 46 | } 47 | 48 | #[cfg(test)] 49 | mod tests { 50 | use crate::{String, Vec}; 51 | 52 | use ufmt::{derive::uDebug, uwrite}; 53 | 54 | #[derive(uDebug)] 55 | struct Pair { 56 | x: u32, 57 | y: u32, 58 | } 59 | 60 | #[test] 61 | fn test_udisplay_string() { 62 | let str_a = String::<32>::try_from("world").unwrap(); 63 | let mut str_b = String::<32>::new(); 64 | uwrite!(str_b, "Hello {}!", str_a).unwrap(); 65 | 66 | assert_eq!(str_b, "Hello world!"); 67 | } 68 | 69 | #[test] 70 | fn test_uwrite_string() { 71 | let a = 123; 72 | let b = Pair { x: 0, y: 1234 }; 73 | 74 | let mut s = String::<32>::new(); 75 | uwrite!(s, "{} -> {:?}", a, b).unwrap(); 76 | 77 | assert_eq!(s, "123 -> Pair { x: 0, y: 1234 }"); 78 | } 79 | 80 | #[test] 81 | fn test_uwrite_string_err() { 82 | let p = Pair { x: 0, y: 1234 }; 83 | let mut s = String::<4>::new(); 84 | assert!(uwrite!(s, "{:?}", p).is_err()); 85 | } 86 | 87 | #[test] 88 | fn test_uwrite_vec() { 89 | let a = 123; 90 | let b = Pair { x: 0, y: 1234 }; 91 | 92 | let mut v = Vec::::new(); 93 | uwrite!(v, "{} -> {:?}", a, b).unwrap(); 94 | 95 | assert_eq!(v, b"123 -> Pair { x: 0, y: 1234 }"); 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/vec/drain.rs: -------------------------------------------------------------------------------- 1 | use core::{ 2 | fmt, 3 | iter::FusedIterator, 4 | mem::{self, size_of}, 5 | ptr::{self, NonNull}, 6 | slice, 7 | }; 8 | 9 | use crate::len_type::LenType; 10 | 11 | use super::VecView; 12 | 13 | /// A draining iterator for [`Vec`](super::Vec). 14 | /// 15 | /// This `struct` is created by [`Vec::drain`](super::Vec::drain). 16 | /// See its documentation for more. 
17 | pub struct Drain<'a, T: 'a, LenT: LenType> { 18 | /// Index of tail to preserve 19 | pub(super) tail_start: LenT, 20 | /// Length of tail 21 | pub(super) tail_len: LenT, 22 | /// Current remaining range to remove 23 | pub(super) iter: slice::Iter<'a, T>, 24 | pub(super) vec: NonNull>, 25 | } 26 | 27 | impl fmt::Debug for Drain<'_, T, LenT> { 28 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 29 | f.debug_tuple("Drain").field(&self.iter.as_slice()).finish() 30 | } 31 | } 32 | 33 | impl Drain<'_, T, LenT> { 34 | /// Returns the remaining items of this iterator as a slice. 35 | /// 36 | /// # Examples 37 | /// 38 | /// ``` 39 | /// use heapless::{vec, Vec}; 40 | /// 41 | /// let mut vec = Vec::<_, 3>::from_array(['a', 'b', 'c']); 42 | /// let mut drain = vec.drain(..); 43 | /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']); 44 | /// let _ = drain.next().unwrap(); 45 | /// assert_eq!(drain.as_slice(), &['b', 'c']); 46 | /// ``` 47 | #[must_use] 48 | pub fn as_slice(&self) -> &[T] { 49 | self.iter.as_slice() 50 | } 51 | } 52 | 53 | impl AsRef<[T]> for Drain<'_, T, LenT> { 54 | fn as_ref(&self) -> &[T] { 55 | self.as_slice() 56 | } 57 | } 58 | 59 | unsafe impl Sync for Drain<'_, T, LenT> {} 60 | unsafe impl Send for Drain<'_, T, LenT> {} 61 | 62 | impl Iterator for Drain<'_, T, LenT> { 63 | type Item = T; 64 | 65 | #[inline] 66 | fn next(&mut self) -> Option { 67 | self.iter 68 | .next() 69 | .map(|elt| unsafe { ptr::read(core::ptr::from_ref(elt)) }) 70 | } 71 | 72 | fn size_hint(&self) -> (usize, Option) { 73 | self.iter.size_hint() 74 | } 75 | } 76 | 77 | impl DoubleEndedIterator for Drain<'_, T, LenT> { 78 | #[inline] 79 | fn next_back(&mut self) -> Option { 80 | self.iter 81 | .next_back() 82 | .map(|elt| unsafe { ptr::read(core::ptr::from_ref(elt)) }) 83 | } 84 | } 85 | 86 | impl Drop for Drain<'_, T, LenT> { 87 | fn drop(&mut self) { 88 | /// Moves back the un-`Drain`ed elements to restore the original `Vec`. 
89 | struct DropGuard<'r, 'a, T, LenT: LenType>(&'r mut Drain<'a, T, LenT>); 90 | 91 | impl Drop for DropGuard<'_, '_, T, LenT> { 92 | fn drop(&mut self) { 93 | if self.0.tail_len > LenT::ZERO { 94 | unsafe { 95 | let source_vec = self.0.vec.as_mut(); 96 | // memmove back untouched tail, update to new length 97 | let start = source_vec.len(); 98 | let tail = self.0.tail_start.into_usize(); 99 | let tail_len = self.0.tail_len.into_usize(); 100 | if tail != start { 101 | let dst = source_vec.as_mut_ptr().add(start); 102 | let src = source_vec.as_ptr().add(tail); 103 | ptr::copy(src, dst, tail_len); 104 | } 105 | source_vec.set_len(start + tail_len); 106 | } 107 | } 108 | } 109 | } 110 | 111 | let iter = mem::take(&mut self.iter); 112 | let drop_len = iter.len(); 113 | 114 | let mut vec = self.vec; 115 | 116 | if size_of::() == 0 { 117 | // ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount. 118 | // this can be achieved by manipulating the `Vec` length instead of moving values out from `iter`. 119 | unsafe { 120 | let vec = vec.as_mut(); 121 | let old_len = vec.len(); 122 | let tail_len = self.tail_len.into_usize(); 123 | vec.set_len(old_len + drop_len + tail_len); 124 | vec.truncate(old_len + tail_len); 125 | } 126 | 127 | return; 128 | } 129 | 130 | // ensure elements are moved back into their appropriate places, even when drop_in_place panics 131 | let _guard = DropGuard(self); 132 | 133 | if drop_len == 0 { 134 | return; 135 | } 136 | 137 | // as_slice() must only be called when iter.len() is > 0 because 138 | // it also gets touched by vec::Splice which may turn it into a dangling pointer 139 | // which would make it and the vec pointer point to different allocations which would 140 | // lead to invalid pointer arithmetic below. 
141 | let drop_ptr = iter.as_slice().as_ptr(); 142 | 143 | unsafe { 144 | // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place 145 | // a pointer with mutable provenance is necessary. Therefore we must reconstruct 146 | // it from the original vec but also avoid creating a &mut to the front since that could 147 | // invalidate raw pointers to it which some unsafe code might rely on. 148 | let vec_ptr = vec.as_mut().as_mut_ptr(); 149 | // FIXME: Replace with `sub_ptr` once stable. 150 | let drop_offset = (drop_ptr as usize - vec_ptr as usize) / size_of::(); 151 | let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len); 152 | ptr::drop_in_place(to_drop); 153 | } 154 | } 155 | } 156 | 157 | impl ExactSizeIterator for Drain<'_, T, LenT> {} 158 | 159 | impl FusedIterator for Drain<'_, T, LenT> {} 160 | 161 | #[cfg(test)] 162 | mod tests { 163 | use super::super::Vec; 164 | 165 | #[test] 166 | fn drain_front() { 167 | let mut vec = Vec::<_, 8>::from_array([1, 2, 3, 4]); 168 | let mut it = vec.drain(..1); 169 | assert_eq!(it.next(), Some(1)); 170 | drop(it); 171 | assert_eq!(vec, &[2, 3, 4]); 172 | } 173 | 174 | #[test] 175 | fn drain_middle() { 176 | let mut vec = Vec::<_, 8>::from_array([1, 2, 3, 4]); 177 | let mut it = vec.drain(1..3); 178 | assert_eq!(it.next(), Some(2)); 179 | assert_eq!(it.next(), Some(3)); 180 | drop(it); 181 | assert_eq!(vec, &[1, 4]); 182 | } 183 | 184 | #[test] 185 | fn drain_end() { 186 | let mut vec = Vec::<_, 8>::from_array([1, 2, 3, 4]); 187 | let mut it = vec.drain(3..); 188 | assert_eq!(it.next(), Some(4)); 189 | drop(it); 190 | assert_eq!(vec, &[1, 2, 3]); 191 | } 192 | 193 | #[test] 194 | fn drain_drop_rest() { 195 | droppable!(); 196 | 197 | let mut vec = Vec::<_, 8>::from_array([ 198 | Droppable::new(), 199 | Droppable::new(), 200 | Droppable::new(), 201 | Droppable::new(), 202 | ]); 203 | assert_eq!(Droppable::count(), 4); 204 | 205 | let mut iter = vec.drain(2..); 206 | 
assert_eq!(iter.next().unwrap().0, 3); 207 | drop(iter); 208 | assert_eq!(Droppable::count(), 2); 209 | 210 | assert_eq!(vec.len(), 2); 211 | assert_eq!(vec.remove(0).0, 1); 212 | assert_eq!(Droppable::count(), 1); 213 | 214 | drop(vec); 215 | assert_eq!(Droppable::count(), 0); 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /suppressions.txt: -------------------------------------------------------------------------------- 1 | race:std::panic::catch_unwind 2 | race:std::thread::scope 3 | 4 | # std::thread::spawn false positive; seen on Ubuntu 20.04 but not on Arch Linux (2022-04-29) 5 | race:drop_in_place*JoinHandle 6 | race:alloc::sync::Arc<*>::drop_slow 7 | race:__call_tls_dtors 8 | 9 | # false positives in memcpy (?) 10 | race:*memcpy* 11 | -------------------------------------------------------------------------------- /tests/cpass.rs: -------------------------------------------------------------------------------- 1 | //! Collections of `Send`-able things are `Send` 2 | 3 | use heapless::{ 4 | history_buf::HistoryBufView, 5 | spsc::{Consumer, ConsumerView, Producer, ProducerView, Queue, QueueView}, 6 | HistoryBuf, Vec, VecView, 7 | }; 8 | 9 | #[test] 10 | fn send() { 11 | struct IsSend; 12 | 13 | unsafe impl Send for IsSend {} 14 | 15 | fn is_send() 16 | where 17 | T: Send + ?Sized, 18 | { 19 | } 20 | 21 | is_send::>(); 22 | is_send::>(); 23 | is_send::>(); 24 | is_send::>(); 25 | is_send::>(); 26 | is_send::>(); 27 | is_send::>(); 28 | is_send::>(); 29 | is_send::>(); 30 | is_send::>(); 31 | } 32 | -------------------------------------------------------------------------------- /tests/tsan.rs: -------------------------------------------------------------------------------- 1 | #![deny(rust_2018_compatibility)] 2 | #![deny(rust_2018_idioms)] 3 | 4 | use std::{ptr::addr_of_mut, thread}; 5 | 6 | use heapless::spsc; 7 | 8 | #[test] 9 | fn once() { 10 | static mut RB: spsc::Queue = spsc::Queue::new(); 11 | 12 
| let rb = unsafe { &mut *addr_of_mut!(RB) }; 13 | 14 | rb.enqueue(0).unwrap(); 15 | 16 | let (mut p, mut c) = rb.split(); 17 | 18 | p.enqueue(1).unwrap(); 19 | 20 | thread::spawn(move || { 21 | p.enqueue(1).unwrap(); 22 | }); 23 | 24 | thread::spawn(move || { 25 | c.dequeue().unwrap(); 26 | }); 27 | } 28 | 29 | #[test] 30 | fn twice() { 31 | static mut RB: spsc::Queue = spsc::Queue::new(); 32 | 33 | let rb = unsafe { &mut *addr_of_mut!(RB) }; 34 | 35 | rb.enqueue(0).unwrap(); 36 | rb.enqueue(1).unwrap(); 37 | 38 | let (mut p, mut c) = rb.split(); 39 | 40 | thread::spawn(move || { 41 | p.enqueue(2).unwrap(); 42 | p.enqueue(3).unwrap(); 43 | }); 44 | 45 | thread::spawn(move || { 46 | c.dequeue().unwrap(); 47 | c.dequeue().unwrap(); 48 | }); 49 | } 50 | 51 | #[test] 52 | fn scoped() { 53 | let mut rb: spsc::Queue = spsc::Queue::new(); 54 | 55 | rb.enqueue(0).unwrap(); 56 | 57 | { 58 | let (mut p, mut c) = rb.split(); 59 | 60 | thread::scope(move |scope| { 61 | scope.spawn(move || { 62 | p.enqueue(1).unwrap(); 63 | }); 64 | 65 | scope.spawn(move || { 66 | c.dequeue().unwrap(); 67 | }); 68 | }); 69 | } 70 | 71 | rb.dequeue().unwrap(); 72 | } 73 | 74 | #[test] 75 | #[cfg_attr(miri, ignore)] // too slow 76 | fn contention() { 77 | const N: usize = 1024; 78 | 79 | let mut rb: spsc::Queue = spsc::Queue::new(); 80 | 81 | { 82 | let (mut p, mut c) = rb.split(); 83 | 84 | thread::scope(move |scope| { 85 | scope.spawn(move || { 86 | let mut sum: u32 = 0; 87 | 88 | for i in 0..(2 * N) { 89 | sum = sum.wrapping_add(i as u32); 90 | while p.enqueue(i as u8).is_err() {} 91 | } 92 | 93 | println!("producer: {}", sum); 94 | }); 95 | 96 | scope.spawn(move || { 97 | let mut sum: u32 = 0; 98 | 99 | for _ in 0..(2 * N) { 100 | loop { 101 | if let Some(v) = c.dequeue() { 102 | sum = sum.wrapping_add(v as u32); 103 | break; 104 | } 105 | } 106 | } 107 | 108 | println!("consumer: {}", sum); 109 | }); 110 | }); 111 | } 112 | 113 | assert!(rb.is_empty()); 114 | } 115 | 116 | #[test] 117 | 
#[cfg_attr(miri, ignore)] // too slow 118 | fn mpmc_contention() { 119 | use std::sync::mpsc; 120 | 121 | use heapless::mpmc::Queue; 122 | 123 | const N: u32 = 64; 124 | 125 | static Q: Queue = Queue::new(); 126 | 127 | let (s, r) = mpsc::channel(); 128 | thread::scope(|scope| { 129 | let s1 = s.clone(); 130 | scope.spawn(move || { 131 | let mut sum: u32 = 0; 132 | 133 | for i in 0..(16 * N) { 134 | sum = sum.wrapping_add(i); 135 | println!("enqueue {}", i); 136 | while Q.enqueue(i).is_err() {} 137 | } 138 | 139 | s1.send(sum).unwrap(); 140 | }); 141 | 142 | let s2 = s.clone(); 143 | scope.spawn(move || { 144 | let mut sum: u32 = 0; 145 | 146 | for _ in 0..(16 * N) { 147 | loop { 148 | if let Some(v) = Q.dequeue() { 149 | sum = sum.wrapping_add(v); 150 | println!("dequeue {}", v); 151 | break; 152 | } 153 | } 154 | } 155 | 156 | s2.send(sum).unwrap(); 157 | }); 158 | }); 159 | 160 | assert_eq!(r.recv().unwrap(), r.recv().unwrap()); 161 | } 162 | 163 | #[test] 164 | #[cfg_attr(miri, ignore)] // too slow 165 | fn unchecked() { 166 | const N: usize = 1024; 167 | 168 | let mut rb: spsc::Queue = spsc::Queue::new(); 169 | 170 | for _ in 0..N / 2 - 1 { 171 | rb.enqueue(1).unwrap(); 172 | } 173 | 174 | { 175 | let (mut p, mut c) = rb.split(); 176 | 177 | thread::scope(move |scope| { 178 | scope.spawn(move || { 179 | for _ in 0..N / 2 - 1 { 180 | p.enqueue(2).unwrap(); 181 | } 182 | }); 183 | 184 | scope.spawn(move || { 185 | let mut sum: usize = 0; 186 | 187 | for _ in 0..N / 2 - 1 { 188 | sum = sum.wrapping_add(usize::from(c.dequeue().unwrap())); 189 | } 190 | 191 | assert_eq!(sum, N / 2 - 1); 192 | }); 193 | }); 194 | } 195 | 196 | assert_eq!(rb.len(), N / 2 - 1); 197 | } 198 | 199 | #[test] 200 | fn len_properly_wraps() { 201 | const N: usize = 4; 202 | let mut rb: spsc::Queue = spsc::Queue::new(); 203 | 204 | rb.enqueue(1).unwrap(); 205 | assert_eq!(rb.len(), 1); 206 | rb.dequeue(); 207 | assert_eq!(rb.len(), 0); 208 | rb.enqueue(2).unwrap(); 209 | assert_eq!(rb.len(), 
1); 210 | rb.enqueue(3).unwrap(); 211 | assert_eq!(rb.len(), 2); 212 | rb.enqueue(4).unwrap(); 213 | assert_eq!(rb.len(), 3); 214 | } 215 | 216 | #[test] 217 | fn iterator_properly_wraps() { 218 | const N: usize = 4; 219 | let mut rb: spsc::Queue = spsc::Queue::new(); 220 | 221 | rb.enqueue(1).unwrap(); 222 | rb.dequeue(); 223 | rb.enqueue(2).unwrap(); 224 | rb.enqueue(3).unwrap(); 225 | rb.enqueue(4).unwrap(); 226 | let expected = [2, 3, 4]; 227 | let mut actual = [0, 0, 0]; 228 | for (idx, el) in rb.iter().enumerate() { 229 | actual[idx] = *el; 230 | } 231 | assert_eq!(expected, actual); 232 | } 233 | --------------------------------------------------------------------------------

241 | where 242 | P: ArcPool, 243 | { 244 | fn clone(&self) -> Self { 245 | let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed); 246 | 247 | if old_size > MAX_REFCOUNT { 248 | // XXX original code calls `intrinsics::abort` which is unstable API 249 | panic!(); 250 | } 251 | 252 | Self::from_inner(self.node_ptr) 253 | } 254 | } 255 | 256 | impl fmt::Debug for Arc 257 | where 258 | A: ArcPool, 259 | A::Data: fmt::Debug, 260 | { 261 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 262 | A::Data::fmt(self, f) 263 | } 264 | } 265 | 266 | impl