├── waf ├── test ├── wscript_build ├── src │ ├── test_no_locking_policy.cpp │ ├── test_unique_pool.cpp │ └── test_resource_pool.cpp └── recycle_tests.cpp ├── wscript ├── resolve.json ├── .clang-format ├── lock_version_resolve.json ├── .gitignore ├── CMakeLists.txt ├── LICENSE.rst ├── NEWS.rst ├── src └── recycle │ ├── no_locking_policy.hpp │ ├── unique_pool.hpp │ └── shared_pool.hpp ├── .github └── workflows │ └── cpp-internal.yml └── README.rst /waf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/steinwurf/recycle/HEAD/waf -------------------------------------------------------------------------------- /test/wscript_build: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | 3 | bld.program( 4 | features="cxx test", 5 | source=["recycle_tests.cpp"] + bld.path.ant_glob("src/*.cpp"), 6 | target="recycle_tests", 7 | use=["recycle_includes", "gtest"], 8 | ) 9 | -------------------------------------------------------------------------------- /test/src/test_no_locking_policy.cpp: -------------------------------------------------------------------------------- 1 | // Copyright Steinwurf ApS 2014. 2 | // All Rights Reserved 3 | // 4 | // Distributed under the "BSD License". See the accompanying LICENSE.rst file. 5 | 6 | #include 7 | 8 | #include 9 | 10 | TEST(test_no_locking_policy, empty) 11 | { 12 | recycle::no_locking_policy policy; 13 | (void)policy; 14 | } 15 | -------------------------------------------------------------------------------- /wscript: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | # encoding: utf-8 3 | 4 | APPNAME = "recycle" 5 | VERSION = "8.0.0" 6 | 7 | def options(ctx): 8 | ctx.load("cmake") 9 | 10 | def configure(ctx): 11 | 12 | ctx.load("cmake") 13 | 14 | if ctx.is_toplevel(): 15 | ctx.cmake_configure() 16 | 17 | 18 | def build(ctx): 19 | 20 | ctx.load("cmake") 21 | 22 | if ctx.is_toplevel(): 23 | ctx.cmake_build() 24 | 25 | -------------------------------------------------------------------------------- /test/recycle_tests.cpp: -------------------------------------------------------------------------------- 1 | // Copyright Steinwurf ApS 2014. 2 | // All Rights Reserved 3 | // 4 | // Distributed under the "BSD License". See the accompanying LICENSE.rst file. 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | GTEST_API_ int main(int argc, char** argv) 12 | { 13 | srand(static_cast(time(0))); 14 | 15 | testing::InitGoogleTest(&argc, argv); 16 | return RUN_ALL_TESTS(); 17 | } -------------------------------------------------------------------------------- /resolve.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "gtest", 4 | "internal": true, 5 | "resolver": "git", 6 | "method": "semver", 7 | "major": 6, 8 | "sources": [ 9 | "github.com/steinwurf/gtest.git" 10 | ] 11 | }, 12 | { 13 | "name": "toolchains", 14 | "internal": true, 15 | "resolver": "git", 16 | "method": "semver", 17 | "major": 1, 18 | "sources": [ 19 | "github.com/steinwurf/cmake-toolchains.git" 20 | ] 21 | } 22 | ] -------------------------------------------------------------------------------- /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | AlignAfterOpenBracket: Align 3 | AlignEscapedNewlinesLeft: 'true' 4 | AlignOperands: 'true' 5 | AlwaysBreakTemplateDeclarations: 'true' 6 | AccessModifierOffset: -4 7 | BreakBeforeBraces: Allman 8 | Standard: Cpp11 9 | IndentWidth: 4 10 | IndentCaseLabels: 'false' 11 | PointerAlignment: 
Left 12 | TabWidth: 4 13 | UseTab: Never 14 | AllowShortFunctionsOnASingleLine: None 15 | AllowAllParametersOfDeclarationOnNextLine: 'true' 16 | FixNamespaceComments: 'false' 17 | BreakConstructorInitializers: AfterColon 18 | ContinuationIndentWidth: 4 19 | Cpp11BracedListStyle: 'true' 20 | ... 21 | -------------------------------------------------------------------------------- /lock_version_resolve.json: -------------------------------------------------------------------------------- 1 | { 2 | "gtest": { 3 | "commit_id": "8e1f80ae8618fdba4389f1b56c811ea7241569d1", 4 | "resolver_info": "6.0.2", 5 | "sha1": "44c35f8798a738692c902fe2b67dc0cb76a1df53" 6 | }, 7 | "gtest-source": { 8 | "commit_id": "52eb8108c5bdec04579160ae17225d66034bd723", 9 | "resolver_info": "v1.17.0", 10 | "sha1": "8600eafa3daedc694d3115677da0dda3af6a8633" 11 | }, 12 | "toolchains": { 13 | "commit_id": "f7b9747ab22d7dbbeb23c9f863e735bac883566e", 14 | "resolver_info": "1.0.3", 15 | "sha1": "443cb23b0570dffd39283d8679ec393ed809302f" 16 | } 17 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files 2 | *.slo 3 | *.lo 4 | *.o 5 | 6 | # Compiled Dynamic libraries 7 | *.so 8 | 9 | # Compiled Static libraries 10 | *.lai 11 | *.la 12 | *.a 13 | 14 | # Python 15 | *.pyc 16 | 17 | # Compiled Doxygen documentation 18 | /doxygen/html 19 | 20 | # Waf files 21 | waf-* 22 | waf3-* 23 | .waf-* 24 | .waf3-* 25 | .lock-* 26 | build 27 | build_current 28 | resolve_symlinks 29 | resolved_dependencies 30 | 31 | # Gnu Global tag files 32 | GPATH 33 | GRTAGS 34 | GSYMS 35 | GTAGS 36 | 37 | # Emacs temp / auto save 38 | \#*# 39 | *.#* 40 | *~ 41 | 42 | #Eclipse ignore 43 | .cproject 44 | *.project 45 | .metadata 46 | local.properties 47 | .classpath 48 | .settings/ 49 | 50 | # Visual Studio ignore 51 | *.bat 52 | *.sln 53 | *.suo 54 | *.user 55 | *.ncb 56 | *.sdf 57 | 
*.opensdf 58 | *.log 59 | *.vcxproj* 60 | VSProjects 61 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10) 2 | project(recycle) 3 | 4 | 5 | # Include common CMake settings 6 | include("${STEINWURF_RESOLVE}/toolchains/common_settings.cmake") 7 | 8 | # Define library 9 | add_library(recycle INTERFACE) 10 | target_compile_features(recycle INTERFACE cxx_std_14) 11 | target_include_directories(recycle INTERFACE src/) 12 | add_library(steinwurf::recycle ALIAS recycle) 13 | 14 | # Install headers 15 | install( 16 | DIRECTORY ./src/recycle 17 | DESTINATION ${CMAKE_INSTALL_PREFIX}/include 18 | FILES_MATCHING 19 | PATTERN *.hpp) 20 | 21 | # Is top level project? 22 | if(${CMAKE_PROJECT_NAME} STREQUAL ${PROJECT_NAME}) 23 | 24 | # Setup testing 25 | enable_testing() 26 | 27 | # Google Test dependency 28 | add_subdirectory("${STEINWURF_RESOLVE}/gtest" EXCLUDE_FROM_ALL) 29 | 30 | # Build test executable 31 | file(GLOB_RECURSE recycle_test_sources ./test/*.cpp) 32 | 33 | 34 | add_executable(recycle_test ${recycle_test_sources}) 35 | target_link_libraries(recycle_test recycle) 36 | target_link_libraries(recycle_test steinwurf::gtest) 37 | 38 | add_test(NAME recycle_test COMMAND recycle_test) 39 | 40 | endif() 41 | -------------------------------------------------------------------------------- /LICENSE.rst: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014, Steinwurf ApS 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without modification, 6 | are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright notice, 9 | this list of conditions and the following disclaimer. 
10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | * Neither the name of recycle nor the names of its contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 21 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 22 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 23 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 24 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 25 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 26 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 27 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /NEWS.rst: -------------------------------------------------------------------------------- 1 | News for recycle 2 | ================ 3 | 4 | This file lists the major changes between versions. For a more detailed list of 5 | every change, see the Git log. 6 | 7 | Latest 8 | ------ 9 | * tbd 10 | 11 | 8.0.0 12 | ----- 13 | * Major: Only builds with CMake 14 | 15 | 7.0.0 16 | ----- 17 | * Major: Use waf-tools 5. 18 | * Minor: Updated waf. 19 | 20 | 6.0.0 21 | ----- 22 | * Major: Change cmake build to be object library based. 23 | 24 | 5.1.0 25 | ----- 26 | * Minor: Added install step to CMake. 27 | 28 | 5.0.0 29 | ----- 30 | * Major: Use std::size_t for size and positions. 
31 | 32 | 4.1.2 33 | ----- 34 | * Patch: Fix ``target_compile_features``. 35 | 36 | 4.1.1 37 | ----- 38 | * Patch: Added ``target_compile_features`` to CMake script so that c++14 is 39 | used. 40 | 41 | 4.1.0 42 | ----- 43 | * Minor: Added CMake build file. 44 | 45 | 4.0.0 46 | ----- 47 | * Major: Rename resource_pool to shared_pool 48 | * Minor: Adding unique_pool 49 | 50 | 3.0.0 51 | ----- 52 | * Major: Upgrade to waf-tools 4 53 | * Minor: Upgrade to gtest 4 54 | 55 | 2.0.0 56 | ----- 57 | * Major: Upgrade to waf-tools 3 58 | * Minor: Upgrade to gtest 3 59 | 60 | 1.2.0 61 | ----- 62 | * Patch: Fix a memory leak caused by a circular dependency when using objects 63 | inheriting from ``std::enable_shared_from_this``. 64 | * Minor: Added buildbot.py for coverage reports. 65 | * Patch: Fixed comparison warnings in unit tests. 66 | 67 | 1.1.1 68 | ----- 69 | * Patch: Fix version define. 70 | 71 | 1.1.0 72 | ----- 73 | * Minor: Added version define. 74 | 75 | 1.0.1 76 | ----- 77 | * Patch: Added test for no_locking_policy.hpp 78 | * Patch: Fixed includes 79 | 80 | 1.0.0 81 | ----- 82 | * Major: Initial release of the project. 83 | -------------------------------------------------------------------------------- /src/recycle/no_locking_policy.hpp: -------------------------------------------------------------------------------- 1 | // Copyright Steinwurf ApS 2014. 2 | // All Rights Reserved 3 | // 4 | // Distributed under the "BSD License". See the accompanying LICENSE.rst file. 5 | 6 | #pragma once 7 | 8 | namespace recycle 9 | { 10 | /// Defines the default non thread-safe locking policy for the 11 | /// recycle::resource_pool. 12 | /// 13 | /// Custom locking policies may be defined to create a thread-safe 14 | /// resource pool for different threading libraries. 15 | /// 16 | /// A valid locking policy defines two types, namely the mutex and 17 | /// the lock. 
/// The default, non thread-safe locking policy used by the pools.
///
/// A valid locking policy defines two types: a mutex type and a lock
/// type whose constructor takes the mutex and locks it for the lock's
/// lifetime (see the example in the surrounding documentation). This
/// policy satisfies that contract with empty stand-ins, i.e. no
/// synchronization is performed at all.
struct no_locking_policy
{
    /// Stand-in mutex type; holds no state and provides no exclusion.
    struct no_mutex
    {
    };

    /// Stand-in lock type; accepts a mutex reference and intentionally
    /// performs no locking.
    struct no_lock
    {
        no_lock(no_mutex&)
        {
        }
    };

    /// The mutex type required by the locking policy contract
    using mutex_type = no_mutex;

    /// The lock type required by the locking policy contract
    using lock_type = no_lock;
};
"./resolve_symlinks/toolchains/clang-tsan-toolchain.cmake" 30 | - runner: ubuntu-current 31 | name: Clang UBSAN Latest 32 | toolchain: "./resolve_symlinks/toolchains/clang-ubsan-toolchain.cmake" 33 | - runner: ubuntu-old 34 | name: GCC Oldest 35 | toolchain: "./resolve_symlinks/toolchains/gcc-toolchain.cmake" 36 | - runner: ubuntu-old 37 | name: Clang Oldest 38 | toolchain: "./resolve_symlinks/toolchains/clang-toolchain.cmake" 39 | runs-on: 40 | - self-hosted 41 | - vm 42 | - ${{ matrix.config.runner }} 43 | name: ${{ matrix.config.name }} 44 | steps: 45 | # This is sometimes needed when running docker builds since these 46 | # sometimes produce files with root ownership 47 | - name: Ensure correct owner of repository 48 | run: sudo chown -R actions-runner:actions-runner . 49 | - name: Checkout source code 50 | uses: actions/checkout@v3 51 | - name: Waf Clean 52 | run: python3 waf clean --no_resolve 53 | - name: Waf Configure 54 | run: python3 waf configure --git_protocol=git@ --cmake_toolchain=${{ matrix.config.toolchain }} --cmake_verbose 55 | - name: Waf Build 56 | run: python3 waf build --run_tests 57 | 58 | valgrind: 59 | timeout-minutes: 45 60 | runs-on: 61 | - self-hosted 62 | - vm 63 | - ubuntu-current 64 | name: Valgrind 65 | steps: 66 | - name: Ensure correct owner of repository 67 | run: sudo chown -R actions-runner:actions-runner . 
68 | 69 | - name: Checkout source code 70 | uses: actions/checkout@v3 71 | 72 | - name: Waf Clean 73 | run: python3 waf clean --no_resolve 74 | 75 | - name: Waf Configure 76 | run: python3 waf configure --git_protocol=git@ --cmake_toolchain=./resolve_symlinks/toolchains/gcc-toolchain.cmake --cmake_verbose 77 | 78 | - name: Waf Build 79 | run: python3 waf build --run_tests --ctest_valgrind 80 | 81 | zig_toolchain_build: 82 | name: Zig Toolchain Build (Docker) 83 | runs-on: [self-hosted, vm, ubuntu-current] 84 | container: 85 | image: ghcr.io/steinwurf/build-images/zig-cpp 86 | options: --user 0:0 87 | volumes: 88 | - /root/.ssh:/root/.ssh 89 | steps: 90 | - name: Checkout source code 91 | uses: actions/checkout@v4 92 | - name: Install dependencies 93 | run: | 94 | apk add --no-cache \ 95 | python3 \ 96 | py3-pip \ 97 | git \ 98 | cmake \ 99 | build-base 100 | - name: Waf Clean 101 | run: python3 waf clean --no_resolve 102 | - name: Waf Configure with Zig Toolchain 103 | run: python3 waf configure --git_protocol=git@ --cmake_toolchain=./resolve_symlinks/toolchains/zig-toolchain-x86_64-linux-musl.cmake --cmake_verbose 104 | - name: Waf Build with Zig Toolchain 105 | run: python3 waf build --run_tests 106 | 107 | macos_cmake: 108 | timeout-minutes: 45 109 | strategy: 110 | fail-fast: false 111 | matrix: 112 | config: 113 | - arch: ARM64 114 | os: big_sur 115 | name: Apple Big Sur (ARM) 116 | toolchain: "./resolve_symlinks/toolchains/clang-toolchain.cmake" 117 | runs-on: 118 | - self-hosted 119 | - macOS 120 | - ${{ matrix.config.os }} 121 | - ${{ matrix.config.arch }} 122 | - cmake 123 | - builder 124 | name: ${{ matrix.config.name }} 125 | steps: 126 | - name: Checkout 127 | uses: actions/checkout@v3 128 | - name: Waf Clean 129 | run: python3 waf clean --no_resolve 130 | - name: Waf Configure 131 | run: python3 waf configure --git_protocol=git@ --cmake_toolchain=${{ matrix.config.toolchain }} --cmake_verbose 132 | - name: Waf Build 133 | run: python3 waf build 
--run_tests 134 | 135 | windows_cmake: 136 | timeout-minutes: 45 137 | strategy: 138 | fail-fast: false 139 | runs-on: [self-hosted, windows, vm, windows-current] 140 | name: Windows 141 | steps: 142 | - name: Checkout 143 | uses: actions/checkout@v3 144 | - name: Waf Clean 145 | run: python waf clean --no_resolve 146 | - name: Waf Configure 147 | run: python waf configure --git_protocol=git@ --cmake_verbose 148 | - name: Waf Build 149 | run: python waf build --run_tests 150 | 151 | clang-format: 152 | timeout-minutes: 45 153 | name: Clang-Format 154 | runs-on: [self-hosted, vm, ubuntu-current] 155 | steps: 156 | - name: Ensure correct owner of repository 157 | run: sudo chown -R actions-runner:actions-runner . 158 | - name: Clang format version 159 | run: clang-format --version 160 | - name: Checkout source code 161 | uses: actions/checkout@v3 162 | - name: Run Clang-format 163 | run: find ./ -iname *.hpp -o -iname *.cpp -o -iname *.c -o -iname *.h | xargs clang-format --dry-run --Werror 164 | 165 | workflow-keepalive: 166 | if: github.event_name == 'schedule' 167 | runs-on: [self-hosted, vm, ubuntu-current] 168 | permissions: 169 | actions: write 170 | steps: 171 | - name: Install GitHub CLI 172 | run: | 173 | sudo apt update 174 | sudo apt install -y gh 175 | - uses: liskin/gh-workflow-keepalive@v1 176 | 177 | concurrency: 178 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }} 179 | cancel-in-progress: true -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | recycle 2 | ======= 3 | 4 | |Linux make-specs| |Windows make-specs| |MacOS make-specs| |Linux CMake| |Windows CMake| |MacOS CMake| |Raspberry Pi| |Valgrind| |No Assertions| |Clang Format| |Cppcheck| 5 | 6 | .. 
|Linux make-specs| image:: https://github.com/steinwurf/recycle/actions/workflows/linux_mkspecs.yml/badge.svg 7 | :target: https://github.com/steinwurf/recycle/actions/workflows/linux_mkspecs.yml 8 | 9 | .. |Windows make-specs| image:: https://github.com/steinwurf/recycle/actions/workflows/windows_mkspecs.yml/badge.svg 10 | :target: https://github.com/steinwurf/recycle/actions/workflows/windows_mkspecs.yml 11 | 12 | .. |MacOS make-specs| image:: https://github.com/steinwurf/recycle/actions/workflows/macos_mkspecs.yml/badge.svg 13 | :target: https://github.com/steinwurf/recycle/actions/workflows/macos_mkspecs.yml 14 | 15 | .. |Linux CMake| image:: https://github.com/steinwurf/recycle/actions/workflows/linux_cmake.yml/badge.svg 16 | :target: https://github.com/steinwurf/recycle/actions/workflows/linux_cmake.yml 17 | 18 | .. |Windows CMake| image:: https://github.com/steinwurf/recycle/actions/workflows/windows_cmake.yml/badge.svg 19 | :target: https://github.com/steinwurf/recycle/actions/workflows/windows_cmake.yml 20 | 21 | .. |MacOS CMake| image:: https://github.com/steinwurf/recycle/actions/workflows/macos_cmake.yml/badge.svg 22 | :target: https://github.com/steinwurf/recycle/actions/workflows/macos_cmake.yml 23 | 24 | .. |Raspberry Pi| image:: https://github.com/steinwurf/recycle/actions/workflows/raspberry_pi.yml/badge.svg 25 | :target: https://github.com/steinwurf/recycle/actions/workflows/raspberry_pi.yml 26 | 27 | .. |Clang Format| image:: https://github.com/steinwurf/recycle/actions/workflows/clang-format.yml/badge.svg 28 | :target: https://github.com/steinwurf/recycle/actions/workflows/clang-format.yml 29 | 30 | .. |No Assertions| image:: https://github.com/steinwurf/recycle/actions/workflows/nodebug.yml/badge.svg 31 | :target: https://github.com/steinwurf/recycle/actions/workflows/nodebug.yml 32 | 33 | .. 
|Valgrind| image:: https://github.com/steinwurf/recycle/actions/workflows/valgrind.yml/badge.svg 34 | :target: https://github.com/steinwurf/recycle/actions/workflows/valgrind.yml 35 | 36 | .. |Cppcheck| image:: https://github.com/steinwurf/recycle/actions/workflows/cppcheck.yml/badge.svg 37 | :target: https://github.com/steinwurf/recycle/actions/workflows/cppcheck.yml 38 | 39 | recycle is an implementation of a simple C++ resource pool. 40 | 41 | .. contents:: Table of Contents: 42 | :local: 43 | 44 | Usage 45 | ----- 46 | 47 | The ``recycle`` project contains two types of resource pools: 48 | 49 | 1. The ``recycle::shared_pool`` is useful when managing expensive to 50 | construct objects. The life-time of the managed objects is controlled 51 | by using ``std::shared_ptr``. A custom deleter is used to reclaim 52 | objects in the pool when the last remaining ``std::shared_ptr`` owning 53 | the object is destroyed. 54 | 55 | 2. The ``recycle::unique_pool`` works the same way as the 56 | ``recycle::shared_pool`` but instead uses ``std::unique_ptr`` for 57 | managing the resources. Still we need a custom deleter - with 58 | ``std::unique_ptr`` this has to be part of the type. So the 59 | ``std::unique_ptr`` returned by ``recycle::unique_pool`` is 60 | of type ``recycle::unique_pool::pool_ptr``. 61 | 62 | Besides the fact that ``recycle::shared_pool`` manages ``std::shared_ptr`` and 63 | ``recycle::unique_pool`` manages ``std::unique_ptr`` the API should be the 64 | same. So in the following you can replace ``shared`` with ``unique`` to 65 | swap the behavior. 66 | 67 | Header-only 68 | ........... 69 | 70 | The library itself is header-only so essentially to use it you just 71 | have to clone the repository and setup the right include paths in the 72 | project where you would like to use it. 73 | 74 | The library uses C++14 features, so you need a relatively recent compiler 75 | to use it. 
76 | 77 | Allocating Objects 78 | ------------------ 79 | 80 | There are two ways we can control how objects are allocated: 81 | 82 | Using the Default Allocator 83 | ........................... 84 | 85 | Example: 86 | 87 | .. code-block:: cpp 88 | 89 | #include 90 | #include 91 | 92 | struct heavy_object 93 | { 94 | // ... some expensive resource 95 | }; 96 | 97 | 98 | recycle::shared_pool pool; 99 | 100 | // Initially the pool is empty 101 | assert(pool.unused_resources() == 0U); 102 | 103 | { 104 | auto o1 = pool.allocate(); 105 | } 106 | 107 | // Heavy object is back in the pool 108 | assert(pool.unused_resources() == 1U); 109 | 110 | In this case we use the default constructor of the 111 | ``recycle::shared_pool`` this will only work if the object in this 112 | case ``heavy_object`` is default constructible (i.e. has a constructor 113 | which takes no arguments). Internally the resource pool uses 114 | ``std::make_shared`` to allocate the object. 115 | 116 | Using a Custom Allocator 117 | ........................ 118 | 119 | Example: 120 | 121 | .. code-block:: cpp 122 | 123 | #include 124 | #include 125 | 126 | struct heavy_object 127 | { 128 | heavy_object(std::size_t size); 129 | 130 | // ... some expensive resource 131 | }; 132 | 133 | auto make = []()->std::shared_ptr 134 | { 135 | return std::make_shared(300000U); 136 | }; 137 | 138 | recycle::shared_pool pool(make); 139 | 140 | auto o1 = pool.allocate(); 141 | 142 | In this case we provide a custom allocator function which takes no 143 | arguments and returns a ``std::shared_ptr``. 144 | 145 | Recycling Objects 146 | ----------------- 147 | 148 | When recycling objects it is sometimes necessary to ensure that 149 | certain clean-up operations are performed before objects get stored in 150 | the pool. This can be open file handles etc. which should be 151 | closed. We cannot rely on the destructor for this when using a resource pool. 
152 | 153 | To support this the ``recycle::shared_pool`` support a custom 154 | recycle function which will be called right before an object is about 155 | to go back into the pool. 156 | 157 | Example: 158 | 159 | .. code-block:: cpp 160 | 161 | #include 162 | #include 163 | 164 | struct heavy_object 165 | { 166 | heavy_object(std::size_t size); 167 | 168 | // ... some expensive resource 169 | }; 170 | 171 | auto make = []()->std::shared_ptr 172 | { 173 | return std::make_shared(300000U); 174 | }; 175 | 176 | auto recycle = [](std::shared_ptr o) 177 | { 178 | o->close_sockets(); 179 | }; 180 | 181 | 182 | recycle::shared_pool pool(make, recycle); 183 | 184 | { 185 | auto o1 = pool.allocate(); 186 | 187 | // As we exit the scope here recycle will be called 188 | // with o1 as argument. 189 | } 190 | 191 | Thread Safety 192 | ------------- 193 | 194 | Since the free lunch is over we want to make sure that the resource 195 | pool is thread safe. 196 | 197 | This can be achieved by specifying a lock policy (we were inspired by the 198 | flyweight library in Boost). 199 | 200 | Example: 201 | 202 | .. code-block:: cpp 203 | 204 | #include 205 | #include 206 | #include 207 | 208 | struct heavy_object 209 | { 210 | // ... 
some expensive resource 211 | }; 212 | 213 | struct lock_policy 214 | { 215 | using mutex_type = std::mutex; 216 | using lock_type = std::lock_guard; 217 | }; 218 | 219 | recycle::shared_pool pool; 220 | 221 | // Lambda the threads will execute captures a reference to the pool 222 | // so they will all operate on the same pool concurrently 223 | auto run = [&pool]() 224 | { 225 | auto a1 = pool.allocate(); 226 | }; 227 | 228 | const std::size_t number_threads = 8; 229 | std::thread t[number_threads]; 230 | 231 | //Launch a group of threads 232 | for (std::size_t i = 0; i < number_threads; ++i) 233 | { 234 | t[i] = std::thread(run); 235 | } 236 | 237 | //Join the threads with the main thread 238 | for (std::size_t i = 0; i < number_threads; ++i) 239 | { 240 | t[i].join(); 241 | } 242 | 243 | Use as Dependency in CMake 244 | -------------------------- 245 | 246 | To depend on this project when using the CMake build system, add the following 247 | in your CMake build script: 248 | 249 | .. code-block:: cmake 250 | 251 | add_subdirectory("/path/to/recycle" recycle) 252 | target_link_libraries( steinwurf::recycle) 253 | 254 | Where ```` is replaced by your target. 255 | -------------------------------------------------------------------------------- /src/recycle/unique_pool.hpp: -------------------------------------------------------------------------------- 1 | // Copyright Steinwurf ApS 2014. 2 | // All Rights Reserved 3 | // 4 | // Distributed under the "BSD License". See the accompanying LICENSE.rst file. 5 | 6 | #pragma once 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include "no_locking_policy.hpp" 18 | 19 | namespace recycle 20 | { 21 | /// @brief The unique_pool stores value objects and recycles them. 
22 | /// 23 | /// The unique_pool is a useful construct if you have some 24 | /// expensive to create objects where you would like to create a 25 | /// factory capable of recycling the objects. 26 | /// 27 | /// Note, when using the unique pool in a multithreaded environment 28 | /// you should use a locking policy and make sure that the objects 29 | /// you allocate are thread safe. The default locking policy 30 | /// is no_locking_policy which means that the pool is not thread 31 | /// safe. 32 | template 33 | class unique_pool 34 | { 35 | private: 36 | /// Forward declare 37 | struct deleter; 38 | struct impl; 39 | 40 | public: 41 | /// The type managed 42 | using value_type = Value; 43 | 44 | /// The pointer to the resource 45 | using pool_ptr = std::unique_ptr; 46 | 47 | /// The owning pointer to the resource 48 | using value_ptr = std::unique_ptr; 49 | 50 | /// The allocate function type 51 | /// Should take no arguments and return an std::unique_ptr to the Value 52 | using allocate_function = std::function; 53 | 54 | /// The recycle function type 55 | /// If specified the recycle function will be called every time a 56 | /// resource gets recycled into the pool. This allows temporary 57 | /// resources, e.g., file handles to be closed when an object is longer 58 | /// used. 59 | using recycle_function = std::function; 60 | 61 | /// The locking policy mutex type 62 | using mutex_type = typename LockingPolicy::mutex_type; 63 | 64 | /// The locking policy lock type 65 | using lock_type = typename LockingPolicy::lock_type; 66 | 67 | public: 68 | /// Default constructor, we only want this to be available 69 | /// i.e. the unique_pool to be default constructible if the 70 | /// value_type we build is default constructible. 71 | /// 72 | /// This means that we only want 73 | /// std::is_default_constructible>::value to 74 | /// be true if the type T is default constructible. 
75 | /// 76 | /// Unfortunately this does not work if we don't do the 77 | /// template magic seen below. What we do there is to use 78 | /// SFINAE to disable the default constructor for non default 79 | /// constructible types. 80 | /// 81 | /// It looks quite ugly and if somebody can fix in a simpler way 82 | /// please do :) 83 | template ::value, 85 | uint8_t>::type = 0> 86 | unique_pool() : 87 | m_pool(std::make_shared( 88 | allocate_function(std::make_unique))) 89 | { 90 | } 91 | 92 | /// Create a unique_pool using a specific allocate function. 93 | /// @param allocate Allocation function 94 | unique_pool(allocate_function allocate) : 95 | m_pool(std::make_shared(std::move(allocate))) 96 | { 97 | } 98 | 99 | /// Create a unique_pool using a specific allocate function and 100 | /// recycle function. 101 | /// @param allocate Allocation function 102 | /// @param recycle Recycle function. If used in a threaded environment 103 | /// the recycle function should be thread safe. 104 | unique_pool(allocate_function allocate, recycle_function recycle) : 105 | m_pool(std::make_shared(std::move(allocate), std::move(recycle))) 106 | { 107 | } 108 | 109 | /// Copy constructor 110 | unique_pool(const unique_pool& other) : 111 | m_pool(std::make_shared(*other.m_pool)) 112 | { 113 | } 114 | 115 | /// Move constructor 116 | unique_pool(unique_pool&& other) : m_pool(std::move(other.m_pool)) 117 | { 118 | assert(m_pool); 119 | } 120 | 121 | /// Copy assignment 122 | unique_pool& operator=(const unique_pool& other) 123 | { 124 | unique_pool tmp(other); 125 | std::swap(*this, tmp); 126 | return *this; 127 | } 128 | 129 | /// Move assignment 130 | unique_pool& operator=(unique_pool&& other) 131 | { 132 | m_pool = std::move(other.m_pool); 133 | return *this; 134 | } 135 | 136 | /// @returns the number of unused resources 137 | std::size_t unused_resources() const 138 | { 139 | assert(m_pool); 140 | return m_pool->unused_resources(); 141 | } 142 | 143 | /// Frees all unused 
resources 144 | void free_unused() 145 | { 146 | assert(m_pool); 147 | m_pool->free_unused(); 148 | } 149 | 150 | /// @return A resource from the pool. 151 | pool_ptr allocate() 152 | { 153 | assert(m_pool); 154 | return m_pool->allocate(); 155 | } 156 | 157 | private: 158 | /// The actual pool implementation. We use the 159 | /// enable_shared_from_this helper to make sure we can pass a 160 | /// "back-pointer" to the pooled objects. The idea behind this 161 | /// is that we need objects to be able to add themselves back 162 | /// into the pool once they go out of scope. 163 | struct impl : public std::enable_shared_from_this 164 | { 165 | /// @copydoc unique_pool::unique_pool(allocate_function) 166 | impl(allocate_function allocate) : m_allocate(std::move(allocate)) 167 | { 168 | assert(m_allocate); 169 | } 170 | 171 | /// @copydoc unique_pool::unique_pool(allocate_function, 172 | /// recycle_function) 173 | impl(allocate_function allocate, recycle_function recycle) : 174 | m_allocate(std::move(allocate)), m_recycle(std::move(recycle)) 175 | { 176 | assert(m_allocate); 177 | assert(m_recycle); 178 | } 179 | 180 | /// Copy constructor 181 | impl(const impl& other) : 182 | std::enable_shared_from_this(other), 183 | m_allocate(other.m_allocate), m_recycle(other.m_recycle) 184 | { 185 | std::size_t size = other.unused_resources(); 186 | for (std::size_t i = 0; i < size; ++i) 187 | { 188 | m_free_list.push_back(m_allocate()); 189 | } 190 | } 191 | 192 | /// Move constructor 193 | impl(impl&& other) : 194 | std::enable_shared_from_this(other), 195 | m_allocate(std::move(other.m_allocate)), 196 | m_recycle(std::move(other.m_recycle)), 197 | m_free_list(std::move(other.m_free_list)) 198 | { 199 | } 200 | 201 | /// Copy assignment 202 | impl& operator=(const impl& other) 203 | { 204 | impl tmp(other); 205 | std::swap(*this, tmp); 206 | return *this; 207 | } 208 | 209 | /// Move assignment 210 | impl& operator=(impl&& other) 211 | { 212 | m_allocate = 
std::move(other.m_allocate); 213 | m_recycle = std::move(other.m_recycle); 214 | m_free_list = std::move(other.m_free_list); 215 | return *this; 216 | } 217 | 218 | /// Allocate a new value from the pool 219 | pool_ptr allocate() 220 | { 221 | value_ptr resource; 222 | 223 | { 224 | lock_type lock(m_mutex); 225 | 226 | if (m_free_list.size() > 0) 227 | { 228 | resource = std::move(m_free_list.back()); 229 | m_free_list.pop_back(); 230 | } 231 | } 232 | 233 | if (!resource) 234 | { 235 | assert(m_allocate); 236 | resource = m_allocate(); 237 | } 238 | 239 | auto pool = impl::shared_from_this(); 240 | 241 | // Here we create a std::unique_ptr with a naked 242 | // pointer to the resource and a custom deleter 243 | // object. The custom deleter object stores two 244 | // things: 245 | // 246 | // 1. A std::weak_ptr to the pool (used when we 247 | // need to put the resource back in the pool). If 248 | // the pool dies before the resource then we can 249 | // detect this with the weak_ptr and no try to 250 | // access it. 251 | // 252 | // 2. A std::unique_ptr that points to the actual 253 | // resource and is the one actually keeping it alive. 
254 | 255 | value_type* naked_ptr = resource.get(); 256 | return pool_ptr(naked_ptr, deleter(pool, std::move(resource))); 257 | } 258 | 259 | /// @copydoc unique_pool::free_unused() 260 | void free_unused() 261 | { 262 | lock_type lock(m_mutex); 263 | m_free_list.clear(); 264 | } 265 | 266 | /// @copydoc unique_pool::unused_resources() 267 | std::size_t unused_resources() const 268 | { 269 | lock_type lock(m_mutex); 270 | return m_free_list.size(); 271 | } 272 | 273 | /// This function called when a resource should be added 274 | /// back into the pool 275 | void recycle(value_ptr resource) 276 | { 277 | if (m_recycle) 278 | { 279 | m_recycle(resource); 280 | } 281 | 282 | lock_type lock(m_mutex); 283 | m_free_list.push_back(std::move(resource)); 284 | } 285 | 286 | private: 287 | /// The allocator to use 288 | allocate_function m_allocate; 289 | 290 | /// The recycle function 291 | recycle_function m_recycle; 292 | 293 | /// Stores all the free resources 294 | std::list m_free_list; 295 | 296 | /// Mutex used to coordinate access to the pool. We had to 297 | /// make it mutable as we have to lock in the 298 | /// unused_resources() function. Otherwise we can have a 299 | /// race condition on the size it returns. I.e. if one 300 | /// threads releases a resource into the free list while 301 | /// another tries to read its size. 302 | mutable mutex_type m_mutex; 303 | }; 304 | 305 | /// The custom deleter object used by the std::unique_ptr 306 | /// to de-allocate the object if the pool goes out of 307 | /// scope. When a std::unique_ptr wants to de-allocate the 308 | /// object contained it will call the operator() define here. 
309 | struct deleter 310 | { 311 | /// Constructor 312 | deleter() = default; 313 | 314 | /// @param pool A weak_ptr to the pool 315 | /// @param resource The owning unique_ptr 316 | deleter(const std::weak_ptr& pool, 317 | std::unique_ptr resource) : 318 | m_pool(pool), m_resource(std::move(resource)) 319 | { 320 | assert(!m_pool.expired()); 321 | assert(m_resource); 322 | } 323 | 324 | /// Call operator called by std::unique_ptr when 325 | /// de-allocating the object. 326 | void operator()(value_type*) 327 | { 328 | // Place the resource in the free list 329 | auto pool = m_pool.lock(); 330 | 331 | if (pool) 332 | { 333 | pool->recycle(std::move(m_resource)); 334 | } 335 | } 336 | 337 | // Pointer to the pool needed for recycling 338 | std::weak_ptr m_pool; 339 | 340 | // The resource object 341 | std::unique_ptr m_resource; 342 | }; 343 | 344 | private: 345 | // The pool impl 346 | std::shared_ptr m_pool; 347 | }; 348 | } // namespace recycle 349 | -------------------------------------------------------------------------------- /test/src/test_unique_pool.cpp: -------------------------------------------------------------------------------- 1 | // Copyright Steinwurf ApS 2014. 2 | // All Rights Reserved 3 | // 4 | // Distributed under the "BSD License". See the accompanying LICENSE.rst file. 
5 | 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | 17 | // Put tests classes in an anonymous namespace to avoid violations of 18 | // ODF (one-definition-rule) in other translation units 19 | namespace 20 | { 21 | // Default constructible dummy object 22 | struct dummy_one 23 | { 24 | dummy_one() 25 | { 26 | ++m_count; 27 | } 28 | 29 | ~dummy_one() 30 | { 31 | --m_count; 32 | } 33 | 34 | // Counter which will check how many object have been allocate 35 | // and deallocated 36 | static int32_t m_count; 37 | }; 38 | 39 | int32_t dummy_one::m_count = 0; 40 | 41 | std::unique_ptr make_dummy_one() 42 | { 43 | return std::make_unique(); 44 | } 45 | 46 | // Non Default constructible dummy object 47 | struct dummy_two 48 | { 49 | dummy_two(std::size_t) 50 | { 51 | ++m_count; 52 | } 53 | 54 | ~dummy_two() 55 | { 56 | --m_count; 57 | } 58 | 59 | static int32_t m_count; 60 | }; 61 | 62 | int32_t dummy_two::m_count = 0; 63 | 64 | std::unique_ptr make_dummy_two(std::size_t v) 65 | { 66 | return std::make_unique(v); 67 | } 68 | 69 | // enable_shared_from_this dummy object 70 | struct dummy_three : std::enable_shared_from_this 71 | { 72 | 73 | dummy_three() 74 | { 75 | ++m_count; 76 | } 77 | 78 | ~dummy_three() 79 | { 80 | --m_count; 81 | } 82 | 83 | // Counter which will check how many object have been allocate 84 | // and deallocated 85 | static int32_t m_count; 86 | }; 87 | 88 | int32_t dummy_three::m_count = 0; 89 | 90 | // Thread safe dummy object 91 | struct dummy_four 92 | { 93 | dummy_four(std::size_t) 94 | { 95 | ++m_count; 96 | } 97 | 98 | ~dummy_four() 99 | { 100 | --m_count; 101 | } 102 | 103 | // Counter which will check how many object have been allocate 104 | // and deallocated 105 | static std::atomic m_count; 106 | }; 107 | 108 | std::atomic dummy_four::m_count = 0; 109 | } 110 | 111 | /// Test that our resource pool is a regular type. 
We are not 112 | /// implementing equality or less than here, but maybe we could. 113 | namespace 114 | { 115 | /// This code checks whether a type is regular or not. See the 116 | /// Eric Niebler's talk from C++Now 117 | /// 2014. http://youtu.be/zgOF4NrQllo 118 | template 119 | struct is_regular 120 | : std::integral_constant::value && 121 | std::is_copy_constructible::value && 122 | std::is_move_constructible::value && 123 | std::is_copy_assignable::value && 124 | std::is_move_assignable::value> 125 | { 126 | }; 127 | } 128 | 129 | TEST(test_unique_pool, regular_type) 130 | { 131 | EXPECT_TRUE(is_regular>::value); 132 | EXPECT_FALSE(is_regular>::value); 133 | } 134 | 135 | /// Test the basic API construct and free some objects 136 | TEST(test_unique_pool, api) 137 | { 138 | { 139 | recycle::unique_pool pool; 140 | 141 | EXPECT_EQ(pool.unused_resources(), 0U); 142 | 143 | { 144 | auto d1 = pool.allocate(); 145 | EXPECT_EQ(pool.unused_resources(), 0U); 146 | } 147 | 148 | EXPECT_EQ(pool.unused_resources(), 1U); 149 | 150 | auto d2 = pool.allocate(); 151 | 152 | EXPECT_EQ(pool.unused_resources(), 0U); 153 | 154 | auto d3 = pool.allocate(); 155 | 156 | EXPECT_EQ(pool.unused_resources(), 0U); 157 | EXPECT_EQ(dummy_one::m_count, 2); 158 | 159 | { 160 | auto d4 = pool.allocate(); 161 | EXPECT_EQ(pool.unused_resources(), 0U); 162 | } 163 | 164 | EXPECT_EQ(pool.unused_resources(), 1U); 165 | 166 | pool.free_unused(); 167 | 168 | EXPECT_EQ(pool.unused_resources(), 0U); 169 | } 170 | 171 | EXPECT_EQ(dummy_one::m_count, 0); 172 | } 173 | 174 | /// Test the pool works with std::bind 175 | TEST(test_unique_pool, bind) 176 | { 177 | { 178 | recycle::unique_pool pool_one(std::bind(make_dummy_one)); 179 | 180 | recycle::unique_pool pool_two(std::bind(make_dummy_two, 4U)); 181 | 182 | auto o1 = pool_one.allocate(); 183 | auto o2 = pool_two.allocate(); 184 | 185 | EXPECT_EQ(dummy_one::m_count, 1); 186 | EXPECT_EQ(dummy_two::m_count, 1); 187 | } 188 | 189 | 
EXPECT_EQ(dummy_one::m_count, 0); 190 | EXPECT_EQ(dummy_two::m_count, 0); 191 | } 192 | 193 | /// Test that the pool works for non default constructable objects, if 194 | /// we provide the allocator 195 | TEST(test_unique_pool, non_default_constructable) 196 | { 197 | { 198 | recycle::unique_pool pool(std::bind(make_dummy_two, 4U)); 199 | 200 | auto o1 = pool.allocate(); 201 | auto o2 = pool.allocate(); 202 | 203 | EXPECT_EQ(dummy_two::m_count, 2); 204 | } 205 | 206 | EXPECT_EQ(dummy_two::m_count, 0); 207 | 208 | { 209 | auto make = []() -> std::unique_ptr 210 | { return std::make_unique(3U); }; 211 | 212 | recycle::unique_pool pool(make); 213 | 214 | auto o1 = pool.allocate(); 215 | auto o2 = pool.allocate(); 216 | 217 | EXPECT_EQ(dummy_two::m_count, 2); 218 | } 219 | 220 | EXPECT_EQ(dummy_two::m_count, 0); 221 | } 222 | 223 | /// Test that the pool works for non constructable objects, even if 224 | /// we do not provide the allocator 225 | TEST(test_unique_pool, default_constructable) 226 | { 227 | { 228 | recycle::unique_pool pool; 229 | 230 | auto o1 = pool.allocate(); 231 | auto o2 = pool.allocate(); 232 | 233 | EXPECT_EQ(dummy_one::m_count, 2); 234 | } 235 | 236 | EXPECT_EQ(dummy_one::m_count, 0); 237 | } 238 | 239 | /// Test that everything works even if the pool dies before the 240 | /// objects allocated 241 | TEST(test_unique_pool, pool_die_before_object) 242 | { 243 | { 244 | recycle::unique_pool::pool_ptr d1; 245 | recycle::unique_pool::pool_ptr d2; 246 | recycle::unique_pool::pool_ptr d3; 247 | 248 | { 249 | recycle::unique_pool pool; 250 | 251 | d1 = pool.allocate(); 252 | d2 = pool.allocate(); 253 | d3 = pool.allocate(); 254 | 255 | EXPECT_EQ(dummy_one::m_count, 3); 256 | } 257 | 258 | EXPECT_EQ(dummy_one::m_count, 3); 259 | } 260 | 261 | EXPECT_EQ(dummy_one::m_count, 0); 262 | } 263 | 264 | /// Test that the recycle functionality works 265 | TEST(test_unique_pool, recycle) 266 | { 267 | std::size_t recycled = 0; 268 | 269 | auto recycle = 
[&recycled](std::unique_ptr& o) 270 | { 271 | EXPECT_TRUE((bool)o); 272 | ++recycled; 273 | }; 274 | 275 | auto make = []() -> std::unique_ptr 276 | { return std::make_unique(3U); }; 277 | 278 | recycle::unique_pool pool(make, recycle); 279 | 280 | auto o1 = pool.allocate(); 281 | o1.reset(); 282 | 283 | EXPECT_EQ(recycled, 1U); 284 | } 285 | 286 | /// Test that copying the shared_pool works as expected. 287 | /// 288 | /// For a type to be regular then: 289 | /// 290 | /// T a = b; assert(a == b); 291 | /// T a; a = b; <-> T a = b; 292 | /// T a = c; T b = c; a = d; assert(b == c); 293 | /// T a = c; T b = c; zap(a); assert(b == c && a != b); 294 | /// 295 | TEST(test_unique_pool, copy_constructor) 296 | { 297 | recycle::unique_pool pool; 298 | 299 | auto o1 = pool.allocate(); 300 | auto o2 = pool.allocate(); 301 | 302 | o1.reset(); 303 | 304 | recycle::unique_pool new_pool(pool); 305 | 306 | EXPECT_EQ(pool.unused_resources(), 1U); 307 | EXPECT_EQ(new_pool.unused_resources(), 1U); 308 | 309 | o2.reset(); 310 | 311 | EXPECT_EQ(pool.unused_resources(), 2U); 312 | EXPECT_EQ(new_pool.unused_resources(), 1U); 313 | 314 | EXPECT_EQ(dummy_one::m_count, 3); 315 | 316 | pool.free_unused(); 317 | new_pool.free_unused(); 318 | 319 | EXPECT_EQ(dummy_one::m_count, 0); 320 | } 321 | 322 | /// Test copy assignment works 323 | TEST(test_unique_pool, copy_assignment) 324 | { 325 | recycle::unique_pool pool; 326 | 327 | auto o1 = pool.allocate(); 328 | auto o2 = pool.allocate(); 329 | 330 | o1.reset(); 331 | 332 | recycle::unique_pool new_pool; 333 | new_pool = pool; 334 | 335 | EXPECT_EQ(dummy_one::m_count, 3); 336 | auto o3 = new_pool.allocate(); 337 | EXPECT_EQ(dummy_one::m_count, 3); 338 | } 339 | 340 | /// Test move constructor 341 | TEST(test_unique_pool, move_constructor) 342 | { 343 | recycle::unique_pool pool; 344 | 345 | auto o1 = pool.allocate(); 346 | auto o2 = pool.allocate(); 347 | 348 | o1.reset(); 349 | 350 | recycle::unique_pool new_pool(std::move(pool)); 351 | 352 
| o2.reset(); 353 | EXPECT_EQ(new_pool.unused_resources(), 2U); 354 | } 355 | 356 | /// Test move assignment 357 | TEST(test_unique_pool, move_assignment) 358 | { 359 | recycle::unique_pool pool; 360 | 361 | auto o1 = pool.allocate(); 362 | auto o2 = pool.allocate(); 363 | 364 | o1.reset(); 365 | 366 | recycle::unique_pool new_pool; 367 | new_pool = std::move(pool); 368 | 369 | o2.reset(); 370 | 371 | EXPECT_EQ(new_pool.unused_resources(), 2U); 372 | } 373 | 374 | /// Test that copy assignment works when we copy from an object with 375 | /// recycle functionality 376 | TEST(test_unique_pool, copy_recycle) 377 | { 378 | std::size_t recycled = 0; 379 | 380 | auto recycle = [&recycled](std::unique_ptr& o) 381 | { 382 | EXPECT_TRUE((bool)o); 383 | ++recycled; 384 | }; 385 | 386 | auto make = []() -> std::unique_ptr 387 | { return std::make_unique(3U); }; 388 | 389 | recycle::unique_pool pool(make, recycle); 390 | recycle::unique_pool new_pool = pool; 391 | 392 | EXPECT_EQ(pool.unused_resources(), 0U); 393 | EXPECT_EQ(new_pool.unused_resources(), 0U); 394 | 395 | auto o1 = new_pool.allocate(); 396 | 397 | EXPECT_EQ(dummy_two::m_count, 1); 398 | 399 | o1.reset(); 400 | EXPECT_EQ(recycled, 1U); 401 | 402 | new_pool.free_unused(); 403 | 404 | EXPECT_EQ(dummy_two::m_count, 0); 405 | } 406 | 407 | /// Test that we are thread safe 408 | namespace 409 | { 410 | struct lock_policy 411 | { 412 | using mutex_type = std::mutex; 413 | using lock_type = std::lock_guard; 414 | }; 415 | } 416 | 417 | TEST(test_unique_pool, thread) 418 | { 419 | std::atomic recycled = 0; 420 | 421 | auto recycle = [&recycled](std::unique_ptr& o) 422 | { 423 | EXPECT_TRUE((bool)o); 424 | ++recycled; 425 | }; 426 | 427 | auto make = []() -> std::unique_ptr 428 | { return std::make_unique(3U); }; 429 | 430 | // The pool we will use 431 | using pool_type = recycle::unique_pool; 432 | 433 | pool_type pool(make, recycle); 434 | 435 | // Lambda the threads will execute captures a reference to the pool 436 | 
// so they will all operate on the same pool concurrently 437 | auto run = [&pool]() 438 | { 439 | { 440 | auto a1 = pool.allocate(); 441 | } 442 | 443 | auto a2 = pool.allocate(); 444 | auto a3 = pool.allocate(); 445 | 446 | { 447 | auto a4 = pool.allocate(); 448 | } 449 | 450 | pool_type new_pool = pool; 451 | 452 | auto b1 = new_pool.allocate(); 453 | auto b2 = new_pool.allocate(); 454 | 455 | pool.free_unused(); 456 | }; 457 | 458 | const std::size_t number_threads = 8; 459 | std::thread t[number_threads]; 460 | 461 | // Launch a group of threads 462 | for (std::size_t i = 0; i < number_threads; ++i) 463 | { 464 | t[i] = std::thread(run); 465 | } 466 | 467 | // Join the threads with the main thread 468 | for (std::size_t i = 0; i < number_threads; ++i) 469 | { 470 | t[i].join(); 471 | } 472 | } 473 | 474 | /// Test that the pool works for enable_shared_from_this objects, even if 475 | /// we do not provide the allocator 476 | TEST(test_unique_pool, enable_shared_from_this) 477 | { 478 | { 479 | recycle::unique_pool pool; 480 | 481 | auto o1 = pool.allocate(); 482 | 483 | EXPECT_EQ(dummy_three::m_count, 1); 484 | } 485 | 486 | EXPECT_EQ(dummy_three::m_count, 0); 487 | } 488 | -------------------------------------------------------------------------------- /test/src/test_resource_pool.cpp: -------------------------------------------------------------------------------- 1 | // Copyright Steinwurf ApS 2014. 2 | // All Rights Reserved 3 | // 4 | // Distributed under the "BSD License". See the accompanying LICENSE.rst file. 
5 | 6 | #include 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | 17 | // Put tests classes in an anonymous namespace to avoid violations of 18 | // ODF (one-definition-rule) in other translation units 19 | namespace 20 | { 21 | // Default constructible dummy object 22 | struct dummy_one 23 | { 24 | dummy_one() 25 | { 26 | ++m_count; 27 | } 28 | 29 | ~dummy_one() 30 | { 31 | --m_count; 32 | } 33 | 34 | // Counter which will check how many object have been allocate 35 | // and deallocated 36 | static int32_t m_count; 37 | }; 38 | 39 | int32_t dummy_one::m_count = 0; 40 | 41 | std::shared_ptr make_dummy_one() 42 | { 43 | return std::make_shared(); 44 | } 45 | 46 | // Non Default constructible dummy object 47 | struct dummy_two 48 | { 49 | dummy_two(std::size_t) 50 | { 51 | ++m_count; 52 | } 53 | 54 | ~dummy_two() 55 | { 56 | --m_count; 57 | } 58 | 59 | static int32_t m_count; 60 | }; 61 | 62 | int32_t dummy_two::m_count = 0; 63 | 64 | std::shared_ptr make_dummy_two(std::size_t v) 65 | { 66 | return std::make_shared(v); 67 | } 68 | 69 | // enable_shared_from_this dummy object 70 | struct dummy_three : std::enable_shared_from_this 71 | { 72 | 73 | dummy_three() 74 | { 75 | ++m_count; 76 | } 77 | 78 | ~dummy_three() 79 | { 80 | --m_count; 81 | } 82 | 83 | // Counter which will check how many object have been allocate 84 | // and deallocated 85 | static int32_t m_count; 86 | }; 87 | 88 | int32_t dummy_three::m_count = 0; 89 | 90 | // Thread safe dummy object 91 | struct dummy_four 92 | { 93 | dummy_four(std::size_t) 94 | { 95 | ++m_count; 96 | } 97 | 98 | ~dummy_four() 99 | { 100 | --m_count; 101 | } 102 | 103 | // Counter which will check how many object have been allocate 104 | // and deallocated 105 | static std::atomic m_count; 106 | }; 107 | 108 | std::atomic dummy_four::m_count = 0; 109 | } 110 | 111 | /// Test that our resource pool is a regular type. 
We are not 112 | /// implementing equality or less than here, but maybe we could. 113 | namespace 114 | { 115 | /// This code checks whether a type is regular or not. See the 116 | /// Eric Niebler's talk from C++Now 117 | /// 2014. http://youtu.be/zgOF4NrQllo 118 | template 119 | struct is_regular 120 | : std::integral_constant::value && 121 | std::is_copy_constructible::value && 122 | std::is_move_constructible::value && 123 | std::is_copy_assignable::value && 124 | std::is_move_assignable::value> 125 | { 126 | }; 127 | } 128 | 129 | TEST(test_shared_pool, regular_type) 130 | { 131 | EXPECT_TRUE(is_regular>::value); 132 | EXPECT_FALSE(is_regular>::value); 133 | } 134 | 135 | /// Test the basic API construct and free some objects 136 | TEST(test_shared_pool, api) 137 | { 138 | { 139 | recycle::shared_pool pool; 140 | 141 | EXPECT_EQ(pool.unused_resources(), 0U); 142 | 143 | { 144 | auto d1 = pool.allocate(); 145 | EXPECT_EQ(pool.unused_resources(), 0U); 146 | } 147 | 148 | EXPECT_EQ(pool.unused_resources(), 1U); 149 | 150 | auto d2 = pool.allocate(); 151 | 152 | EXPECT_EQ(pool.unused_resources(), 0U); 153 | 154 | auto d3 = pool.allocate(); 155 | 156 | EXPECT_EQ(pool.unused_resources(), 0U); 157 | EXPECT_EQ(dummy_one::m_count, 2); 158 | 159 | { 160 | auto d4 = pool.allocate(); 161 | EXPECT_EQ(pool.unused_resources(), 0U); 162 | } 163 | 164 | EXPECT_EQ(pool.unused_resources(), 1U); 165 | 166 | pool.free_unused(); 167 | 168 | EXPECT_EQ(pool.unused_resources(), 0U); 169 | } 170 | 171 | EXPECT_EQ(dummy_one::m_count, 0); 172 | } 173 | 174 | /// Test the pool works with std::bind 175 | TEST(test_shared_pool, bind) 176 | { 177 | { 178 | recycle::shared_pool pool_one(std::bind(make_dummy_one)); 179 | 180 | recycle::shared_pool pool_two(std::bind(make_dummy_two, 4U)); 181 | 182 | auto o1 = pool_one.allocate(); 183 | auto o2 = pool_two.allocate(); 184 | 185 | EXPECT_EQ(dummy_one::m_count, 1); 186 | EXPECT_EQ(dummy_two::m_count, 1); 187 | } 188 | 189 | 
EXPECT_EQ(dummy_one::m_count, 0); 190 | EXPECT_EQ(dummy_two::m_count, 0); 191 | } 192 | 193 | /// Test that the pool works for non default constructable objects, if 194 | /// we provide the allocator 195 | TEST(test_shared_pool, non_default_constructable) 196 | { 197 | { 198 | recycle::shared_pool pool(std::bind(make_dummy_two, 4U)); 199 | 200 | auto o1 = pool.allocate(); 201 | auto o2 = pool.allocate(); 202 | 203 | EXPECT_EQ(dummy_two::m_count, 2); 204 | } 205 | 206 | EXPECT_EQ(dummy_two::m_count, 0); 207 | 208 | { 209 | auto make = []() -> std::shared_ptr 210 | { return std::make_shared(3U); }; 211 | 212 | recycle::shared_pool pool(make); 213 | 214 | auto o1 = pool.allocate(); 215 | auto o2 = pool.allocate(); 216 | 217 | EXPECT_EQ(dummy_two::m_count, 2); 218 | } 219 | 220 | EXPECT_EQ(dummy_two::m_count, 0); 221 | } 222 | 223 | /// Test that the pool works for non constructable objects, even if 224 | /// we do not provide the allocator 225 | TEST(test_shared_pool, default_constructable) 226 | { 227 | { 228 | recycle::shared_pool pool; 229 | 230 | auto o1 = pool.allocate(); 231 | auto o2 = pool.allocate(); 232 | 233 | EXPECT_EQ(dummy_one::m_count, 2); 234 | } 235 | 236 | EXPECT_EQ(dummy_one::m_count, 0); 237 | } 238 | 239 | /// Test that everything works even if the pool dies before the 240 | /// objects allocated 241 | TEST(test_shared_pool, pool_die_before_object) 242 | { 243 | { 244 | std::shared_ptr d1; 245 | std::shared_ptr d2; 246 | std::shared_ptr d3; 247 | 248 | { 249 | recycle::shared_pool pool; 250 | 251 | d1 = pool.allocate(); 252 | d2 = pool.allocate(); 253 | d3 = pool.allocate(); 254 | 255 | EXPECT_EQ(dummy_one::m_count, 3); 256 | } 257 | 258 | EXPECT_EQ(dummy_one::m_count, 3); 259 | } 260 | 261 | EXPECT_EQ(dummy_one::m_count, 0); 262 | } 263 | 264 | /// Test that the recycle functionality works 265 | TEST(test_shared_pool, recycle) 266 | { 267 | std::size_t recycled = 0; 268 | 269 | auto recycle = [&recycled](std::shared_ptr o) 270 | { 271 | 
EXPECT_TRUE((bool)o); 272 | ++recycled; 273 | }; 274 | 275 | auto make = []() -> std::shared_ptr 276 | { return std::make_shared(3U); }; 277 | 278 | recycle::shared_pool pool(make, recycle); 279 | 280 | auto o1 = pool.allocate(); 281 | o1.reset(); 282 | 283 | EXPECT_EQ(recycled, 1U); 284 | } 285 | 286 | /// Test that copying the shared_pool works as expected. 287 | /// 288 | /// For a type to be regular then: 289 | /// 290 | /// T a = b; assert(a == b); 291 | /// T a; a = b; <-> T a = b; 292 | /// T a = c; T b = c; a = d; assert(b == c); 293 | /// T a = c; T b = c; zap(a); assert(b == c && a != b); 294 | /// 295 | TEST(test_shared_pool, copy_constructor) 296 | { 297 | recycle::shared_pool pool; 298 | 299 | auto o1 = pool.allocate(); 300 | auto o2 = pool.allocate(); 301 | 302 | o1.reset(); 303 | 304 | recycle::shared_pool new_pool(pool); 305 | 306 | EXPECT_EQ(pool.unused_resources(), 1U); 307 | EXPECT_EQ(new_pool.unused_resources(), 1U); 308 | 309 | o2.reset(); 310 | 311 | EXPECT_EQ(pool.unused_resources(), 2U); 312 | EXPECT_EQ(new_pool.unused_resources(), 1U); 313 | 314 | EXPECT_EQ(dummy_one::m_count, 3); 315 | 316 | pool.free_unused(); 317 | new_pool.free_unused(); 318 | 319 | EXPECT_EQ(dummy_one::m_count, 0); 320 | } 321 | 322 | /// Test copy assignment works 323 | TEST(test_shared_pool, copy_assignment) 324 | { 325 | recycle::shared_pool pool; 326 | 327 | auto o1 = pool.allocate(); 328 | auto o2 = pool.allocate(); 329 | 330 | o1.reset(); 331 | 332 | recycle::shared_pool new_pool; 333 | new_pool = pool; 334 | 335 | EXPECT_EQ(dummy_one::m_count, 3); 336 | auto o3 = new_pool.allocate(); 337 | EXPECT_EQ(dummy_one::m_count, 3); 338 | } 339 | 340 | /// Test move constructor 341 | TEST(test_shared_pool, move_constructor) 342 | { 343 | recycle::shared_pool pool; 344 | 345 | auto o1 = pool.allocate(); 346 | auto o2 = pool.allocate(); 347 | 348 | o1.reset(); 349 | 350 | recycle::shared_pool new_pool(std::move(pool)); 351 | 352 | o2.reset(); 353 | 
EXPECT_EQ(new_pool.unused_resources(), 2U); 354 | } 355 | 356 | /// Test move assignment 357 | TEST(test_shared_pool, move_assignment) 358 | { 359 | recycle::shared_pool pool; 360 | 361 | auto o1 = pool.allocate(); 362 | auto o2 = pool.allocate(); 363 | 364 | o1.reset(); 365 | 366 | recycle::shared_pool new_pool; 367 | new_pool = std::move(pool); 368 | 369 | o2.reset(); 370 | 371 | EXPECT_EQ(new_pool.unused_resources(), 2U); 372 | } 373 | 374 | /// Test that copy assignment works when we copy from an object with 375 | /// recycle functionality 376 | TEST(test_shared_pool, copy_recycle) 377 | { 378 | std::size_t recycled = 0; 379 | 380 | auto recycle = [&recycled](std::shared_ptr o) 381 | { 382 | EXPECT_TRUE((bool)o); 383 | ++recycled; 384 | }; 385 | 386 | auto make = []() -> std::shared_ptr 387 | { return std::make_shared(3U); }; 388 | 389 | recycle::shared_pool pool(make, recycle); 390 | recycle::shared_pool new_pool = pool; 391 | 392 | EXPECT_EQ(pool.unused_resources(), 0U); 393 | EXPECT_EQ(new_pool.unused_resources(), 0U); 394 | 395 | auto o1 = new_pool.allocate(); 396 | 397 | EXPECT_EQ(dummy_two::m_count, 1); 398 | 399 | o1.reset(); 400 | EXPECT_EQ(recycled, 1U); 401 | 402 | new_pool.free_unused(); 403 | 404 | EXPECT_EQ(dummy_two::m_count, 0); 405 | } 406 | 407 | /// Test that we are thread safe 408 | namespace 409 | { 410 | struct lock_policy 411 | { 412 | using mutex_type = std::mutex; 413 | using lock_type = std::lock_guard; 414 | }; 415 | } 416 | 417 | TEST(test_shared_pool, thread) 418 | { 419 | std::atomic recycled = 0; 420 | 421 | using dummy_four = std::vector; 422 | 423 | auto recycle = [&recycled](std::shared_ptr o) 424 | { 425 | EXPECT_TRUE((bool)o); 426 | ++recycled; 427 | }; 428 | 429 | auto make = []() -> std::shared_ptr 430 | { return std::make_shared(3U); }; 431 | 432 | // The pool we will use 433 | using pool_type = recycle::shared_pool; 434 | 435 | pool_type pool(make, recycle); 436 | 437 | // Lambda the threads will execute captures a 
reference to the pool 438 | // so they will all operate on the same pool concurrently 439 | auto run = [&pool]() 440 | { 441 | { 442 | auto a1 = pool.allocate(); 443 | } 444 | 445 | auto a2 = pool.allocate(); 446 | auto a3 = pool.allocate(); 447 | 448 | { 449 | auto a4 = pool.allocate(); 450 | } 451 | 452 | pool_type new_pool = pool; 453 | 454 | auto b1 = new_pool.allocate(); 455 | auto b2 = new_pool.allocate(); 456 | 457 | pool.free_unused(); 458 | }; 459 | 460 | const std::size_t number_threads = 8; 461 | std::thread t[number_threads]; 462 | 463 | // Launch a group of threads 464 | for (std::size_t i = 0; i < number_threads; ++i) 465 | { 466 | t[i] = std::thread(run); 467 | } 468 | 469 | // Join the threads with the main thread 470 | for (std::size_t i = 0; i < number_threads; ++i) 471 | { 472 | t[i].join(); 473 | } 474 | } 475 | 476 | /// Test that the pool works for enable_shared_from_this objects, even if 477 | /// we do not provide the allocator 478 | TEST(test_shared_pool, enable_shared_from_this) 479 | { 480 | { 481 | recycle::shared_pool pool; 482 | 483 | auto o1 = pool.allocate(); 484 | EXPECT_EQ(o1.use_count(), 1); 485 | 486 | EXPECT_EQ(dummy_three::m_count, 1); 487 | } 488 | 489 | EXPECT_EQ(dummy_three::m_count, 0); 490 | } 491 | -------------------------------------------------------------------------------- /src/recycle/shared_pool.hpp: -------------------------------------------------------------------------------- 1 | // Copyright Steinwurf ApS 2014. 2 | // All Rights Reserved 3 | // 4 | // Distributed under the "BSD License". See the accompanying LICENSE.rst file. 5 | 6 | #pragma once 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include "no_locking_policy.hpp" 18 | 19 | namespace recycle 20 | { 21 | /// @brief The shared pool stores value objects and recycles them. 
22 | /// 23 | /// The shared pool is a useful construct if you have some 24 | /// expensive to create objects where you would like to create a 25 | /// factory capable of recycling the objects. 26 | /// 27 | /// Note, when using the shared pool in a multithreaded environment 28 | /// you should use a locking policy and make sure that the objects 29 | /// you allocate are thread safe. The default locking policy 30 | /// is no_locking_policy which means that the pool is not thread 31 | /// safe. 32 | template 33 | class shared_pool 34 | { 35 | public: 36 | /// The type managed 37 | using value_type = Value; 38 | 39 | /// The pointer to the resource 40 | using value_ptr = std::shared_ptr; 41 | 42 | /// The allocate function type 43 | /// Should take no arguments and return an std::shared_ptr to the Value 44 | using allocate_function = std::function; 45 | 46 | /// The recycle function type 47 | /// If specified the recycle function will be called every time a 48 | /// resource gets recycled into the pool. This allows temporary 49 | /// resources, e.g., file handles to be closed when an object is longer 50 | /// used. 51 | using recycle_function = std::function; 52 | 53 | /// The locking policy mutex type 54 | using mutex_type = typename LockingPolicy::mutex_type; 55 | 56 | /// The locking policy lock type 57 | using lock_type = typename LockingPolicy::lock_type; 58 | 59 | public: 60 | /// Default constructor, we only want this to be available 61 | /// i.e. the shared_pool to be default constructible if the 62 | /// value_type we build is default constructible. 63 | /// 64 | /// This means that we only want 65 | /// std::is_default_constructible>::value to 66 | /// be true if the type T is default constructible. 67 | /// 68 | /// Unfortunately this does not work if we don't do the 69 | /// template magic seen below. What we do there is to use 70 | /// SFINAE to disable the default constructor for non default 71 | /// constructible types. 
72 | /// 73 | /// It looks quite ugly and if somebody can fix in a simpler way 74 | /// please do :) 75 | template ::value, 77 | uint8_t>::type = 0> 78 | shared_pool() : 79 | m_pool(std::make_shared( 80 | allocate_function(std::make_shared))) 81 | { 82 | } 83 | 84 | /// Create a shared pool using a specific allocate function. 85 | /// @param allocate Allocation function 86 | shared_pool(allocate_function allocate) : 87 | m_pool(std::make_shared(std::move(allocate))) 88 | { 89 | } 90 | 91 | /// Create a shared pool using a specific allocate function and 92 | /// recycle function. 93 | /// @param allocate Allocation function 94 | /// @param recycle Recycle function. If used in a threaded environment 95 | /// the recycle function should be thread safe. 96 | shared_pool(allocate_function allocate, recycle_function recycle) : 97 | m_pool(std::make_shared(std::move(allocate), std::move(recycle))) 98 | { 99 | } 100 | 101 | /// Copy constructor 102 | shared_pool(const shared_pool& other) : 103 | m_pool(std::make_shared(*other.m_pool)) 104 | { 105 | } 106 | 107 | /// Move constructor 108 | shared_pool(shared_pool&& other) : m_pool(std::move(other.m_pool)) 109 | { 110 | assert(m_pool); 111 | } 112 | 113 | /// Copy assignment 114 | shared_pool& operator=(const shared_pool& other) 115 | { 116 | shared_pool tmp(other); 117 | std::swap(*this, tmp); 118 | return *this; 119 | } 120 | 121 | /// Move assignment 122 | shared_pool& operator=(shared_pool&& other) 123 | { 124 | m_pool = std::move(other.m_pool); 125 | return *this; 126 | } 127 | 128 | /// @returns the number of unused resources 129 | std::size_t unused_resources() const 130 | { 131 | assert(m_pool); 132 | return m_pool->unused_resources(); 133 | } 134 | 135 | /// Frees all unused resources 136 | void free_unused() 137 | { 138 | assert(m_pool); 139 | m_pool->free_unused(); 140 | } 141 | 142 | /// @return A resource from the pool. 
143 | value_ptr allocate() 144 | { 145 | assert(m_pool); 146 | return m_pool->allocate(); 147 | } 148 | 149 | private: 150 | /// The actual pool implementation. We use the 151 | /// enable_shared_from_this helper to make sure we can pass a 152 | /// "back-pointer" to the pooled objects. The idea behind this 153 | /// is that we need objects to be able to add themselves back 154 | /// into the pool once they go out of scope. 155 | struct impl : public std::enable_shared_from_this 156 | { 157 | /// @copydoc shared_pool::shared_pool(allocate_function) 158 | impl(allocate_function allocate) : m_allocate(std::move(allocate)) 159 | { 160 | assert(m_allocate); 161 | } 162 | 163 | /// @copydoc shared_pool::shared_pool(allocate_function, 164 | /// recycle_function) 165 | impl(allocate_function allocate, recycle_function recycle) : 166 | m_allocate(std::move(allocate)), m_recycle(std::move(recycle)) 167 | { 168 | assert(m_allocate); 169 | assert(m_recycle); 170 | } 171 | 172 | /// Copy constructor 173 | impl(const impl& other) : 174 | std::enable_shared_from_this(other), 175 | m_allocate(other.m_allocate), m_recycle(other.m_recycle) 176 | { 177 | std::size_t size = other.unused_resources(); 178 | for (std::size_t i = 0; i < size; ++i) 179 | { 180 | m_free_list.push_back(m_allocate()); 181 | } 182 | } 183 | 184 | /// Move constructor 185 | impl(impl&& other) : 186 | std::enable_shared_from_this(other), 187 | m_allocate(std::move(other.m_allocate)), 188 | m_recycle(std::move(other.m_recycle)), 189 | m_free_list(std::move(other.m_free_list)) 190 | { 191 | } 192 | 193 | /// Copy assignment 194 | impl& operator=(const impl& other) 195 | { 196 | impl tmp(other); 197 | std::swap(*this, tmp); 198 | return *this; 199 | } 200 | 201 | /// Move assignment 202 | impl& operator=(impl&& other) 203 | { 204 | m_allocate = std::move(other.m_allocate); 205 | m_recycle = std::move(other.m_recycle); 206 | m_free_list = std::move(other.m_free_list); 207 | return *this; 208 | } 209 | 210 | /// 
Allocate a new value from the pool 211 | value_ptr allocate() 212 | { 213 | value_ptr resource; 214 | 215 | { 216 | lock_type lock(m_mutex); 217 | 218 | if (m_free_list.size() > 0) 219 | { 220 | resource = m_free_list.back(); 221 | m_free_list.pop_back(); 222 | } 223 | } 224 | 225 | if (!resource) 226 | { 227 | assert(m_allocate); 228 | resource = m_allocate(); 229 | } 230 | 231 | auto pool = impl::shared_from_this(); 232 | 233 | // Here we create a std::shared_ptr with a naked 234 | // pointer to the resource and a custom deleter 235 | // object. The custom deleter object stores two 236 | // things: 237 | // 238 | // 1. A std::weak_ptr to the pool (used when we 239 | // need to put the resource back in the pool). If 240 | // the pool dies before the resource then we can 241 | // detect this with the weak_ptr and no try to 242 | // access it. 243 | // 244 | // 2. A std::shared_ptr that points to the actual 245 | // resource and is the one actually keeping it alive. 246 | 247 | return value_ptr(resource.get(), deleter(pool, resource)); 248 | } 249 | 250 | /// @copydoc shared_pool::free_unused() 251 | void free_unused() 252 | { 253 | lock_type lock(m_mutex); 254 | m_free_list.clear(); 255 | } 256 | 257 | /// @copydoc shared_pool::unused_resources() 258 | std::size_t unused_resources() const 259 | { 260 | lock_type lock(m_mutex); 261 | return m_free_list.size(); 262 | } 263 | 264 | /// This function called when a resource should be added 265 | /// back into the pool 266 | void recycle(const value_ptr& resource) 267 | { 268 | if (m_recycle) 269 | { 270 | m_recycle(resource); 271 | } 272 | 273 | lock_type lock(m_mutex); 274 | m_free_list.push_back(resource); 275 | } 276 | 277 | private: 278 | /// The allocator to use 279 | allocate_function m_allocate; 280 | 281 | /// The recycle function 282 | recycle_function m_recycle; 283 | 284 | /// Stores all the free resources 285 | std::list m_free_list; 286 | 287 | /// Mutex used to coordinate access to the pool. 
We had to 288 | /// make it mutable as we have to lock in the 289 | /// unused_resources() function. Otherwise we can have a 290 | /// race condition on the size it returns. I.e. if one 291 | /// threads releases a resource into the free list while 292 | /// another tries to read its size. 293 | mutable mutex_type m_mutex; 294 | }; 295 | 296 | /// The custom deleter object used by the std::shared_ptr 297 | /// to de-allocate the object if the pool goes out of 298 | /// scope. When a std::shared_ptr wants to de-allocate the 299 | /// object contained it will call the operator() define here. 300 | struct deleter 301 | { 302 | /// @param pool. A weak_ptr to the pool 303 | deleter(const std::weak_ptr& pool, const value_ptr& resource) : 304 | m_pool(pool), m_resource(resource) 305 | { 306 | assert(!m_pool.expired()); 307 | assert(m_resource); 308 | } 309 | 310 | /// Call operator called by std::shared_ptr when 311 | /// de-allocating the object. 312 | void operator()(value_type*) 313 | { 314 | // Place the resource in the free list 315 | auto pool = m_pool.lock(); 316 | 317 | if (pool) 318 | { 319 | pool->recycle(m_resource); 320 | } 321 | 322 | // This reset() is needed because otherwise a circular 323 | // dependency can arise here in special situations. 324 | // 325 | // One example of such a situation is when the value_type 326 | // derives from std::enable_shared_from_this in that case, 327 | // the following will happen: 328 | // 329 | // The std::enable_shared_from_this implementation works by 330 | // storing a std::weak_ptr to itself. This std::weak_ptr 331 | // internally points to an "counted" object keeping track 332 | // of the reference count managing the raw pointer's release 333 | // policy (e.g. storing the custom deleter etc.) for all 334 | // the shared_ptr's. The "counted" object is both kept 335 | // alive by all std::shared_ptr and std::weak_ptr objects. 
336 | // 337 | // In this specific case of std::enable_shared_from_this, 338 | // the custom deleter is not destroyed because the internal 339 | // std::weak_ptr still points to the "counted" object and 340 | // inside the custom deleter we are keeping the managed 341 | // object alive because we have a std::shared_ptr to it. 342 | // 343 | // The following diagram show the circular dependency where 344 | // the arrows indicate what is keeping what alive: 345 | // 346 | // +----------------+ +--------------+ 347 | // | custom deleter +--------------+ | real deleter | 348 | // +----------------+ | +--------------+ 349 | // ^ | ^ 350 | // | | | 351 | // | | | 352 | // +-----+--------+ | +-------+------+ 353 | // | shared_count | | | shared_count | 354 | // +--------------+ | +--------------+ 355 | // ^ ^ | ^ 356 | // | | | | 357 | // | | | | 358 | // | | v | 359 | // | | +------------+ +------------+ | 360 | // | +--+ shared_ptr | | shared_ptr +-+ 361 | // | +------------+ +----+-------+ 362 | // | | 363 | // | | 364 | // +----+-----+ +--------+ | 365 | // | weak_ptr |<-----------+ object |<-+ 366 | // +----------+ +--------+ 367 | // 368 | // The std::shared_ptr on the right is the one managed by the 369 | // shared pool, it is the one actually deleting the 370 | // object when it goes out of scope. The shared_ptr on the 371 | // left is the one which contains the custom 372 | // deleter that will return the object into the resource 373 | // pool when it goes out of scope. 374 | // 375 | // By calling reset on the shared_ptr in the custom deleter 376 | // we break the cyclic dependency. 
377 | m_resource.reset(); 378 | } 379 | 380 | // Pointer to the pool needed for recycling 381 | std::weak_ptr m_pool; 382 | 383 | // The resource object 384 | value_ptr m_resource; 385 | }; 386 | 387 | private: 388 | // The pool impl 389 | std::shared_ptr m_pool; 390 | }; 391 | } // namespace recycle 392 | --------------------------------------------------------------------------------